[hopem,r=]
Adds support for changing the min_part_hours value on swift rings post-install and for resyncing rings across the cluster on demand. The default min-hours is now 0, so that any number of storage units can join and trigger a rebalance and resync of rings across the cluster. Also refactors code and cleans up style.
parent 6b2a7e7737
commit 56fe58cc6b
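For illustration, once the cluster has been built an operator could restore the upstream rebalance interval and force a one-off ring resync with juju's generic config commands (a sketch: the option names come from this commit, the service name swift-proxy is assumed):

    juju set swift-proxy min-hours=1
    juju set swift-proxy force-cluster-ring-sync=true
    # reset the one-shot toggle so later config changes don't resync again
    juju set swift-proxy force-cluster-ring-sync=false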
config.yaml (18 lines changed)
@@ -30,9 +30,15 @@ options:
     type: int
     description: Minimum replicas.
   min-hours:
-    default: 1
+    default: 0
     type: int
-    description: Minimum hours between balances
+    description: |
+      This is the Swift ring builder min_part_hours parameter. This
+      setting represents the amount of time in hours that Swift will wait
+      between subsequent ring rebalances in order to avoid large IO loads as
+      data is rebalanced when new devices are added to the cluster. Once your
+      cluster has been built, you can set this to a higher value e.g. 1
+      (upstream default).
   zone-assignment:
     default: "manual"
     type: string
@@ -46,6 +52,14 @@ options:
       zones before the storage ring will be initially balance. Deployment
       requirements differ based on the zone-assignment policy configured, see
       this charm's README for details.
+  force-cluster-ring-sync:
+    default: False
+    type: boolean
+    description: |
+      There are some cases where one might want to resync rings and
+      builders across storage relations, perhaps as a result of a manual
+      ring (e.g. rebalance) update, so this toggle can be used to trigger a
+      resync across the entire cluster.
   # User provided SSL cert and key
   ssl_cert:
     type: string
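The min-hours option is the same min_part_hours knob that swift's own ring builder exposes; outside the charm the equivalent manual step would be something like the following (a sketch; the builder path is an example):

    swift-ring-builder /etc/swift/account.builder set_min_part_hours 1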
hooks/swift-storage-relation-joined (new symbolic link, 1 line)
@@ -0,0 +1 @@
+swift_hooks.py
hooks/swift_context.py
@@ -1,3 +1,6 @@
+import os
+import uuid
+
 from charmhelpers.core.hookenv import (
     config,
     log,
@@ -5,38 +8,35 @@ from charmhelpers.core.hookenv import (
     related_units,
     relation_get,
     unit_get,
-    service_name
+    service_name,
 )
 
 from charmhelpers.contrib.openstack.context import (
     OSContextGenerator,
     ApacheSSLContext as SSLContext,
     context_complete,
 )
 
 from charmhelpers.contrib.hahelpers.cluster import (
     determine_api_port,
     determine_apache_port,
 )
 
 from charmhelpers.contrib.network.ip import (
     get_ipv6_addr
 )
 
 from charmhelpers.contrib.openstack.utils import get_host_ip
-import os
-import uuid
 
+
+SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
+WWW_DIR = '/var/www/swift-rings'
 
 
 class HAProxyContext(OSContextGenerator):
     interfaces = ['cluster']
 
     def __call__(self):
-        '''
-        Extends the main charmhelpers HAProxyContext with a port mapping
+        """Extends the main charmhelpers HAProxyContext with a port mapping
         specific to this charm.
         Also used to extend cinder.conf context with correct api_listening_port
-        '''
+        """
         haproxy_port = config('bind-port')
         api_port = determine_apache_port(config('bind-port'))
 
@@ -46,9 +46,6 @@ class HAProxyContext(OSContextGenerator):
         return ctxt
 
 
-WWW_DIR = '/var/www/swift-rings'
-
-
 class ApacheSSLContext(SSLContext):
     interfaces = ['https']
     external_ports = [config('bind-port')]
@@ -66,6 +63,7 @@ class SwiftRingContext(OSContextGenerator):
                 host_ip = get_ipv6_addr(exc_list=[config('vip')])[0]
             else:
                 host_ip = get_host_ip(host)
+
             allowed_hosts.append(host_ip)
 
         ctxt = {
@@ -90,6 +88,7 @@ class SwiftIdentityContext(OSContextGenerator):
         else:
             proxy_ip = get_host_ip(unit_get('private-address'))
             memcached_ip = get_host_ip(unit_get('private-address'))
+
         ctxt = {
             'proxy_ip': proxy_ip,
             'memcached_ip': memcached_ip,
@@ -147,6 +146,7 @@ class SwiftIdentityContext(OSContextGenerator):
             }
             if context_complete(ks_auth):
                 ctxt.update(ks_auth)
+
         return ctxt
 
 
@@ -158,9 +158,8 @@ class MemcachedContext(OSContextGenerator):
             ctxt['memcached_ip'] = 'ip6-localhost'
         else:
             ctxt['memcached_ip'] = get_host_ip(unit_get('private-address'))
-        return ctxt
 
-SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
+        return ctxt
 
 
 def get_swift_hash():
@@ -176,6 +175,7 @@ def get_swift_hash():
                                       service_name()))
         with open(SWIFT_HASH_FILE, 'w') as hashfile:
             hashfile.write(swift_hash)
+
     return swift_hash
hooks/swift_hooks.py
@@ -2,71 +2,79 @@
 
 import os
 import sys
 import shutil
-import uuid
 import subprocess
+import uuid
 
-import charmhelpers.contrib.openstack.utils as openstack
-import charmhelpers.contrib.hahelpers.cluster as cluster
 from swift_utils import (
     register_configs,
     restart_map,
     determine_packages,
     ensure_swift_dir,
-    SWIFT_RINGS, get_www_dir,
+    SWIFT_RINGS,
+    get_www_dir,
     initialize_ring,
     swift_user,
     SWIFT_HA_RES,
     balance_ring,
     SWIFT_CONF_DIR,
     get_zone,
     exists_in_ring,
     add_to_ring,
     should_balance,
     do_openstack_upgrade,
-    write_rc_script,
-    setup_ipv6
+    setup_ipv6,
+    balance_rings,
+    builders_synced,
+    sync_proxy_rings,
+    update_min_part_hours,
+    notify_storage_rings_available,
+    notify_peers_builders_available,
+    mark_www_rings_deleted,
+    disable_peer_apis,
 )
 from swift_context import get_swift_hash
 
+import charmhelpers.contrib.openstack.utils as openstack
+from charmhelpers.contrib.hahelpers.cluster import (
+    is_elected_leader,
+    is_crm_leader
+)
 from charmhelpers.core.hookenv import (
     config,
     unit_get,
     relation_set,
     relation_ids,
     relation_get,
     related_units,
     log,
     DEBUG,
     INFO,
     WARNING,
     ERROR,
     Hooks, UnregisteredHookError,
-    open_port
+    open_port,
 )
 from charmhelpers.core.host import (
     service_restart,
     service_stop,
     service_start,
-    restart_on_change
+    restart_on_change,
 )
 from charmhelpers.fetch import (
     apt_install,
-    apt_update
+    apt_update,
 )
 from charmhelpers.payload.execd import execd_preinstall
 
 from charmhelpers.contrib.openstack.ip import (
     canonical_url,
-    PUBLIC, INTERNAL, ADMIN
+    PUBLIC,
+    INTERNAL,
+    ADMIN,
 )
 from charmhelpers.contrib.network.ip import (
     get_iface_for_address,
     get_netmask_for_address,
     get_address_in_network,
     get_ipv6_addr,
     format_ipv6_addr,
-    is_ipv6
+    is_ipv6,
 )
 
 from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
 
 extra_pkgs = [
@@ -74,9 +82,7 @@ extra_pkgs = [
     "python-jinja2"
 ]
 
-
 hooks = Hooks()
-
 CONFIGS = register_configs()
 
 
@@ -86,19 +92,19 @@ def install():
     src = config('openstack-origin')
     if src != 'distro':
         openstack.configure_installation_source(src)
 
     apt_update(fatal=True)
     rel = openstack.get_os_codename_install_source(src)
 
     pkgs = determine_packages(rel)
     apt_install(pkgs, fatal=True)
     apt_install(extra_pkgs, fatal=True)
     ensure_swift_dir()
 
-    if cluster.is_elected_leader(SWIFT_HA_RES):
-        log("Leader established, generating ring builders")
+    if is_elected_leader(SWIFT_HA_RES):
+        log("Leader established, generating ring builders", level=INFO)
         # initialize new storage rings.
-        for ring in SWIFT_RINGS.iteritems():
-            initialize_ring(ring[1],
+        for ring, path in SWIFT_RINGS.iteritems():
+            initialize_ring(path,
                             config('partition-power'),
                             config('replicas'),
                             config('min-hours'))
@@ -107,14 +113,39 @@ def install():
     www_dir = get_www_dir()
     if not os.path.isdir(www_dir):
         os.mkdir(www_dir, 0o755)
+
     uid, gid = swift_user()
     os.chown(www_dir, uid, gid)
 
 
+@hooks.hook('config-changed')
+@restart_on_change(restart_map())
+def config_changed():
+    if config('prefer-ipv6'):
+        setup_ipv6()
+
+    configure_https()
+    open_port(config('bind-port'))
+
+    # Determine whether or not we should do an upgrade.
+    if openstack.openstack_upgrade_available('python-swift'):
+        do_openstack_upgrade(CONFIGS)
+
+    update_min_part_hours()
+
+    if config('force-cluster-ring-sync'):
+        log("Disabling peer proxy apis before syncing rings across cluster.")
+        disable_peer_apis()
+
+    for r_id in relation_ids('identity-service'):
+        keystone_joined(relid=r_id)
+
+
 @hooks.hook('identity-service-relation-joined')
 def keystone_joined(relid=None):
-    if not cluster.eligible_leader(SWIFT_HA_RES):
+    if not is_elected_leader(SWIFT_HA_RES):
         return
+
     port = config('bind-port')
     admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
     internal_url = '%s:%s/v1/AUTH_$(tenant_id)s' % \
@@ -136,86 +167,34 @@ def keystone_changed():
     configure_https()
 
 
-def get_hostaddr():
-    if config('prefer-ipv6'):
-        return get_ipv6_addr(exc_list=[config('vip')])[0]
-
-    return unit_get('private-address')
-
-
-def builders_synced():
-    for ring in SWIFT_RINGS.itervalues():
-        if not os.path.exists(ring):
-            log("Builder not yet synced - %s" % (ring))
-            return False
-
-    return True
-
-
-def balance_rings():
-    '''handle doing ring balancing and distribution.'''
-    if not cluster.eligible_leader(SWIFT_HA_RES):
-        log("Balance rings called by non-leader - skipping", level=WARNING)
-        return
-
-    new_ring = False
-    for ring in SWIFT_RINGS.itervalues():
-        if balance_ring(ring):
-            log('Balanced ring %s' % ring)
-            new_ring = True
-
-    if not new_ring:
-        log("Rings unchanged by rebalance - skipping sync", level=INFO)
-        return
-
-    www_dir = get_www_dir()
-    for ring, builder_path in SWIFT_RINGS.iteritems():
-        ringfile = '%s.ring.gz' % ring
-        shutil.copyfile(os.path.join(SWIFT_CONF_DIR, ringfile),
-                        os.path.join(www_dir, ringfile))
-        shutil.copyfile(builder_path,
-                        os.path.join(www_dir, os.path.basename(builder_path)))
-
-    if cluster.is_clustered():
-        hostname = config('vip')
-    else:
-        hostname = get_hostaddr()
-
-    hostname = format_ipv6_addr(hostname) or hostname
-
-    # Notify peers that builders are available
-    for rid in relation_ids('cluster'):
-        log("Notifying peer(s) that rings are ready for sync (rid='%s')" %
-            (rid))
-        relation_set(relation_id=rid,
-                     relation_settings={'builder-broker': hostname})
-
-    log('Broadcasting notification to all storage nodes that new ring is '
-        'ready for consumption.')
-
-    path = os.path.basename(www_dir)
-    trigger = uuid.uuid4()
-
-    rings_url = 'http://%s/%s' % (hostname, path)
-    # notify storage nodes that there is a new ring to fetch.
-    for relid in relation_ids('swift-storage'):
-        relation_set(relation_id=relid, swift_hash=get_swift_hash(),
-                     rings_url=rings_url, trigger=trigger)
-
-    service_restart('swift-proxy')
+@hooks.hook('swift-storage-relation-joined')
+def storage_joined():
+    if not is_elected_leader(SWIFT_HA_RES):
+        log("New storage relation joined - stopping proxy until ring builder "
+            "synced", level=INFO)
+        service_stop('swift-proxy')
+
+    # Mark rings in the www directory as stale since this unit is no longer
+    # responsible for distributing rings but may become responsible again at
+    # some time in the future so we do this to avoid storage nodes getting
+    # out-of-date rings.
+    mark_www_rings_deleted()
 
 
 @hooks.hook('swift-storage-relation-changed')
 @restart_on_change(restart_map())
 def storage_changed():
+    if not is_elected_leader(SWIFT_HA_RES):
+        log("Not the leader - ignoring storage relation until leader ready.",
+            level=DEBUG)
+        return
+
+    log("Leader established, updating ring builders", level=INFO)
     if config('prefer-ipv6'):
         host_ip = '[%s]' % relation_get('private-address')
     else:
         host_ip = openstack.get_host_ip(relation_get('private-address'))
 
-    if cluster.is_elected_leader(SWIFT_HA_RES):
-        log("Leader established, updating ring builders")
-
     zone = get_zone(config('zone-assignment'))
     node_settings = {
         'ip': host_ip,
@@ -226,7 +205,10 @@ def storage_changed():
     }
 
     if None in node_settings.itervalues():
-        log('storage_changed: Relation not ready.')
+        missing = [k for k, v in node_settings.iteritems()
+                   if node_settings[k] is None]
+        log("Relation not ready - some required values not provided by "
+            "relation (missing=%s)" % (', '.join(missing)), level=INFO)
         return None
 
     for k in ['zone', 'account_port', 'object_port', 'container_port']:
@@ -234,7 +216,7 @@ def storage_changed():
 
     CONFIGS.write_all()
 
-    # allow for multiple devs per unit, passed along as a : separated list
+    # Allow for multiple devs per unit, passed along as a : separated list
     devs = relation_get('device').split(':')
     for dev in devs:
         node_settings['device'] = dev
@@ -243,22 +225,16 @@ def storage_changed():
             add_to_ring(ring, node_settings)
 
     if should_balance([r for r in SWIFT_RINGS.itervalues()]):
-        balance_rings()
-
-        # Notify peers that builders are available
-        for rid in relation_ids('cluster'):
-            log("Notifying peer(s) that ring builder is ready (rid='%s')" %
-                (rid))
-            relation_set(relation_id=rid,
-                         relation_settings={'builder-broker':
-                                            get_hostaddr()})
+        # NOTE(dosaboy): this may not change anything but we still sync rings
+        # in case a storage node needs re-syncing.
+        balance_rings(force_sync=True)
+        notify_storage_rings_available()
+        # Restart proxy here in case no config changes made (so
+        # restart_on_change() ineffective).
+        service_restart('swift-proxy')
     else:
-        log("New storage relation joined - stopping proxy until ring builder "
-            "synced")
-        service_stop('swift-proxy')
+        log("Not yet ready to balance rings - insufficient replicas?",
+            level=INFO)
 
 
 @hooks.hook('swift-storage-relation-broken')
@@ -267,23 +243,6 @@ def storage_broken():
     CONFIGS.write_all()
 
 
-@hooks.hook('config-changed')
-@restart_on_change(restart_map())
-def config_changed():
-    if config('prefer-ipv6'):
-        setup_ipv6()
-
-    configure_https()
-    open_port(config('bind-port'))
-    # Determine whether or not we should do an upgrade, based on the
-    # the version offered in keyston-release.
-    if (openstack.openstack_upgrade_available('python-swift')):
-        do_openstack_upgrade(CONFIGS)
-    for r_id in relation_ids('identity-service'):
-        keystone_joined(relid=r_id)
-    [cluster_joined(rid) for rid in relation_ids('cluster')]
-
-
 @hooks.hook('cluster-relation-joined')
 def cluster_joined(relation_id=None):
     for addr_type in ADDRESS_TYPES:
@@ -301,55 +260,59 @@ def cluster_joined(relation_id=None):
     private_addr = unit_get('private-address')
 
 
-def sync_proxy_rings(broker_url):
-    """The leader proxy is responsible for intialising, updating and
-    rebalancing the ring. Once the leader is ready the rings must then be
-    synced into each other proxy unit.
-
-    Note that we sync the ring builder and .gz files since the builder itself
-    is linked to the underlying .gz ring.
-    """
-    log('Fetching swift rings & builders from proxy @ %s.' % broker_url)
-    target = '/etc/swift'
-    for server in ['account', 'object', 'container']:
-        url = '%s/%s.builder' % (broker_url, server)
-        log('Fetching %s.' % url)
-        cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
-               "%s/%s.builder" % (target, server)]
-        subprocess.check_call(cmd)
-
-        url = '%s/%s.ring.gz' % (broker_url, server)
-        log('Fetching %s.' % url)
-        cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
-               '%s/%s.ring.gz' % (target, server)]
-        subprocess.check_call(cmd)
-
-
 @hooks.hook('cluster-relation-changed',
             'cluster-relation-departed')
 @restart_on_change(restart_map())
 def cluster_changed():
+    if is_elected_leader(SWIFT_HA_RES):
+        rel_ids = relation_ids('cluster')
+        disabled = []
+        units = 0
+        for rid in rel_ids:
+            for unit in related_units(rid):
+                units += 1
+                disabled.append(relation_get('disable-proxy-service',
+                                             rid=rid, unit=unit))
+
+        disabled = [int(d) for d in disabled if d is not None]
+        if not any(disabled) and len(set(disabled)) == 1:
+            log("Syncing rings and builders across %s peer units" % (units),
+                level=DEBUG)
+            notify_peers_builders_available()
+            notify_storage_rings_available()
+        else:
+            log("Not all apis disabled - skipping sync until all peers ready "
+                "(got %s)" % (disabled), level=INFO)
+
+        CONFIGS.write_all()
+        return
+
+    settings = relation_get()
+    if int(settings.get('disable-proxy-service', 0)):
+        log("Peer request to disable proxy api received", level=INFO)
+        service_stop('swift-proxy')
+        trigger = str(uuid.uuid4())
+        relation_set(relation_settings={'trigger': trigger,
+                                        'disable-proxy-service': 0})
+        return
+
     # If not the leader, see if there are any builder files we can sync from
     # the leader.
-    if not cluster.is_elected_leader(SWIFT_HA_RES):
-        settings = relation_get()
+    log("Non-leader peer - checking if updated rings available", level=DEBUG)
     broker = settings.get('builder-broker', None)
-    if broker:
+    if not broker:
+        log("No update available", level=DEBUG)
+        return
 
     path = os.path.basename(get_www_dir())
-    broker_url = 'http://%s/%s' % (broker, path)
     try:
-        sync_proxy_rings(broker_url)
+        sync_proxy_rings('http://%s/%s' % (broker, path))
     except subprocess.CalledProcessError:
        log("Ring builder sync failed, builders not yet available - "
            "leader not ready?", level=WARNING)
        return None
 
     if builders_synced():
-        log("Ring builders synced - balancing rings and starting "
-            "proxy")
-
+        log("Ring builders synced - starting proxy", level=INFO)
         CONFIGS.write_all()
         service_start('swift-proxy')
     else:
@@ -360,9 +323,9 @@ def cluster_changed():
 @hooks.hook('ha-relation-changed')
 def ha_relation_changed():
     clustered = relation_get('clustered')
-    if clustered and cluster.is_leader(SWIFT_HA_RES):
-        log('Cluster configured, notifying other services and'
-            'updating keystone endpoint configuration')
+    if clustered and is_crm_leader(SWIFT_HA_RES):
+        log("Cluster configured, notifying other services and updating "
+            "keystone endpoint configuration", level=INFO)
         # Tell all related services to start using
         # the VIP instead
         for r_id in relation_ids('identity-service'):
@@ -377,17 +340,12 @@ def ha_relation_joined():
     corosync_mcastport = config('ha-mcastport')
     vip = config('vip')
     if not vip:
-        log('Unable to configure hacluster as vip not provided',
-            level=ERROR)
+        log('Unable to configure hacluster as vip not provided', level=ERROR)
         sys.exit(1)
 
     # Obtain resources
-    resources = {
-        'res_swift_haproxy': 'lsb:haproxy'
-    }
-    resource_params = {
-        'res_swift_haproxy': 'op monitor interval="5s"'
-    }
+    resources = {'res_swift_haproxy': 'lsb:haproxy'}
+    resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}
 
     vip_group = []
     for vip in vip.split():
@@ -414,12 +372,8 @@ def ha_relation_joined():
     if len(vip_group) >= 1:
         relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})
 
-    init_services = {
-        'res_swift_haproxy': 'haproxy'
-    }
-    clones = {
-        'cl_swift_haproxy': 'res_swift_haproxy'
-    }
+    init_services = {'res_swift_haproxy': 'haproxy'}
+    clones = {'cl_swift_haproxy': 'res_swift_haproxy'}
 
     relation_set(init_services=init_services,
                  corosync_bindiface=corosync_bindiface,
@@ -430,10 +384,9 @@ def ha_relation_joined():
 
 
 def configure_https():
-    '''
-    Enables SSL API Apache config if appropriate and kicks identity-service
+    """Enables SSL API Apache config if appropriate and kicks identity-service
     with any required api updates.
-    '''
+    """
     # need to write all to ensure changes to the entire request pipeline
     # propagate (c-api, haprxy, apache)
     CONFIGS.write_all()
@@ -451,14 +404,17 @@ def configure_https():
     for rid in relation_ids('identity-service'):
         keystone_joined(relid=rid)
 
-    write_rc_script()
+    env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server',
+                'OPENSTACK_PORT_API': config('bind-port'),
+                'OPENSTACK_PORT_MEMCACHED': 11211}
+    openstack.save_script_rc(**env_vars)
 
 
 def main():
     try:
         hooks.execute(sys.argv)
     except UnregisteredHookError as e:
-        log('Unknown hook {} - skipping.'.format(e))
+        log('Unknown hook {} - skipping.'.format(e), level=DEBUG)
 
 
 if __name__ == '__main__':
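The force-cluster-ring-sync path through the new cluster_changed hook is effectively a three-step handshake: the leader asks peers to take their proxy API offline, each peer stops swift-proxy and acks with disable-proxy-service=0, and once every peer has acked the leader publishes the builder-broker host so peers and storage units re-fetch the rings. A runnable toy model of that flow (a sketch, not charm code; only the relation keys come from this commit):

    import uuid

    peer_settings = {}  # stands in for the juju 'cluster' relation data

    def leader_requests_disable(peers):
        # disable_peer_apis(): ask every peer to stop its proxy api
        for p in peers:
            peer_settings[p] = {'disable-proxy-service': 1,
                                'trigger': str(uuid.uuid4())}

    def peer_ack(peer):
        # each peer stops swift-proxy, then acks with 0 and a fresh trigger
        peer_settings[peer] = {'disable-proxy-service': 0,
                               'trigger': str(uuid.uuid4())}

    def leader_may_sync(peers):
        # mirrors cluster_changed: sync only once every peer has acked 0
        acks = [int(peer_settings[p]['disable-proxy-service'])
                for p in peers if p in peer_settings]
        return bool(acks) and not any(acks) and len(set(acks)) == 1

    peers = ['swift-proxy/1', 'swift-proxy/2']
    leader_requests_disable(peers)
    for p in peers:
        peer_ack(p)
    assert leader_may_sync(peers)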
hooks/swift_utils.py
@@ -1,14 +1,41 @@
 import os
 import pwd
 import shutil
 import subprocess
-import charmhelpers.contrib.openstack.utils as openstack
 import sys
-from collections import OrderedDict
+import uuid
 
+from collections import OrderedDict
+from swift_context import (
+    get_swift_hash,
+    SwiftHashContext,
+    SwiftIdentityContext,
+    HAProxyContext,
+    SwiftRingContext,
+    ApacheSSLContext,
+    MemcachedContext,
+)
+
+import charmhelpers.contrib.openstack.context as context
+import charmhelpers.contrib.openstack.templating as templating
+from charmhelpers.contrib.openstack.utils import (
+    get_os_codename_package,
+    get_os_codename_install_source,
+    configure_installation_source
+)
+from charmhelpers.contrib.hahelpers.cluster import (
+    is_elected_leader,
+    is_clustered,
+)
 from charmhelpers.core.hookenv import (
-    log, ERROR,
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
     config,
     relation_get,
+    unit_get,
+    relation_set,
+    relation_ids,
 )
 from charmhelpers.fetch import (
     apt_update,
@@ -16,14 +43,13 @@ from charmhelpers.fetch import (
     apt_install,
     add_source
 )
 
 from charmhelpers.core.host import (
     lsb_release
 )
 
-import charmhelpers.contrib.openstack.context as context
-import charmhelpers.contrib.openstack.templating as templating
-import swift_context
+from charmhelpers.contrib.network.ip import (
+    format_ipv6_addr,
+    get_ipv6_addr,
+)
 
 
 # Various config files that are managed via templating.
@@ -70,59 +96,58 @@ BASE_PACKAGES = [
 FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']
 
 SWIFT_HA_RES = 'grp_swift_vips'
-
 TEMPLATES = 'templates/'
 
 # Map config files to hook contexts and services that will be associated
 # with file in restart_on_changes()'s service map.
 CONFIG_FILES = OrderedDict([
     (SWIFT_CONF, {
-        'hook_contexts': [swift_context.SwiftHashContext()],
+        'hook_contexts': [SwiftHashContext()],
         'services': ['swift-proxy'],
     }),
     (SWIFT_PROXY_CONF, {
-        'hook_contexts': [swift_context.SwiftIdentityContext(),
+        'hook_contexts': [SwiftIdentityContext(),
                           context.BindHostContext()],
         'services': ['swift-proxy'],
     }),
     (HAPROXY_CONF, {
         'hook_contexts': [context.HAProxyContext(),
-                          swift_context.HAProxyContext()],
+                          HAProxyContext()],
         'services': ['haproxy'],
     }),
     (SWIFT_RINGS_CONF, {
-        'hook_contexts': [swift_context.SwiftRingContext()],
+        'hook_contexts': [SwiftRingContext()],
        'services': ['apache2'],
     }),
     (SWIFT_RINGS_24_CONF, {
-        'hook_contexts': [swift_context.SwiftRingContext()],
+        'hook_contexts': [SwiftRingContext()],
         'services': ['apache2'],
     }),
     (APACHE_SITE_CONF, {
-        'hook_contexts': [swift_context.ApacheSSLContext()],
+        'hook_contexts': [ApacheSSLContext()],
         'services': ['apache2'],
     }),
     (APACHE_SITE_24_CONF, {
-        'hook_contexts': [swift_context.ApacheSSLContext()],
+        'hook_contexts': [ApacheSSLContext()],
         'services': ['apache2'],
     }),
     (MEMCACHED_CONF, {
-        'hook_contexts': [swift_context.MemcachedContext()],
+        'hook_contexts': [MemcachedContext()],
         'services': ['memcached'],
     }),
 ])
 
 
 def register_configs():
-    """
-    Register config files with their respective contexts.
-    Regstration of some configs may not be required depending on
+    """Register config files with their respective contexts.
+
+    Registration of some configs may not be required depending on
     existing of certain relations.
     """
     # if called without anything installed (eg during install hook)
     # just default to earliest supported release. configs dont get touched
     # till post-install, anyway.
-    release = openstack.get_os_codename_package('swift-proxy', fatal=False) \
+    release = get_os_codename_package('swift-proxy', fatal=False) \
         or 'essex'
     configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                           openstack_release=release)
@@ -149,13 +174,12 @@ def register_configs():
 
 
 def restart_map():
-    '''
-    Determine the correct resource map to be passed to
+    """Determine the correct resource map to be passed to
     charmhelpers.core.restart_on_change() based on the services configured.
 
-    :returns: dict: A dictionary mapping config file to lists of services
+    :returns dict: A dictionary mapping config file to lists of services
                     that should be restarted when file changes.
-    '''
+    """
     _map = []
     for f, ctxt in CONFIG_FILES.iteritems():
         svcs = []
@@ -163,6 +187,7 @@ def restart_map():
                 svcs.append(svc)
         if svcs:
             _map.append((f, svcs))
+
     return OrderedDict(_map)
 
 
@@ -174,12 +199,13 @@ def swift_user(username='swift'):
 def ensure_swift_dir(conf_dir=os.path.dirname(SWIFT_CONF)):
     if not os.path.isdir(conf_dir):
         os.mkdir(conf_dir, 0o750)
+
     uid, gid = swift_user()
     os.chown(conf_dir, uid, gid)
 
 
 def determine_packages(release):
-    '''determine what packages are needed for a given OpenStack release'''
+    """Determine what packages are needed for a given OpenStack release."""
     if release == 'essex':
         return BASE_PACKAGES
     elif release == 'folsom':
@@ -190,13 +216,6 @@ def determine_packages(release):
     return FOLSOM_PACKAGES
 
 
-def write_rc_script():
-    env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server',
-                'OPENSTACK_PORT_API': config('bind-port'),
-                'OPENSTACK_PORT_MEMCACHED': 11211}
-    openstack.save_script_rc(**env_vars)
-
-
 def _load_builder(path):
     # lifted straight from /usr/bin/swift-ring-builder
     from swift.common.ring import RingBuilder
@@ -213,6 +232,7 @@ def _load_builder(path):
         for dev in builder.devs:
             if dev and 'meta' not in dev:
                 dev['meta'] = ''
+
     return builder
 
 
@@ -222,14 +242,14 @@ def _write_ring(ring, ring_path):
 
 
 def ring_port(ring_path, node):
-    '''determine correct port from relation settings for a given ring file.'''
+    """Determine correct port from relation settings for a given ring file."""
     for name in ['account', 'object', 'container']:
         if name in ring_path:
             return node[('%s_port' % name)]
 
 
 def initialize_ring(path, part_power, replicas, min_hours):
-    '''Initialize a new swift ring with given parameters.'''
+    """Initialize a new swift ring with given parameters."""
     from swift.common.ring import RingBuilder
     ring = RingBuilder(part_power, replicas, min_hours)
     _write_ring(ring, path)
@@ -244,8 +264,7 @@ def exists_in_ring(ring_path, node):
         n = [(i, node[i]) for i in node if i in dev and i != 'zone']
         if sorted(d) == sorted(n):
-            msg = 'Node already exists in ring (%s).' % ring_path
-            log(msg)
+            log('Node already exists in ring (%s).' % ring_path, level=INFO)
             return True
 
     return False
@@ -271,10 +290,8 @@ def add_to_ring(ring_path, node):
     }
     ring.add_dev(new_dev)
     _write_ring(ring, ring_path)
-    msg = 'Added new device to ring %s: %s' %\
-        (ring_path,
-         [k for k in new_dev.iteritems()])
-    log(msg)
+    msg = 'Added new device to ring %s: %s' % (ring_path, new_dev)
+    log(msg, level=INFO)
 
 
 def _get_zone(ring_builder):
@@ -302,9 +319,19 @@ def _get_zone(ring_builder):
     return sorted(zone_distrib, key=zone_distrib.get).pop(0)
 
 
+def get_min_part_hours(ring):
+    builder = _load_builder(ring)
+    return builder.min_part_hours
+
+
+def set_min_part_hours(path, min_part_hours):
+    builder = _load_builder(path)
+    builder.min_part_hours = min_part_hours
+    _write_ring(builder, path)
+
+
 def get_zone(assignment_policy):
-    ''' Determine the appropriate zone depending on configured assignment
-    policy.
+    """Determine the appropriate zone depending on configured assignment
+    policy.
 
     Manual assignment relies on each storage zone being deployed as a
     separate service unit with its desired zone set as a configuration
@@ -314,7 +341,7 @@ def get_zone(assignment_policy):
     of zones equal to the configured minimum replicas. This allows for a
     single swift-storage service unit, with each 'add-unit'd machine unit
     being assigned to a different zone.
-    '''
+    """
     if assignment_policy == 'manual':
         return relation_get('zone')
     elif assignment_policy == 'auto':
@@ -324,13 +351,12 @@ def get_zone(assignment_policy):
         potential_zones.append(_get_zone(builder))
         return set(potential_zones).pop()
     else:
-        log('Invalid zone assignment policy: %s' % assignment_policy,
-            level=ERROR)
-        sys.exit(1)
+        msg = ('Invalid zone assignment policy: %s' % assignment_policy)
+        raise Exception(msg)
 
 
 def balance_ring(ring_path):
-    '''balance a ring. return True if it needs redistribution'''
+    """Balance a ring. Return True if it needs redistribution."""
     # shell out to swift-ring-builder instead, since the balancing code there
     # does a bunch of un-importable validation.'''
     cmd = ['swift-ring-builder', ring_path, 'rebalance']
@@ -340,21 +366,32 @@ def balance_ring(ring_path):
     if rc == 0:
         return True
     elif rc == 1:
-        # swift-ring-builder returns 1 on WARNING (ring didn't require balance)
+        # Ring builder exit-code=1 is supposed to indicate warning but I have
+        # noticed that it can also return 1 with the following sort of message:
+        #
+        #   NOTE: Balance of 166.67 indicates you should push this ring, wait
+        #         at least 0 hours, and rebalance/repush.
+        #
+        # This indicates that a balance has occurred and a resync would be
+        # required so not sure why 1 is returned in this case.
         return False
     else:
-        log('balance_ring: %s returned %s' % (cmd, rc), level=ERROR)
-        sys.exit(1)
+        msg = ('balance_ring: %s returned %s' % (cmd, rc))
+        raise Exception(msg)
 
 
 def should_balance(rings):
-    '''Based on zones vs min. replicas, determine whether or not the rings
-    should be balanced during initial configuration.'''
+    """Based on zones vs min. replicas, determine whether or not the rings
+    should be balanced during initial configuration.
+    """
     for ring in rings:
         builder = _load_builder(ring).to_dict()
         replicas = builder['replicas']
         zones = [dev['zone'] for dev in builder['devs']]
-        if len(set(zones)) < replicas:
+        num_zones = len(set(zones))
+        if num_zones < replicas:
+            log("Not enough zones (%d) defined to allow rebalance "
+                "(need >= %d)" % (num_zones, replicas), level=DEBUG)
             return False
 
     return True
@@ -362,10 +399,10 @@ def should_balance(rings):
 
 def do_openstack_upgrade(configs):
     new_src = config('openstack-origin')
-    new_os_rel = openstack.get_os_codename_install_source(new_src)
+    new_os_rel = get_os_codename_install_source(new_src)
 
-    log('Performing OpenStack upgrade to %s.' % (new_os_rel))
-    openstack.configure_installation_source(new_src)
+    log('Performing OpenStack upgrade to %s.' % (new_os_rel), level=DEBUG)
+    configure_installation_source(new_src)
     dpkg_opts = [
         '--option', 'Dpkg::Options::=--force-confnew',
         '--option', 'Dpkg::Options::=--force-confdef',
@@ -390,3 +427,194 @@ def setup_ipv6():
                    ' main')
         apt_update()
         apt_install('haproxy/trusty-backports', fatal=True)
+
+
+def sync_proxy_rings(broker_url):
+    """The leader proxy is responsible for initialising, updating and
+    rebalancing the ring. Once the leader is ready the rings must then be
+    synced into each other proxy unit.
+
+    Note that we sync the ring builder and .gz files since the builder itself
+    is linked to the underlying .gz ring.
+    """
+    log('Fetching swift rings & builders from proxy @ %s.' % broker_url,
+        level=DEBUG)
+    target = '/etc/swift'
+    for server in ['account', 'object', 'container']:
+        url = '%s/%s.builder' % (broker_url, server)
+        log('Fetching %s.' % url, level=DEBUG)
+        cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
+               "%s/%s.builder" % (target, server)]
+        subprocess.check_call(cmd)
+
+        url = '%s/%s.ring.gz' % (broker_url, server)
+        log('Fetching %s.' % url, level=DEBUG)
+        cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
+               '%s/%s.ring.gz' % (target, server)]
+        subprocess.check_call(cmd)
+
+
+def balance_rings(force_sync=False):
+    """Rebalance each ring and notify peers that new rings are available."""
+    if not is_elected_leader(SWIFT_HA_RES):
+        log("Balance rings called by non-leader - skipping", level=WARNING)
+        return
+
+    rebalanced = False
+    for path in SWIFT_RINGS.itervalues():
+        if balance_ring(path):
+            log('Balanced ring %s' % path, level=DEBUG)
+            rebalanced = True
+        else:
+            log('Ring %s not rebalanced' % path, level=DEBUG)
+
+    if not rebalanced and not force_sync:
+        log("Rings unchanged by rebalance - skipping sync", level=INFO)
+        return
+
+    www_dir = get_www_dir()
+    for ring, builder_path in SWIFT_RINGS.iteritems():
+        ringfile = '%s.ring.gz' % ring
+        shutil.copyfile(os.path.join(SWIFT_CONF_DIR, ringfile),
+                        os.path.join(www_dir, ringfile))
+        shutil.copyfile(builder_path,
+                        os.path.join(www_dir, os.path.basename(builder_path)))
+
+    notify_peers_builders_available()
+
+
+def mark_www_rings_deleted():
+    """Mark any rings from the apache server directory as deleted so that
+    storage units won't see them.
+    """
+    www_dir = get_www_dir()
+    for ring, _ in SWIFT_RINGS.iteritems():
+        path = os.path.join(www_dir, '%s.ring.gz' % ring)
+        if os.path.exists(path):
+            os.rename(path, "%s.deleted" % (path))
+
+
+def notify_peers_builders_available():
+    """Notify swift-proxy peer units that they should synchronise ring and
+    builder files.
+
+    Note that this should only be called from the leader unit.
+    """
+    if not is_elected_leader(SWIFT_HA_RES):
+        log("Ring availability peer broadcast requested by non-leader - "
+            "skipping", level=WARNING)
+        return
+
+    if is_clustered():
+        hostname = config('vip')
+    else:
+        hostname = get_hostaddr()
+
+    hostname = format_ipv6_addr(hostname) or hostname
+    # Notify peers that builders are available
+    log("Notifying peer(s) that rings are ready for sync.", level=INFO)
+    trigger = str(uuid.uuid4())
+    for rid in relation_ids('cluster'):
+        log("Notifying rid=%s" % (rid), level=DEBUG)
+        # NOTE(dosaboy): we add some random data to the relation settings
+        # otherwise subsequent calls will not fire (since hostname is always
+        # the same).
+        relation_set(relation_id=rid,
+                     relation_settings={'trigger': trigger,
+                                        'builder-broker': hostname,
+                                        'disable-proxy-service': 0})
+
+
+def disable_peer_apis():
+    """Notify peer relations that they should disable their proxy services.
+
+    This should only be called by the leader unit.
+    """
+    if not is_elected_leader(SWIFT_HA_RES):
+        # Only the leader can do this.
+        return
+
+    log("Sending request to disable proxy service to all peers", level=INFO)
+    rel_ids = relation_ids('cluster')
+    trigger = str(uuid.uuid4())
+    for rid in rel_ids:
+        relation_set(relation_id=rid,
+                     relation_settings={'trigger': trigger,
+                                        'disable-proxy-service': 1})
+
+
+def notify_storage_rings_available():
+    """Notify swift-storage relations that they should synchronise ring and
+    builder files.
+
+    Note that this should only be called from the leader unit.
+    """
+    if not is_elected_leader(SWIFT_HA_RES):
+        log("Ring availability storage-relation broadcast requested by "
+            "non-leader - skipping", level=WARNING)
+        return
+
+    if is_clustered():
+        hostname = config('vip')
+    else:
+        hostname = get_hostaddr()
+
+    hostname = format_ipv6_addr(hostname) or hostname
+    path = os.path.basename(get_www_dir())
+    rings_url = 'http://%s/%s' % (hostname, path)
+    trigger = uuid.uuid4()
+    # Notify storage nodes that there is a new ring to fetch.
+    log("Notifying storage nodes that new ring is ready for sync.",
+        level=INFO)
+    for relid in relation_ids('swift-storage'):
+        relation_set(relation_id=relid, swift_hash=get_swift_hash(),
+                     rings_url=rings_url, trigger=trigger)
+
+
+def builders_synced():
+    """Check that we have all the ring builders synced from the leader.
+
+    Returns True if we have all ring builders.
+    """
+    for ring in SWIFT_RINGS.itervalues():
+        if not os.path.exists(ring):
+            log("Builder not yet synced - %s" % (ring), level=DEBUG)
+            return False
+
+    return True
+
+
+def get_hostaddr():
+    if config('prefer-ipv6'):
+        return get_ipv6_addr(exc_list=[config('vip')])[0]
+
+    return unit_get('private-address')
+
+
+def update_min_part_hours():
+    """Update the min_part_hours setting on swift rings.
+
+    This should only be called by the leader unit. Once the update has been
+    performed and if the setting has changed, rings will be resynced across
+    the cluster.
+    """
+    if not is_elected_leader(SWIFT_HA_RES):
+        # Only the leader can do this.
+        return
+
+    new_min_part_hours = config('min-hours')
+    resync_builders = False
+    # Only update if all exist
+    if all([os.path.exists(p) for p in SWIFT_RINGS.itervalues()]):
+        for ring, path in SWIFT_RINGS.iteritems():
+            min_part_hours = get_min_part_hours(path)
+            if min_part_hours != new_min_part_hours:
+                log("Setting ring %s min_part_hours to %s" %
+                    (ring, new_min_part_hours), level=INFO)
+                set_min_part_hours(path, new_min_part_hours)
+                resync_builders = True
+
+    if resync_builders:
+        if should_balance([r for r in SWIFT_RINGS.itervalues()]):
+            balance_rings()
+            notify_peers_builders_available()
+            notify_storage_rings_available()
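For context, the new get_min_part_hours/set_min_part_hours helpers are thin wrappers over swift's RingBuilder; outside the charm the same change could be made directly, roughly as follows (a sketch assuming a swift release where RingBuilder.load/save are available; the builder path is an example):

    from swift.common.ring import RingBuilder

    builder = RingBuilder.load('/etc/swift/account.builder')
    print(builder.min_part_hours)  # 0 under this charm's new default
    builder.min_part_hours = 1     # restore the upstream default
    builder.save('/etc/swift/account.builder')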