[hopem,r=]

Adds support for changing the min_part_hours value on swift
rings post-install and resyncing rings across cluster on-demand.

Default min-hours is now 0 to allow any number of storage units
to join and enforce a rebalance and resync of rings across the
cluster.

Also refactored code and cleaned up style.
This commit is contained in:
Edward Hope-Morley 2014-12-01 18:37:56 +00:00
parent 6b2a7e7737
commit 56fe58cc6b
5 changed files with 497 additions and 298 deletions

View File

@ -30,9 +30,15 @@ options:
type: int type: int
description: Minimum replicas. description: Minimum replicas.
min-hours: min-hours:
default: 1 default: 0
type: int type: int
description: Minimum hours between balances description: |
This is the Swift ring builder min_part_hours parameter. This
setting represents the amount of time in hours that Swift will wait
between subsequent ring rebalances in order to avoid large IO loads as
data is rebalanced when new devices are added to the cluster. Once your
cluster has been built, you can set this to a higher value e.g. 1
(upstream default).
zone-assignment: zone-assignment:
default: "manual" default: "manual"
type: string type: string
@ -46,6 +52,14 @@ options:
zones before the storage ring will be initially balanced. Deployment zones before the storage ring will be initially balanced. Deployment
requirements differ based on the zone-assignment policy configured, see requirements differ based on the zone-assignment policy configured, see
this charm's README for details. this charm's README for details.
force-cluster-ring-sync:
default: False
type: boolean
description: |
There are some cases where one might want to resync rings and
builders across storage relations, perhaps as a result of a manual
ring update (e.g. a rebalance), so this toggle can be used to trigger
a resync across the entire cluster.
# User provided SSL cert and key # User provided SSL cert and key
ssl_cert: ssl_cert:
type: string type: string

View File

@ -0,0 +1 @@
swift_hooks.py

View File

@ -1,3 +1,6 @@
import os
import uuid
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
config, config,
log, log,
@ -5,38 +8,35 @@ from charmhelpers.core.hookenv import (
related_units, related_units,
relation_get, relation_get,
unit_get, unit_get,
service_name service_name,
) )
from charmhelpers.contrib.openstack.context import ( from charmhelpers.contrib.openstack.context import (
OSContextGenerator, OSContextGenerator,
ApacheSSLContext as SSLContext, ApacheSSLContext as SSLContext,
context_complete, context_complete,
) )
from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.cluster import (
determine_api_port, determine_api_port,
determine_apache_port, determine_apache_port,
) )
from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.network.ip import (
get_ipv6_addr get_ipv6_addr
) )
from charmhelpers.contrib.openstack.utils import get_host_ip from charmhelpers.contrib.openstack.utils import get_host_ip
import os
import uuid
SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
WWW_DIR = '/var/www/swift-rings'
class HAProxyContext(OSContextGenerator): class HAProxyContext(OSContextGenerator):
interfaces = ['cluster'] interfaces = ['cluster']
def __call__(self): def __call__(self):
''' """Extends the main charmhelpers HAProxyContext with a port mapping
Extends the main charmhelpers HAProxyContext with a port mapping
specific to this charm. specific to this charm.
Also used to extend cinder.conf context with correct api_listening_port Also used to extend cinder.conf context with correct api_listening_port
''' """
haproxy_port = config('bind-port') haproxy_port = config('bind-port')
api_port = determine_apache_port(config('bind-port')) api_port = determine_apache_port(config('bind-port'))
@ -46,9 +46,6 @@ class HAProxyContext(OSContextGenerator):
return ctxt return ctxt
WWW_DIR = '/var/www/swift-rings'
class ApacheSSLContext(SSLContext): class ApacheSSLContext(SSLContext):
interfaces = ['https'] interfaces = ['https']
external_ports = [config('bind-port')] external_ports = [config('bind-port')]
@ -66,6 +63,7 @@ class SwiftRingContext(OSContextGenerator):
host_ip = get_ipv6_addr(exc_list=[config('vip')])[0] host_ip = get_ipv6_addr(exc_list=[config('vip')])[0]
else: else:
host_ip = get_host_ip(host) host_ip = get_host_ip(host)
allowed_hosts.append(host_ip) allowed_hosts.append(host_ip)
ctxt = { ctxt = {
@ -90,6 +88,7 @@ class SwiftIdentityContext(OSContextGenerator):
else: else:
proxy_ip = get_host_ip(unit_get('private-address')) proxy_ip = get_host_ip(unit_get('private-address'))
memcached_ip = get_host_ip(unit_get('private-address')) memcached_ip = get_host_ip(unit_get('private-address'))
ctxt = { ctxt = {
'proxy_ip': proxy_ip, 'proxy_ip': proxy_ip,
'memcached_ip': memcached_ip, 'memcached_ip': memcached_ip,
@ -147,6 +146,7 @@ class SwiftIdentityContext(OSContextGenerator):
} }
if context_complete(ks_auth): if context_complete(ks_auth):
ctxt.update(ks_auth) ctxt.update(ks_auth)
return ctxt return ctxt
@ -158,9 +158,8 @@ class MemcachedContext(OSContextGenerator):
ctxt['memcached_ip'] = 'ip6-localhost' ctxt['memcached_ip'] = 'ip6-localhost'
else: else:
ctxt['memcached_ip'] = get_host_ip(unit_get('private-address')) ctxt['memcached_ip'] = get_host_ip(unit_get('private-address'))
return ctxt
SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf' return ctxt
def get_swift_hash(): def get_swift_hash():
@ -176,6 +175,7 @@ def get_swift_hash():
service_name())) service_name()))
with open(SWIFT_HASH_FILE, 'w') as hashfile: with open(SWIFT_HASH_FILE, 'w') as hashfile:
hashfile.write(swift_hash) hashfile.write(swift_hash)
return swift_hash return swift_hash

View File

@ -2,71 +2,79 @@
import os import os
import sys import sys
import shutil
import uuid
import subprocess import subprocess
import uuid
import charmhelpers.contrib.openstack.utils as openstack
import charmhelpers.contrib.hahelpers.cluster as cluster
from swift_utils import ( from swift_utils import (
register_configs, register_configs,
restart_map, restart_map,
determine_packages, determine_packages,
ensure_swift_dir, ensure_swift_dir,
SWIFT_RINGS, get_www_dir, SWIFT_RINGS,
get_www_dir,
initialize_ring, initialize_ring,
swift_user, swift_user,
SWIFT_HA_RES, SWIFT_HA_RES,
balance_ring,
SWIFT_CONF_DIR,
get_zone, get_zone,
exists_in_ring, exists_in_ring,
add_to_ring, add_to_ring,
should_balance, should_balance,
do_openstack_upgrade, do_openstack_upgrade,
write_rc_script, setup_ipv6,
setup_ipv6 balance_rings,
builders_synced,
sync_proxy_rings,
update_min_part_hours,
notify_storage_rings_available,
notify_peers_builders_available,
mark_www_rings_deleted,
disable_peer_apis,
) )
from swift_context import get_swift_hash
import charmhelpers.contrib.openstack.utils as openstack
from charmhelpers.contrib.hahelpers.cluster import (
is_elected_leader,
is_crm_leader
)
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
config, config,
unit_get, unit_get,
relation_set, relation_set,
relation_ids, relation_ids,
relation_get, relation_get,
related_units,
log, log,
DEBUG,
INFO, INFO,
WARNING, WARNING,
ERROR, ERROR,
Hooks, UnregisteredHookError, Hooks, UnregisteredHookError,
open_port open_port,
) )
from charmhelpers.core.host import ( from charmhelpers.core.host import (
service_restart, service_restart,
service_stop, service_stop,
service_start, service_start,
restart_on_change restart_on_change,
) )
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_install, apt_install,
apt_update apt_update,
) )
from charmhelpers.payload.execd import execd_preinstall from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.openstack.ip import ( from charmhelpers.contrib.openstack.ip import (
canonical_url, canonical_url,
PUBLIC, INTERNAL, ADMIN PUBLIC,
INTERNAL,
ADMIN,
) )
from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.network.ip import (
get_iface_for_address, get_iface_for_address,
get_netmask_for_address, get_netmask_for_address,
get_address_in_network, get_address_in_network,
get_ipv6_addr, get_ipv6_addr,
format_ipv6_addr, is_ipv6,
is_ipv6
) )
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
extra_pkgs = [ extra_pkgs = [
@ -74,9 +82,7 @@ extra_pkgs = [
"python-jinja2" "python-jinja2"
] ]
hooks = Hooks() hooks = Hooks()
CONFIGS = register_configs() CONFIGS = register_configs()
@ -86,19 +92,19 @@ def install():
src = config('openstack-origin') src = config('openstack-origin')
if src != 'distro': if src != 'distro':
openstack.configure_installation_source(src) openstack.configure_installation_source(src)
apt_update(fatal=True) apt_update(fatal=True)
rel = openstack.get_os_codename_install_source(src) rel = openstack.get_os_codename_install_source(src)
pkgs = determine_packages(rel) pkgs = determine_packages(rel)
apt_install(pkgs, fatal=True) apt_install(pkgs, fatal=True)
apt_install(extra_pkgs, fatal=True) apt_install(extra_pkgs, fatal=True)
ensure_swift_dir() ensure_swift_dir()
if cluster.is_elected_leader(SWIFT_HA_RES): if is_elected_leader(SWIFT_HA_RES):
log("Leader established, generating ring builders") log("Leader established, generating ring builders", level=INFO)
# initialize new storage rings. # initialize new storage rings.
for ring in SWIFT_RINGS.iteritems(): for ring, path in SWIFT_RINGS.iteritems():
initialize_ring(ring[1], initialize_ring(path,
config('partition-power'), config('partition-power'),
config('replicas'), config('replicas'),
config('min-hours')) config('min-hours'))
@ -107,14 +113,39 @@ def install():
www_dir = get_www_dir() www_dir = get_www_dir()
if not os.path.isdir(www_dir): if not os.path.isdir(www_dir):
os.mkdir(www_dir, 0o755) os.mkdir(www_dir, 0o755)
uid, gid = swift_user() uid, gid = swift_user()
os.chown(www_dir, uid, gid) os.chown(www_dir, uid, gid)
@hooks.hook('config-changed')
@restart_on_change(restart_map())
def config_changed():
if config('prefer-ipv6'):
setup_ipv6()
configure_https()
open_port(config('bind-port'))
# Determine whether or not we should do an upgrade.
if openstack.openstack_upgrade_available('python-swift'):
do_openstack_upgrade(CONFIGS)
update_min_part_hours()
if config('force-cluster-ring-sync'):
log("Disabling peer proxy apis before syncing rings across cluster.")
disable_peer_apis()
for r_id in relation_ids('identity-service'):
keystone_joined(relid=r_id)
@hooks.hook('identity-service-relation-joined') @hooks.hook('identity-service-relation-joined')
def keystone_joined(relid=None): def keystone_joined(relid=None):
if not cluster.eligible_leader(SWIFT_HA_RES): if not is_elected_leader(SWIFT_HA_RES):
return return
port = config('bind-port') port = config('bind-port')
admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port) admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
internal_url = '%s:%s/v1/AUTH_$(tenant_id)s' % \ internal_url = '%s:%s/v1/AUTH_$(tenant_id)s' % \
@ -136,86 +167,34 @@ def keystone_changed():
configure_https() configure_https()
def get_hostaddr(): @hooks.hook('swift-storage-relation-joined')
if config('prefer-ipv6'): def storage_joined():
return get_ipv6_addr(exc_list=[config('vip')])[0] if not is_elected_leader(SWIFT_HA_RES):
log("New storage relation joined - stopping proxy until ring builder "
"synced", level=INFO)
service_stop('swift-proxy')
return unit_get('private-address') # Mark rings in the www directory as stale since this unit is no longer
# responsible for distributing rings but may become responsible again at
# some time in the future, so we do this to avoid storage nodes
def builders_synced(): # getting out-of-date rings.
for ring in SWIFT_RINGS.itervalues(): mark_www_rings_deleted()
if not os.path.exists(ring):
log("Builder not yet synced - %s" % (ring))
return False
return True
def balance_rings():
'''handle doing ring balancing and distribution.'''
if not cluster.eligible_leader(SWIFT_HA_RES):
log("Balance rings called by non-leader - skipping", level=WARNING)
return
new_ring = False
for ring in SWIFT_RINGS.itervalues():
if balance_ring(ring):
log('Balanced ring %s' % ring)
new_ring = True
if not new_ring:
log("Rings unchanged by rebalance - skipping sync", level=INFO)
return
www_dir = get_www_dir()
for ring, builder_path in SWIFT_RINGS.iteritems():
ringfile = '%s.ring.gz' % ring
shutil.copyfile(os.path.join(SWIFT_CONF_DIR, ringfile),
os.path.join(www_dir, ringfile))
shutil.copyfile(builder_path,
os.path.join(www_dir, os.path.basename(builder_path)))
if cluster.is_clustered():
hostname = config('vip')
else:
hostname = get_hostaddr()
hostname = format_ipv6_addr(hostname) or hostname
# Notify peers that builders are available
for rid in relation_ids('cluster'):
log("Notifying peer(s) that rings are ready for sync (rid='%s')" %
(rid))
relation_set(relation_id=rid,
relation_settings={'builder-broker': hostname})
log('Broadcasting notification to all storage nodes that new ring is '
'ready for consumption.')
path = os.path.basename(www_dir)
trigger = uuid.uuid4()
rings_url = 'http://%s/%s' % (hostname, path)
# notify storage nodes that there is a new ring to fetch.
for relid in relation_ids('swift-storage'):
relation_set(relation_id=relid, swift_hash=get_swift_hash(),
rings_url=rings_url, trigger=trigger)
service_restart('swift-proxy')
@hooks.hook('swift-storage-relation-changed') @hooks.hook('swift-storage-relation-changed')
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def storage_changed(): def storage_changed():
if not is_elected_leader(SWIFT_HA_RES):
log("Not the leader - ignoring storage relation until leader ready.",
level=DEBUG)
return
log("Leader established, updating ring builders", level=INFO)
if config('prefer-ipv6'): if config('prefer-ipv6'):
host_ip = '[%s]' % relation_get('private-address') host_ip = '[%s]' % relation_get('private-address')
else: else:
host_ip = openstack.get_host_ip(relation_get('private-address')) host_ip = openstack.get_host_ip(relation_get('private-address'))
if cluster.is_elected_leader(SWIFT_HA_RES):
log("Leader established, updating ring builders")
zone = get_zone(config('zone-assignment')) zone = get_zone(config('zone-assignment'))
node_settings = { node_settings = {
'ip': host_ip, 'ip': host_ip,
@ -226,7 +205,10 @@ def storage_changed():
} }
if None in node_settings.itervalues(): if None in node_settings.itervalues():
log('storage_changed: Relation not ready.') missing = [k for k, v in node_settings.iteritems()
if node_settings[k] is None]
log("Relation not ready - some required values not provided by "
"relation (missing=%s)" % (', '.join(missing)), level=INFO)
return None return None
for k in ['zone', 'account_port', 'object_port', 'container_port']: for k in ['zone', 'account_port', 'object_port', 'container_port']:
@ -234,7 +216,7 @@ def storage_changed():
CONFIGS.write_all() CONFIGS.write_all()
# allow for multiple devs per unit, passed along as a : separated list # Allow for multiple devs per unit, passed along as a : separated list
devs = relation_get('device').split(':') devs = relation_get('device').split(':')
for dev in devs: for dev in devs:
node_settings['device'] = dev node_settings['device'] = dev
@ -243,22 +225,16 @@ def storage_changed():
add_to_ring(ring, node_settings) add_to_ring(ring, node_settings)
if should_balance([r for r in SWIFT_RINGS.itervalues()]): if should_balance([r for r in SWIFT_RINGS.itervalues()]):
balance_rings() # NOTE(dosaboy): this may not change anything but we still sync rings
# in case a storage node needs re-syncing.
# Notify peers that builders are available balance_rings(force_sync=True)
for rid in relation_ids('cluster'): notify_storage_rings_available()
log("Notifying peer(s) that ring builder is ready (rid='%s')" % # Restart proxy here in case no config changes made (so
(rid)) # restart_on_change() ineffective).
relation_set(relation_id=rid, service_restart('swift-proxy')
relation_settings={'builder-broker':
get_hostaddr()})
else: else:
log("Not yet ready to balance rings - insufficient replicas?", log("Not yet ready to balance rings - insufficient replicas?",
level=INFO) level=INFO)
else:
log("New storage relation joined - stopping proxy until ring builder "
"synced")
service_stop('swift-proxy')
@hooks.hook('swift-storage-relation-broken') @hooks.hook('swift-storage-relation-broken')
@ -267,23 +243,6 @@ def storage_broken():
CONFIGS.write_all() CONFIGS.write_all()
@hooks.hook('config-changed')
@restart_on_change(restart_map())
def config_changed():
if config('prefer-ipv6'):
setup_ipv6()
configure_https()
open_port(config('bind-port'))
# Determine whether or not we should do an upgrade, based on the
# the version offered in keyston-release.
if (openstack.openstack_upgrade_available('python-swift')):
do_openstack_upgrade(CONFIGS)
for r_id in relation_ids('identity-service'):
keystone_joined(relid=r_id)
[cluster_joined(rid) for rid in relation_ids('cluster')]
@hooks.hook('cluster-relation-joined') @hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None): def cluster_joined(relation_id=None):
for addr_type in ADDRESS_TYPES: for addr_type in ADDRESS_TYPES:
@ -301,55 +260,59 @@ def cluster_joined(relation_id=None):
private_addr = unit_get('private-address') private_addr = unit_get('private-address')
def sync_proxy_rings(broker_url):
"""The leader proxy is responsible for intialising, updating and
rebalancing the ring. Once the leader is ready the rings must then be
synced into each other proxy unit.
Note that we sync the ring builder and .gz files since the builder itself
is linked to the underlying .gz ring.
"""
log('Fetching swift rings & builders from proxy @ %s.' % broker_url)
target = '/etc/swift'
for server in ['account', 'object', 'container']:
url = '%s/%s.builder' % (broker_url, server)
log('Fetching %s.' % url)
cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
"%s/%s.builder" % (target, server)]
subprocess.check_call(cmd)
url = '%s/%s.ring.gz' % (broker_url, server)
log('Fetching %s.' % url)
cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
'%s/%s.ring.gz' % (target, server)]
subprocess.check_call(cmd)
@hooks.hook('cluster-relation-changed', @hooks.hook('cluster-relation-changed',
'cluster-relation-departed') 'cluster-relation-departed')
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def cluster_changed(): def cluster_changed():
if is_elected_leader(SWIFT_HA_RES):
rel_ids = relation_ids('cluster')
disabled = []
units = 0
for rid in rel_ids:
for unit in related_units(rid):
units += 1
disabled.append(relation_get('disable-proxy-service', rid=rid))
disabled = [int(d) for d in disabled if d is not None]
if not any(disabled) and len(set(disabled)) == 1:
log("Syncing rings and builders across %s peer units" % (units),
level=DEBUG)
notify_peers_builders_available()
notify_storage_rings_available()
else:
log("Not all apis disabled - skipping sync until all peers ready "
"(got %s)" % (disabled), level=INFO)
CONFIGS.write_all() CONFIGS.write_all()
return
settings = relation_get()
if int(settings.get('disable-proxy-service', 0)):
log("Peer request to disable proxy api received", level=INFO)
service_stop('swift-proxy')
trigger = str(uuid.uuid4())
relation_set(relation_settings={'trigger': trigger,
'disable-proxy-service': 0})
return
# If not the leader, see if there are any builder files we can sync from # If not the leader, see if there are any builder files we can sync from
# the leader. # the leader.
if not cluster.is_elected_leader(SWIFT_HA_RES): log("Non-leader peer - checking if updated rings available", level=DEBUG)
settings = relation_get()
broker = settings.get('builder-broker', None) broker = settings.get('builder-broker', None)
if broker: if not broker:
log("No update available", level=DEBUG)
return
path = os.path.basename(get_www_dir()) path = os.path.basename(get_www_dir())
broker_url = 'http://%s/%s' % (broker, path)
try: try:
sync_proxy_rings(broker_url) sync_proxy_rings('http://%s/%s' % (broker, path))
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
log("Ring builder sync failed, builders not yet available - " log("Ring builder sync failed, builders not yet available - "
"leader not ready?", level=WARNING) "leader not ready?", level=WARNING)
return None return None
if builders_synced(): if builders_synced():
log("Ring builders synced - balancing rings and starting " log("Ring builders synced - starting proxy", level=INFO)
"proxy")
CONFIGS.write_all() CONFIGS.write_all()
service_start('swift-proxy') service_start('swift-proxy')
else: else:
@ -360,9 +323,9 @@ def cluster_changed():
@hooks.hook('ha-relation-changed') @hooks.hook('ha-relation-changed')
def ha_relation_changed(): def ha_relation_changed():
clustered = relation_get('clustered') clustered = relation_get('clustered')
if clustered and cluster.is_leader(SWIFT_HA_RES): if clustered and is_crm_leader(SWIFT_HA_RES):
log('Cluster configured, notifying other services and' log("Cluster configured, notifying other services and updating "
'updating keystone endpoint configuration') "keystone endpoint configuration", level=INFO)
# Tell all related services to start using # Tell all related services to start using
# the VIP instead # the VIP instead
for r_id in relation_ids('identity-service'): for r_id in relation_ids('identity-service'):
@ -377,17 +340,12 @@ def ha_relation_joined():
corosync_mcastport = config('ha-mcastport') corosync_mcastport = config('ha-mcastport')
vip = config('vip') vip = config('vip')
if not vip: if not vip:
log('Unable to configure hacluster as vip not provided', log('Unable to configure hacluster as vip not provided', level=ERROR)
level=ERROR)
sys.exit(1) sys.exit(1)
# Obtain resources # Obtain resources
resources = { resources = {'res_swift_haproxy': 'lsb:haproxy'}
'res_swift_haproxy': 'lsb:haproxy' resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}
}
resource_params = {
'res_swift_haproxy': 'op monitor interval="5s"'
}
vip_group = [] vip_group = []
for vip in vip.split(): for vip in vip.split():
@ -414,12 +372,8 @@ def ha_relation_joined():
if len(vip_group) >= 1: if len(vip_group) >= 1:
relation_set(groups={'grp_swift_vips': ' '.join(vip_group)}) relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})
init_services = { init_services = {'res_swift_haproxy': 'haproxy'}
'res_swift_haproxy': 'haproxy' clones = {'cl_swift_haproxy': 'res_swift_haproxy'}
}
clones = {
'cl_swift_haproxy': 'res_swift_haproxy'
}
relation_set(init_services=init_services, relation_set(init_services=init_services,
corosync_bindiface=corosync_bindiface, corosync_bindiface=corosync_bindiface,
@ -430,10 +384,9 @@ def ha_relation_joined():
def configure_https(): def configure_https():
''' """Enables SSL API Apache config if appropriate and kicks identity-service
Enables SSL API Apache config if appropriate and kicks identity-service
with any required api updates. with any required api updates.
''' """
# need to write all to ensure changes to the entire request pipeline # need to write all to ensure changes to the entire request pipeline
# propagate (c-api, haprxy, apache) # propagate (c-api, haprxy, apache)
CONFIGS.write_all() CONFIGS.write_all()
@ -451,14 +404,17 @@ def configure_https():
for rid in relation_ids('identity-service'): for rid in relation_ids('identity-service'):
keystone_joined(relid=rid) keystone_joined(relid=rid)
write_rc_script() env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server',
'OPENSTACK_PORT_API': config('bind-port'),
'OPENSTACK_PORT_MEMCACHED': 11211}
openstack.save_script_rc(**env_vars)
def main(): def main():
try: try:
hooks.execute(sys.argv) hooks.execute(sys.argv)
except UnregisteredHookError as e: except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e)) log('Unknown hook {} - skipping.'.format(e), level=DEBUG)
if __name__ == '__main__': if __name__ == '__main__':

View File

@ -1,14 +1,41 @@
import os import os
import pwd import pwd
import shutil
import subprocess import subprocess
import charmhelpers.contrib.openstack.utils as openstack import uuid
import sys
from collections import OrderedDict
from collections import OrderedDict
from swift_context import (
get_swift_hash,
SwiftHashContext,
SwiftIdentityContext,
HAProxyContext,
SwiftRingContext,
ApacheSSLContext,
MemcachedContext,
)
import charmhelpers.contrib.openstack.context as context
import charmhelpers.contrib.openstack.templating as templating
from charmhelpers.contrib.openstack.utils import (
get_os_codename_package,
get_os_codename_install_source,
configure_installation_source
)
from charmhelpers.contrib.hahelpers.cluster import (
is_elected_leader,
is_clustered,
)
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
log, ERROR, log,
DEBUG,
INFO,
WARNING,
config, config,
relation_get, relation_get,
unit_get,
relation_set,
relation_ids,
) )
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_update, apt_update,
@ -16,14 +43,13 @@ from charmhelpers.fetch import (
apt_install, apt_install,
add_source add_source
) )
from charmhelpers.core.host import ( from charmhelpers.core.host import (
lsb_release lsb_release
) )
from charmhelpers.contrib.network.ip import (
import charmhelpers.contrib.openstack.context as context format_ipv6_addr,
import charmhelpers.contrib.openstack.templating as templating get_ipv6_addr,
import swift_context )
# Various config files that are managed via templating. # Various config files that are managed via templating.
@ -70,59 +96,58 @@ BASE_PACKAGES = [
FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3'] FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']
SWIFT_HA_RES = 'grp_swift_vips' SWIFT_HA_RES = 'grp_swift_vips'
TEMPLATES = 'templates/' TEMPLATES = 'templates/'
# Map config files to hook contexts and services that will be associated # Map config files to hook contexts and services that will be associated
# with file in restart_on_changes()'s service map. # with file in restart_on_changes()'s service map.
CONFIG_FILES = OrderedDict([ CONFIG_FILES = OrderedDict([
(SWIFT_CONF, { (SWIFT_CONF, {
'hook_contexts': [swift_context.SwiftHashContext()], 'hook_contexts': [SwiftHashContext()],
'services': ['swift-proxy'], 'services': ['swift-proxy'],
}), }),
(SWIFT_PROXY_CONF, { (SWIFT_PROXY_CONF, {
'hook_contexts': [swift_context.SwiftIdentityContext(), 'hook_contexts': [SwiftIdentityContext(),
context.BindHostContext()], context.BindHostContext()],
'services': ['swift-proxy'], 'services': ['swift-proxy'],
}), }),
(HAPROXY_CONF, { (HAPROXY_CONF, {
'hook_contexts': [context.HAProxyContext(), 'hook_contexts': [context.HAProxyContext(),
swift_context.HAProxyContext()], HAProxyContext()],
'services': ['haproxy'], 'services': ['haproxy'],
}), }),
(SWIFT_RINGS_CONF, { (SWIFT_RINGS_CONF, {
'hook_contexts': [swift_context.SwiftRingContext()], 'hook_contexts': [SwiftRingContext()],
'services': ['apache2'], 'services': ['apache2'],
}), }),
(SWIFT_RINGS_24_CONF, { (SWIFT_RINGS_24_CONF, {
'hook_contexts': [swift_context.SwiftRingContext()], 'hook_contexts': [SwiftRingContext()],
'services': ['apache2'], 'services': ['apache2'],
}), }),
(APACHE_SITE_CONF, { (APACHE_SITE_CONF, {
'hook_contexts': [swift_context.ApacheSSLContext()], 'hook_contexts': [ApacheSSLContext()],
'services': ['apache2'], 'services': ['apache2'],
}), }),
(APACHE_SITE_24_CONF, { (APACHE_SITE_24_CONF, {
'hook_contexts': [swift_context.ApacheSSLContext()], 'hook_contexts': [ApacheSSLContext()],
'services': ['apache2'], 'services': ['apache2'],
}), }),
(MEMCACHED_CONF, { (MEMCACHED_CONF, {
'hook_contexts': [swift_context.MemcachedContext()], 'hook_contexts': [MemcachedContext()],
'services': ['memcached'], 'services': ['memcached'],
}), }),
]) ])
def register_configs(): def register_configs():
""" """Register config files with their respective contexts.
Register config files with their respective contexts.
Regstration of some configs may not be required depending on Registration of some configs may not be required depending on
existence of certain relations. existence of certain relations.
""" """
# if called without anything installed (eg during install hook) # if called without anything installed (eg during install hook)
# just default to earliest supported release. configs dont get touched # just default to earliest supported release. configs dont get touched
# till post-install, anyway. # till post-install, anyway.
release = openstack.get_os_codename_package('swift-proxy', fatal=False) \ release = get_os_codename_package('swift-proxy', fatal=False) \
or 'essex' or 'essex'
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release) openstack_release=release)
@ -149,13 +174,12 @@ def register_configs():
def restart_map(): def restart_map():
''' """Determine the correct resource map to be passed to
Determine the correct resource map to be passed to
charmhelpers.core.restart_on_change() based on the services configured. charmhelpers.core.restart_on_change() based on the services configured.
:returns: dict: A dictionary mapping config file to lists of services :returns dict: A dictionary mapping config file to lists of services
that should be restarted when file changes. that should be restarted when file changes.
''' """
_map = [] _map = []
for f, ctxt in CONFIG_FILES.iteritems(): for f, ctxt in CONFIG_FILES.iteritems():
svcs = [] svcs = []
@ -163,6 +187,7 @@ def restart_map():
svcs.append(svc) svcs.append(svc)
if svcs: if svcs:
_map.append((f, svcs)) _map.append((f, svcs))
return OrderedDict(_map) return OrderedDict(_map)
@ -174,12 +199,13 @@ def swift_user(username='swift'):
def ensure_swift_dir(conf_dir=os.path.dirname(SWIFT_CONF)): def ensure_swift_dir(conf_dir=os.path.dirname(SWIFT_CONF)):
if not os.path.isdir(conf_dir): if not os.path.isdir(conf_dir):
os.mkdir(conf_dir, 0o750) os.mkdir(conf_dir, 0o750)
uid, gid = swift_user() uid, gid = swift_user()
os.chown(conf_dir, uid, gid) os.chown(conf_dir, uid, gid)
def determine_packages(release): def determine_packages(release):
'''determine what packages are needed for a given OpenStack release''' """Determine what packages are needed for a given OpenStack release."""
if release == 'essex': if release == 'essex':
return BASE_PACKAGES return BASE_PACKAGES
elif release == 'folsom': elif release == 'folsom':
@ -190,13 +216,6 @@ def determine_packages(release):
return FOLSOM_PACKAGES return FOLSOM_PACKAGES
def write_rc_script():
env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server',
'OPENSTACK_PORT_API': config('bind-port'),
'OPENSTACK_PORT_MEMCACHED': 11211}
openstack.save_script_rc(**env_vars)
def _load_builder(path): def _load_builder(path):
# lifted straight from /usr/bin/swift-ring-builder # lifted straight from /usr/bin/swift-ring-builder
from swift.common.ring import RingBuilder from swift.common.ring import RingBuilder
@ -213,6 +232,7 @@ def _load_builder(path):
for dev in builder.devs: for dev in builder.devs:
if dev and 'meta' not in dev: if dev and 'meta' not in dev:
dev['meta'] = '' dev['meta'] = ''
return builder return builder
@ -222,14 +242,14 @@ def _write_ring(ring, ring_path):
def ring_port(ring_path, node): def ring_port(ring_path, node):
'''determine correct port from relation settings for a given ring file.''' """Determine correct port from relation settings for a given ring file."""
for name in ['account', 'object', 'container']: for name in ['account', 'object', 'container']:
if name in ring_path: if name in ring_path:
return node[('%s_port' % name)] return node[('%s_port' % name)]
def initialize_ring(path, part_power, replicas, min_hours): def initialize_ring(path, part_power, replicas, min_hours):
'''Initialize a new swift ring with given parameters.''' """Initialize a new swift ring with given parameters."""
from swift.common.ring import RingBuilder from swift.common.ring import RingBuilder
ring = RingBuilder(part_power, replicas, min_hours) ring = RingBuilder(part_power, replicas, min_hours)
_write_ring(ring, path) _write_ring(ring, path)
@ -244,8 +264,7 @@ def exists_in_ring(ring_path, node):
n = [(i, node[i]) for i in node if i in dev and i != 'zone'] n = [(i, node[i]) for i in node if i in dev and i != 'zone']
if sorted(d) == sorted(n): if sorted(d) == sorted(n):
msg = 'Node already exists in ring (%s).' % ring_path log('Node already exists in ring (%s).' % ring_path, level=INFO)
log(msg)
return True return True
return False return False
@ -271,10 +290,8 @@ def add_to_ring(ring_path, node):
} }
ring.add_dev(new_dev) ring.add_dev(new_dev)
_write_ring(ring, ring_path) _write_ring(ring, ring_path)
msg = 'Added new device to ring %s: %s' %\ msg = 'Added new device to ring %s: %s' % (ring_path, new_dev)
(ring_path, log(msg, level=INFO)
[k for k in new_dev.iteritems()])
log(msg)
def _get_zone(ring_builder): def _get_zone(ring_builder):
@ -302,9 +319,19 @@ def _get_zone(ring_builder):
return sorted(zone_distrib, key=zone_distrib.get).pop(0) return sorted(zone_distrib, key=zone_distrib.get).pop(0)
def get_min_part_hours(ring):
    """Return the min_part_hours value stored in the given ring builder."""
    return _load_builder(ring).min_part_hours
def set_min_part_hours(path, min_part_hours):
    """Set min_part_hours on the builder at path and persist the change."""
    ring_builder = _load_builder(path)
    ring_builder.min_part_hours = min_part_hours
    _write_ring(ring_builder, path)
def get_zone(assignment_policy): def get_zone(assignment_policy):
''' Determine the appropriate zone depending on configured assignment """Determine the appropriate zone depending on configured assignment policy.
policy.
Manual assignment relies on each storage zone being deployed as a Manual assignment relies on each storage zone being deployed as a
separate service unit with its desired zone set as a configuration separate service unit with its desired zone set as a configuration
@ -314,7 +341,7 @@ def get_zone(assignment_policy):
of zones equal to the configured minimum replicas. This allows for a of zones equal to the configured minimum replicas. This allows for a
single swift-storage service unit, with each 'add-unit'd machine unit single swift-storage service unit, with each 'add-unit'd machine unit
being assigned to a different zone. being assigned to a different zone.
''' """
if assignment_policy == 'manual': if assignment_policy == 'manual':
return relation_get('zone') return relation_get('zone')
elif assignment_policy == 'auto': elif assignment_policy == 'auto':
@ -324,13 +351,12 @@ def get_zone(assignment_policy):
potential_zones.append(_get_zone(builder)) potential_zones.append(_get_zone(builder))
return set(potential_zones).pop() return set(potential_zones).pop()
else: else:
log('Invalid zone assignment policy: %s' % assignment_policy, msg = ('Invalid zone assignment policy: %s' % assignment_policy)
level=ERROR) raise Exception(msg)
sys.exit(1)
def balance_ring(ring_path): def balance_ring(ring_path):
'''balance a ring. return True if it needs redistribution''' """Balance a ring. return True if it needs redistribution."""
# shell out to swift-ring-builder instead, since the balancing code there # shell out to swift-ring-builder instead, since the balancing code there
# does a bunch of un-importable validation.''' # does a bunch of un-importable validation.'''
cmd = ['swift-ring-builder', ring_path, 'rebalance'] cmd = ['swift-ring-builder', ring_path, 'rebalance']
@ -340,21 +366,32 @@ def balance_ring(ring_path):
if rc == 0: if rc == 0:
return True return True
elif rc == 1: elif rc == 1:
# swift-ring-builder returns 1 on WARNING (ring didn't require balance) # Ring builder exit-code=1 is supposed to indicate warning but I have
# noticed that it can also return 1 with the following sort of message:
#
# NOTE: Balance of 166.67 indicates you should push this ring, wait
# at least 0 hours, and rebalance/repush.
#
# This indicates that a balance has occurred and a resync would be
# required so not sure why 1 is returned in this case.
return False return False
else: else:
log('balance_ring: %s returned %s' % (cmd, rc), level=ERROR) msg = ('balance_ring: %s returned %s' % (cmd, rc))
sys.exit(1) raise Exception(msg)
def should_balance(rings): def should_balance(rings):
'''Based on zones vs min. replicas, determine whether or not the rings """Based on zones vs min. replicas, determine whether or not the rings
should be balanced during initial configuration.''' should be balanced during initial configuration.
"""
for ring in rings: for ring in rings:
builder = _load_builder(ring).to_dict() builder = _load_builder(ring).to_dict()
replicas = builder['replicas'] replicas = builder['replicas']
zones = [dev['zone'] for dev in builder['devs']] zones = [dev['zone'] for dev in builder['devs']]
if len(set(zones)) < replicas: num_zones = len(set(zones))
if num_zones < replicas:
log("Not enough zones (%d) defined to allow rebalance "
"(need >= %d)" % (num_zones, replicas), level=DEBUG)
return False return False
return True return True
@ -362,10 +399,10 @@ def should_balance(rings):
def do_openstack_upgrade(configs): def do_openstack_upgrade(configs):
new_src = config('openstack-origin') new_src = config('openstack-origin')
new_os_rel = openstack.get_os_codename_install_source(new_src) new_os_rel = get_os_codename_install_source(new_src)
log('Performing OpenStack upgrade to %s.' % (new_os_rel)) log('Performing OpenStack upgrade to %s.' % (new_os_rel), level=DEBUG)
openstack.configure_installation_source(new_src) configure_installation_source(new_src)
dpkg_opts = [ dpkg_opts = [
'--option', 'Dpkg::Options::=--force-confnew', '--option', 'Dpkg::Options::=--force-confnew',
'--option', 'Dpkg::Options::=--force-confdef', '--option', 'Dpkg::Options::=--force-confdef',
@ -390,3 +427,194 @@ def setup_ipv6():
' main') ' main')
apt_update() apt_update()
apt_install('haproxy/trusty-backports', fatal=True) apt_install('haproxy/trusty-backports', fatal=True)
def sync_proxy_rings(broker_url):
    """Fetch ring and builder files from the leader proxy unit.

    The leader proxy is responsible for initialising, updating and
    rebalancing the rings. Once the leader is ready, the rings must be
    synced into every other proxy unit.

    Note that we sync the ring builder and .gz files since the builder
    itself is linked to the underlying .gz ring.
    """
    log('Fetching swift rings & builders from proxy @ %s.' % broker_url,
        level=DEBUG)
    target = '/etc/swift'
    for server in ['account', 'object', 'container']:
        # Pull the builder first, then the ring it is linked to.
        for suffix in ['builder', 'ring.gz']:
            url = '%s/%s.%s' % (broker_url, server, suffix)
            log('Fetching %s.' % url, level=DEBUG)
            cmd = ['wget', url, '--retry-connrefused', '-t', '10', '-O',
                   '%s/%s.%s' % (target, server, suffix)]
            subprocess.check_call(cmd)
def balance_rings(force_sync=False):
    """Rebalance each ring and notify peers that new rings are available.

    :param force_sync: publish rings to peers even if no ring changed.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Balance rings called by non-leader - skipping", level=WARNING)
        return

    # Record which rings actually changed as a result of the rebalance.
    changed = []
    for path in SWIFT_RINGS.itervalues():
        if balance_ring(path):
            log('Balanced ring %s' % path, level=DEBUG)
            changed.append(path)
        else:
            log('Ring %s not rebalanced' % path, level=DEBUG)

    if not changed and not force_sync:
        log("Rings unchanged by rebalance - skipping sync", level=INFO)
        return

    # Publish rings and builders via the apache www dir so that peers and
    # storage units can fetch them.
    www_dir = get_www_dir()
    for ring, builder_path in SWIFT_RINGS.iteritems():
        ringfile = '%s.ring.gz' % ring
        src = os.path.join(SWIFT_CONF_DIR, ringfile)
        shutil.copyfile(src, os.path.join(www_dir, ringfile))
        dst = os.path.join(www_dir, os.path.basename(builder_path))
        shutil.copyfile(builder_path, dst)

    notify_peers_builders_available()
def mark_www_rings_deleted():
    """Mark any rings from the apache server directory as deleted so that
    storage units won't see them.

    The files are renamed (not removed) so they can be restored later if
    required.
    """
    www_dir = get_www_dir()
    # Iterate keys directly - the builder paths (values) are not needed here.
    for ring in SWIFT_RINGS:
        path = os.path.join(www_dir, '%s.ring.gz' % ring)
        if os.path.exists(path):
            os.rename(path, "%s.deleted" % (path))
def notify_peers_builders_available():
    """Tell peer swift-proxy units to synchronise ring and builder files.

    Note that this should only be called from the leader unit.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Ring availability peer broadcast requested by non-leader - "
            "skipping", level=WARNING)
        return

    hostname = config('vip') if is_clustered() else get_hostaddr()
    hostname = format_ipv6_addr(hostname) or hostname

    # Notify peers that builders are available
    log("Notifying peer(s) that rings are ready for sync.", level=INFO)
    # NOTE(dosaboy): we add some random data to the relation settings
    # otherwise subsequent calls will not fire (since hostname is always
    # the same).
    settings = {'trigger': str(uuid.uuid4()),
                'builder-broker': hostname,
                'disable-proxy-service': 0}
    for rid in relation_ids('cluster'):
        log("Notifying rid=%s" % (rid), level=DEBUG)
        relation_set(relation_id=rid, relation_settings=settings)
def disable_peer_apis():
    """Notify peer relations that they should disable their proxy services.

    This should only be called by the leader unit.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        # Only the leader can do this.
        return

    log("Sending request to disable proxy service to all peers", level=INFO)
    # Random trigger ensures the relation change fires on every call.
    settings = {'trigger': str(uuid.uuid4()),
                'disable-proxy-service': 1}
    for rid in relation_ids('cluster'):
        relation_set(relation_id=rid, relation_settings=settings)
def notify_storage_rings_available():
    """Notify swift-storage relations that they should synchronise ring and
    builder files.

    Note that this should only be called from the leader unit.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Ring availability storage-relation broadcast requested by "
            "non-leader - skipping", level=WARNING)
        return

    if is_clustered():
        hostname = config('vip')
    else:
        hostname = get_hostaddr()

    hostname = format_ipv6_addr(hostname) or hostname
    path = os.path.basename(get_www_dir())
    rings_url = 'http://%s/%s' % (hostname, path)
    # Use a string trigger for consistency with the cluster peer
    # notifications; its randomness ensures the relation change fires even
    # when rings_url is unchanged.
    trigger = str(uuid.uuid4())
    # Notify storage nodes that there is a new ring to fetch.
    log("Notifying storage nodes that new ring is ready for sync.", level=INFO)
    for relid in relation_ids('swift-storage'):
        relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                     rings_url=rings_url, trigger=trigger)
def builders_synced():
    """Check that we have all the ring builders synced from the leader.

    Returns True if we have all ring builders.
    """
    for builder_path in SWIFT_RINGS.itervalues():
        if os.path.exists(builder_path):
            continue

        log("Builder not yet synced - %s" % (builder_path), level=DEBUG)
        return False

    return True
def get_hostaddr():
    """Return this unit's address, honouring the prefer-ipv6 setting."""
    if not config('prefer-ipv6'):
        return unit_get('private-address')

    # Exclude the vip so we return this unit's own address.
    return get_ipv6_addr(exc_list=[config('vip')])[0]
def update_min_part_hours():
    """Update the min_part_hours setting on swift rings.

    This should only be called by the leader unit. Once the update has been
    performed and if the setting has changed, rings will be resynced across
    the cluster.
    """
    # Only the leader can do this (the original check was inverted which
    # made the leader bail out and non-leaders attempt the update).
    if not is_elected_leader(SWIFT_HA_RES):
        return

    new_min_part_hours = config('min-hours')
    resync_builders = False
    # Only update if all rings exist
    if all(os.path.exists(p) for p in SWIFT_RINGS.itervalues()):
        for ring, path in SWIFT_RINGS.iteritems():
            min_part_hours = get_min_part_hours(path)
            if min_part_hours != new_min_part_hours:
                # Fixed format args: the original supplied one value for two
                # %s placeholders which raised TypeError at runtime.
                log("Setting ring %s min_part_hours to %s" %
                    (ring, new_min_part_hours), level=INFO)
                set_min_part_hours(path, new_min_part_hours)
                resync_builders = True

    if resync_builders:
        if should_balance(list(SWIFT_RINGS.itervalues())):
            balance_rings()

        notify_peers_builders_available()
        notify_storage_rings_available()