From c9c6735a7e8eccb9ecd6f6ad4910180ea60470a1 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sun, 12 Oct 2014 07:08:43 +0000 Subject: [PATCH 1/7] Add in unicast support --- config.yaml | 5 +++++ hooks/hooks.py | 26 ++++++++++++++++++++++++-- templates/corosync.conf | 16 +++++++++++++++- 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/config.yaml b/config.yaml index 48a84b7..ada8469 100644 --- a/config.yaml +++ b/config.yaml @@ -85,3 +85,8 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + corosync_transport: + type: string + default: "udp" + description: | + Two supported modes are udp (multicast) or udpu (unicast) diff --git a/hooks/hooks.py b/hooks/hooks.py index 691efee..2ad1065 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -29,6 +29,7 @@ from charmhelpers.core.hookenv import ( config, Hooks, UnregisteredHookError, local_unit, + unit_private_ip, ) from charmhelpers.core.host import ( @@ -48,6 +49,7 @@ from charmhelpers.fetch import ( ) from charmhelpers.contrib.hahelpers.cluster import ( + peer_ips, peer_units, oldest_peer ) @@ -78,6 +80,18 @@ def install(): shutil.copy('ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd') +def get_ha_nodes(): + ha_units = peer_ips(peer_relation='hanode') + ha_units[local_unit()] = unit_private_ip() + ha_nodes = {} + # Corosync nodeid 0 is reserved so increase all the nodeids to avoid it + off_set = 1000 + for unit in ha_units: + unit_no = off_set + int(unit.split('/')[1]) + ha_nodes[unit_no] = ha_units[unit] + return ha_nodes + + def get_corosync_conf(): if config('prefer-ipv6'): ip_version = 'ipv6' @@ -85,7 +99,6 @@ def get_corosync_conf(): else: ip_version = 'ipv4' bindnetaddr = hacluster.get_network_address - # NOTE(jamespage) use local charm configuration over any provided by # principle charm conf = { @@ -94,6 +107,8 @@ def get_corosync_conf(): 'corosync_mcastport': 
config('corosync_mcastport'), 'corosync_mcastaddr': config('corosync_mcastaddr'), 'ip_version': ip_version, + 'ha_nodes': get_ha_nodes(), + 'transport': config('corosync_transport'), } if None not in conf.itervalues(): return conf @@ -109,6 +124,8 @@ def get_corosync_conf(): unit, relid), 'corosync_mcastaddr': config('corosync_mcastaddr'), 'ip_version': ip_version, + 'ha_nodes': get_ha_nodes(), + 'transport': config('corosync_transport'), } if config('prefer-ipv6'): @@ -161,7 +178,12 @@ def config_changed(): log('CRITICAL', 'No Corosync key supplied, cannot proceed') sys.exit(1) - + supported_transports = ['udp', 'udpu'] + if config('corosync_transport') not in supported_transports: + raise ValueError('The corosync_transport type %s is not supported.' + 'Supported types are: %s' % + (config('corosync_transport'), + str(supported_transports))) hacluster.enable_lsb_services('pacemaker') if configure_corosync(): diff --git a/templates/corosync.conf b/templates/corosync.conf index e4c0de6..6d05301 100644 --- a/templates/corosync.conf +++ b/templates/corosync.conf @@ -1,5 +1,5 @@ # Config file generated by the ha charm. 
- +# udp.template totem { version: 2 @@ -47,9 +47,12 @@ totem { # The following values need to be set based on your environment ringnumber: 0 bindnetaddr: {{ corosync_bindnetaddr }} + {% if transport == "udp" %} mcastaddr: {{ corosync_mcastaddr }} + {% endif %} mcastport: {{ corosync_mcastport }} } + transport: {{ transport }} } quorum { @@ -59,6 +62,17 @@ quorum { expected_votes: 2 } +{% if transport == "udp" %} +nodelist { +{% for nodeid, ip in ha_nodes.iteritems() %} + node { + ring0_addr: {{ ip }} + nodeid: {{ nodeid }} + } +{% endfor %} +} +{% endif %} + logging { fileline: off to_stderr: yes From d329527fa15bd9d2257bff7f2e6a92d7497bfc9c Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sun, 12 Oct 2014 08:12:10 +0000 Subject: [PATCH 2/7] bunch of fixes --- .bzrignore | 1 + Makefile | 13 +- charm-helpers.yaml | 1 + .../contrib/openstack/__init__.py | 0 hooks/charmhelpers/contrib/openstack/utils.py | 486 ++++++++++++++++++ hooks/hooks.py | 18 +- templates/corosync.conf | 4 +- 7 files changed, 512 insertions(+), 11 deletions(-) create mode 100644 .bzrignore create mode 100644 hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/utils.py diff --git a/.bzrignore b/.bzrignore new file mode 100644 index 0000000..ba077a4 --- /dev/null +++ b/.bzrignore @@ -0,0 +1 @@ +bin diff --git a/Makefile b/Makefile index 52caa19..4c005b4 100644 --- a/Makefile +++ b/Makefile @@ -5,5 +5,14 @@ lint: @flake8 --exclude hooks/charmhelpers hooks @charm proof -sync: - @charm-helper-sync -c charm-helpers.yaml +test: + @echo Starting tests... 
+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + +bin/charm_helpers_sync.py: + @mkdir -p bin + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ + > bin/charm_helpers_sync.py + +sync: bin/charm_helpers_sync.py + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml diff --git a/charm-helpers.yaml b/charm-helpers.yaml index c3e93c9..4e7b234 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -7,3 +7,4 @@ include: - contrib.hahelpers - contrib.storage - contrib.network.ip + - contrib.openstack.utils diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 0000000..b0d1b03 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,486 @@ +#!/usr/bin/python + +# Common python helper functions used for OpenStack charms. 
+from collections import OrderedDict + +import subprocess +import json +import os +import socket +import sys + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, + charm_dir, + ERROR, + INFO, + relation_ids, + relation_set +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr +) + +from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.fetch import apt_install, apt_cache +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), +]) + +# The ugly duckling +SWIFT_CODENAMES = OrderedDict([ + ('1.4.3', 'diablo'), + ('1.4.8', 'essex'), + ('1.7.4', 'folsom'), + ('1.8.0', 'grizzly'), + ('1.7.7', 'grizzly'), + ('1.7.6', 'grizzly'), + ('1.10.0', 'havana'), + ('1.9.1', 'havana'), + ('1.9.0', 'havana'), + ('1.13.1', 'icehouse'), + ('1.13.0', 'icehouse'), + ('1.12.0', 'icehouse'), + ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), +]) + +DEFAULT_LOOPBACK_SIZE = '5G' + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def 
get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. 
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in vers_map.iteritems(): + if cname == codename: + return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) + + +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. 
+ ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + +def import_key(keyid): + cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel == 'distro-proposed': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(DISTRO_PROPOSED % ubuntu_rel) + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. 
+ pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in env_vars.iteritems() if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. 
+ + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + + """ + + import apt_pkg as apt + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 + + +def ensure_block_device(block_device): + ''' + Confirm block_device, create as loopback if necessary. + + :param block_device: str: Full path of block device to ensure. + + :returns: str: Full path of ensured block device. + ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: ' + 'block_device=%s.' % block_device, level=ERROR) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev, + level=ERROR) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + + +def is_ip(address): + """ + Returns True if address is a valid IP address. 
+ """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, basestring): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + return ns_query(hostname) + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install('python-dnspython') + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + else: + result = address + + if fqdn: + # strip trailing . 
+ if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + keys = kwargs.keys() + for key in keys: + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) diff --git a/hooks/hooks.py b/hooks/hooks.py index 2ad1065..7af963e 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -54,6 +54,8 @@ from charmhelpers.contrib.hahelpers.cluster import ( oldest_peer ) +from charmhelpers.contrib.openstack.utils import get_host_ip + hooks = Hooks() COROSYNC_CONF = '/etc/corosync/corosync.conf' @@ -80,15 +82,19 @@ def install(): shutil.copy('ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd') +def get_corosync_id(unit_name): + # Corosync nodeid 0 is reserved so increase all the nodeids to avoid it + off_set = 1000 + return off_set + int(unit_name.split('/')[1]) + + def get_ha_nodes(): ha_units = peer_ips(peer_relation='hanode') ha_units[local_unit()] = unit_private_ip() ha_nodes = {} - # Corosync nodeid 0 is reserved so increase all the nodeids to avoid it - off_set = 1000 for unit in ha_units: - unit_no = off_set + int(unit.split('/')[1]) - ha_nodes[unit_no] = ha_units[unit] + corosync_id = get_corosync_id(unit) + ha_nodes[corosync_id] = get_host_ip(ha_units[unit]) return ha_nodes @@ -129,9 +135,7 @@ def get_corosync_conf(): } if config('prefer-ipv6'): - local_unit_no = int(local_unit().split('/')[1]) - # nodeid should not be 0 - conf['nodeid'] = local_unit_no + 1 + conf['nodeid'] = get_corosync_id(local_unit()) conf['netmtu'] = config('netmtu') if None not in conf.itervalues(): diff --git a/templates/corosync.conf b/templates/corosync.conf index 6d05301..6286d08 100644 --- 
a/templates/corosync.conf +++ b/templates/corosync.conf @@ -1,5 +1,5 @@ # Config file generated by the ha charm. -# udp.template + totem { version: 2 @@ -62,7 +62,7 @@ quorum { expected_votes: 2 } -{% if transport == "udp" %} +{% if transport == "udpu" %} nodelist { {% for nodeid, ip in ha_nodes.iteritems() %} node { From 09546b3d3184ec983db6cfdf02f53679570492d2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 13 Oct 2014 09:56:02 +0000 Subject: [PATCH 3/7] Add sleep to stop corosync becoming wedged during restarts --- hooks/hooks.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hooks/hooks.py b/hooks/hooks.py index 7af963e..4dddba8 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -35,7 +35,6 @@ from charmhelpers.core.hookenv import ( from charmhelpers.core.host import ( service_stop, service_start, - service_restart, service_running, write_file, mkdir, @@ -205,8 +204,11 @@ def upgrade_charm(): def restart_corosync(): if service_running("pacemaker"): service_stop("pacemaker") - service_restart("corosync") + service_stop("corosync") + # Corosync can become wedged if restarted too quickly. Need to + # replace sleep with some science time.sleep(5) + service_start("corosync") service_start("pacemaker") From 550670aa547a8b21cfb62e6635131fd292d841c0 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 19 Nov 2014 15:51:11 +0000 Subject: [PATCH 4/7] Update transport config options to be more user friendly and support backwards compatibility --- config.yaml | 4 ++-- hooks/hooks.py | 25 +++++++++++++++++-------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/config.yaml b/config.yaml index ada8469..b00c2f5 100644 --- a/config.yaml +++ b/config.yaml @@ -87,6 +87,6 @@ options: your network interface.
corosync_transport: type: string - default: "udp" + default: "multicast" description: | - Two supported modes are udp (multicast) or udpu (unicast) + Two supported modes are multicast (udp) or unicast (udpu) diff --git a/hooks/hooks.py b/hooks/hooks.py index 4dddba8..52bac1c 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -68,6 +68,7 @@ COROSYNC_CONF_FILES = [ ] PACKAGES = ['corosync', 'pacemaker', 'python-netaddr', 'ipmitool'] +SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast'] @hooks.hook() @@ -81,6 +82,20 @@ def install(): shutil.copy('ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd') +def get_transport(): + if config('corosync_transport') == 'multicast': + return 'udp' + elif config('corosync_transport') == 'unicast': + return 'udpu' + elif config('corosync_transport') in ['udp', 'udpu']: + return config('corosync_transport') + else: + raise ValueError('The corosync_transport type %s is not supported.' + 'Supported types are: %s' % + (config('corosync_transport'), + str(SUPPORTED_TRANSPORTS))) + + def get_corosync_id(unit_name): # Corosync nodeid 0 is reserved so increase all the nodeids to avoid it off_set = 1000 @@ -113,7 +128,7 @@ def get_corosync_conf(): 'corosync_mcastaddr': config('corosync_mcastaddr'), 'ip_version': ip_version, 'ha_nodes': get_ha_nodes(), - 'transport': config('corosync_transport'), + 'transport': get_transport(), } if None not in conf.itervalues(): return conf @@ -130,7 +145,7 @@ def get_corosync_conf(): 'corosync_mcastaddr': config('corosync_mcastaddr'), 'ip_version': ip_version, 'ha_nodes': get_ha_nodes(), - 'transport': config('corosync_transport'), + 'transport': get_transport(), } if config('prefer-ipv6'): @@ -181,12 +196,6 @@ def config_changed(): log('CRITICAL', 'No Corosync key supplied, cannot proceed') sys.exit(1) - supported_transports = ['udp', 'udpu'] - if config('corosync_transport') not in supported_transports: - raise ValueError('The corosync_transport type %s is not supported.' 
- 'Supported types are: %s' % - (config('corosync_transport'), - str(supported_transports))) hacluster.enable_lsb_services('pacemaker') if configure_corosync(): From 70bdee4a5f50553ddf001b6ca362f11f3529f293 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 4 Dec 2014 09:09:44 +0000 Subject: [PATCH 5/7] Replace spaces with tabs --- templates/corosync.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/corosync.conf b/templates/corosync.conf index 6286d08..292bed9 100644 --- a/templates/corosync.conf +++ b/templates/corosync.conf @@ -47,9 +47,9 @@ totem { # The following values need to be set based on your environment ringnumber: 0 bindnetaddr: {{ corosync_bindnetaddr }} - {% if transport == "udp" %} + {% if transport == "udp" %} mcastaddr: {{ corosync_mcastaddr }} - {% endif %} + {% endif %} mcastport: {{ corosync_mcastport }} } transport: {{ transport }} From 57fbb375224a3884bab360324a56af6baf8428e2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 4 Dec 2014 10:12:29 +0000 Subject: [PATCH 6/7] stop/sleep/start hack no longer seems to be needed on corosync restart --- hooks/hooks.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/hooks/hooks.py b/hooks/hooks.py index 52bac1c..8fda39a 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -33,8 +33,9 @@ from charmhelpers.core.hookenv import ( ) from charmhelpers.core.host import ( - service_stop, service_start, + service_stop, + service_restart, service_running, write_file, mkdir, @@ -213,11 +214,7 @@ def upgrade_charm(): def restart_corosync(): if service_running("pacemaker"): service_stop("pacemaker") - service_stop("corosync") - # Corosync can become wedged if restarted too quickly. 
Need to - # replace sleep with some science - time.sleep(5) - service_start("corosync") + service_restart("corosync") service_start("pacemaker") From 98920d1219e119cc049c130162a28d9821f4d8a3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 4 Dec 2014 10:15:18 +0000 Subject: [PATCH 7/7] Lint fix --- hooks/hooks.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/hooks/hooks.py b/hooks/hooks.py index 8fda39a..2971ab7 100755 --- a/hooks/hooks.py +++ b/hooks/hooks.py @@ -10,7 +10,6 @@ import ast import shutil import sys -import time import os from base64 import b64decode @@ -82,19 +81,17 @@ def install(): if not os.path.isfile('/usr/lib/ocf/resource.d/ceph/rbd'): shutil.copy('ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd') +_deprecated_transport_values = {"multicast": "udp", "unicast": "udpu"} + def get_transport(): - if config('corosync_transport') == 'multicast': - return 'udp' - elif config('corosync_transport') == 'unicast': - return 'udpu' - elif config('corosync_transport') in ['udp', 'udpu']: - return config('corosync_transport') - else: + oo = config('corosync_transport') + val = _deprecated_transport_values.get(oo, oo) + if val not in ['udp', 'udpu']: raise ValueError('The corosync_transport type %s is not supported.' 'Supported types are: %s' % - (config('corosync_transport'), - str(SUPPORTED_TRANSPORTS))) + (oo, str(SUPPORTED_TRANSPORTS))) + return val def get_corosync_id(unit_name):