From aaecef459670fc92e9f24baf86c8a4e1b754c825 Mon Sep 17 00:00:00 2001
From: James Page
Date: Wed, 5 Dec 2018 15:26:35 +0000
Subject: [PATCH] Resync charms.ceph and charmhelpers

Resync the latest charms.ceph to avoid direct installation of
Python 2 modules, which are not available in the latest Ceph
packaging now that Ceph no longer provides Python 2 support.

This commit also updates two actions which still used Python 2 to
use Python 3 (in line with the rest of the charm).

Change-Id: I8fc2a1aa17e48ef5dac9b9974c33b9620fcb7c70
---
 actions/set_noout.py                          |   2 +-
 actions/unset_noout.py                        |   2 +-
 .../charmhelpers/contrib/charmsupport/nrpe.py |  23 ++--
 .../contrib/openstack/amulet/deployment.py    |   3 +-
 .../contrib/openstack/ha/utils.py             | 110 +++++++++++++-----
 hooks/charmhelpers/contrib/openstack/utils.py |  99 +++++++++++-----
 .../contrib/storage/linux/loopback.py         |   6 +-
 lib/ceph/utils.py                             |  19 ++-
 8 files changed, 193 insertions(+), 71 deletions(-)

diff --git a/actions/set_noout.py b/actions/set_noout.py
index 97aa3841..50c119d9 100755
--- a/actions/set_noout.py
+++ b/actions/set_noout.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 #
 # Copyright 2017 Canonical Ltd
 #
diff --git a/actions/unset_noout.py b/actions/unset_noout.py
index 8ae9a393..142fa1e5 100755
--- a/actions/unset_noout.py
+++ b/actions/unset_noout.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
 #
 # Copyright 2017 Canonical Ltd
 #
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index e3d10c1c..10d86ac0 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -416,15 +416,20 @@ def copy_nrpe_checks(nrpe_files_dir=None):
 
     """
     NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
-    default_nrpe_files_dir = os.path.join(
-        os.getenv('CHARM_DIR'),
-        'hooks',
-        'charmhelpers',
-        'contrib',
-        'openstack',
-        'files')
-    if not nrpe_files_dir:
-        nrpe_files_dir = default_nrpe_files_dir
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
     if not os.path.exists(NAGIOS_PLUGINS):
         os.makedirs(NAGIOS_PLUGINS)
     for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index 1c96752a..5b7e3cfb 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -168,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                          'nrpe', 'openvswitch-odl', 'neutron-api-odl',
                          'odl-controller', 'cinder-backup', 'nexentaedge-data',
                          'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                         'cinder-nexentaedge', 'nexentaedge-mgmt']))
+                         'cinder-nexentaedge', 'nexentaedge-mgmt',
+                         'ceilometer-agent']))
 
         if self.openstack:
             for svc in services:
diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py
index add8eb9a..cdf4b4c9 100644
--- a/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/ha/utils.py
@@ -23,6 +23,7 @@ Helpers for high availability.
""" +import hashlib import json import re @@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import ( config, status_set, DEBUG, - WARNING, ) from charmhelpers.core.host import ( @@ -124,13 +124,29 @@ def expect_ha(): return len(ha_related_units) > 0 or config('vip') or config('dns-ha') -def generate_ha_relation_data(service): +def generate_ha_relation_data(service, extra_settings=None): """ Generate relation data for ha relation Based on configuration options and unit interfaces, generate a json encoded dict of relation data items for the hacluster relation, providing configuration for DNS HA or VIP's + haproxy clone sets. + Example of supplying additional settings:: + + COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips' + AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth' + AGENT_CA_PARAMS = 'op monitor interval="5s"' + + ha_console_settings = { + 'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH}, + 'init_services': {'res_nova_consoleauth': 'nova-consoleauth'}, + 'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH}, + 'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}) + generate_ha_relation_data('nova', extra_settings=ha_console_settings) + + + @param service: Name of the service being configured + @param extra_settings: Dict of additional resource data @returns dict: json encoded data for use with relation_set """ _haproxy_res = 'res_{}_haproxy'.format(service) @@ -149,6 +165,13 @@ def generate_ha_relation_data(service): }, } + if extra_settings: + for k, v in extra_settings.items(): + if _relation_data.get(k): + _relation_data[k].update(v) + else: + _relation_data[k] = v + if config('dns-ha'): update_hacluster_dns_ha(service, _relation_data) else: @@ -232,40 +255,75 @@ def update_hacluster_vip(service, relation_data): """ cluster_config = get_hacluster_config() vip_group = [] + vips_to_delete = [] for vip in cluster_config['vip'].split(): if is_ipv6(vip): - res_neutron_vip = 'ocf:heartbeat:IPv6addr' + res_vip = 'ocf:heartbeat:IPv6addr' vip_params = 'ipv6addr' else: - res_neutron_vip = 'ocf:heartbeat:IPaddr2' + res_vip = 'ocf:heartbeat:IPaddr2' vip_params = 'ip' - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + + fallback_params = False + if iface is None: + iface = config('vip_iface') + fallback_params = True + if netmask is None: + netmask = config('vip_cidr') + fallback_params = True if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent vip_key = 'res_{}_{}_vip'.format(service, iface) - if vip_key in vip_group: - if vip not in relation_data['resource_params'][vip_key]: - vip_key = '{}_{}'.format(vip_key, vip_params) - else: - log("Resource '%s' (vip='%s') already exists in " - "vip group - skipping" % (vip_key, vip), WARNING) - continue + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these where used + # instead of auto-detected values + if fallback_params: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 
'nic="{iface}"'.format(ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask) + ) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units. + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}"'.format(ip=vip_params, + vip=vip)) - relation_data['resources'][vip_key] = res_neutron_vip - relation_data['resource_params'][vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) vip_group.append(vip_key) + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + if len(vip_group) >= 1: - relation_data['groups'] = { - 'grp_{}_vips'.format(service): ' '.join(vip_group) - } + key = 'grp_{}_vips'.format(service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 29cad083..59312fcf 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -73,6 +73,8 @@ from charmhelpers.core.host import ( service_running, service_pause, service_resume, + service_stop, + service_start, restart_on_change_helper, ) from charmhelpers.fetch import ( @@ -299,7 +301,7 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel - if src in ['distro', 'distro-proposed']: + if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] except KeyError: @@ -1303,6 +1305,65 @@ def is_unit_paused_set(): return False +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. + + An optional charm_func() can be called. It should raise an Exception to + indicate that the function failed. If it was succesfull it should return + None or an optional message. + + The signature for charm_func is: + charm_func() -> message: str + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + :param action: Action to run: pause, resume, start or stop. + :type action: str + :param services: See above + :type services: See above + :param charm_func: function to run for custom charm pausing. 
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            stopped = service_pause(service)
-            if not stopped:
-                messages.append("{} didn't stop cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
     set_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
""" - services = _extract_services_list_helper(services) - messages = [] - if services: - for service in services.keys(): - started = service_resume(service) - if not started: - messages.append("{} didn't start cleanly.".format(service)) - if charm_func: - try: - message = charm_func() - if message: - messages.append(message) - except Exception as e: - message.append(str(e)) + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) clear_unit_paused() if assess_status_func: message = assess_status_func() diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 0dfdae52..82472ff1 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -36,8 +36,10 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ') for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0] return loopbacks diff --git a/lib/ceph/utils.py b/lib/ceph/utils.py index 2ef48abe..c03005bb 100644 --- a/lib/ceph/utils.py +++ b/lib/ceph/utils.py @@ -80,8 +80,8 @@ LEADER = 'leader' PEON = 'peon' QUORUM = [LEADER, PEON] -PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', 'python-ceph', - 'radosgw', 'xfsprogs', 'python-pyudev', +PACKAGES = ['ceph', 'gdisk', 'btrfs-tools', + 'radosgw', 'xfsprogs', 'lvm2', 'parted'] CEPH_KEY_MANAGER = 'ceph' @@ -1876,6 +1876,14 @@ def osdize_dir(path, encrypt=False, bluestore=False): :param encrypt: bool. Should the OSD directory be encrypted at rest :returns: None """ + + db = kv() + osd_devices = db.get('osd-devices', []) + if path in osd_devices: + log('Device {} already processed by charm,' + ' skipping'.format(path)) + return + if os.path.exists(os.path.join(path, 'upstart')): log('Path {} is already configured as an OSD - bailing'.format(path)) return @@ -1906,6 +1914,13 @@ def osdize_dir(path, encrypt=False, bluestore=False): log("osdize dir cmd: {}".format(cmd)) subprocess.check_call(cmd) + # NOTE: Record processing of device only on success to ensure that + # the charm only tries to initialize a device of OSD usage + # once during its lifetime. + osd_devices.append(path) + db.set('osd-devices', osd_devices) + db.flush() + def filesystem_mounted(fs): return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0