diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 80d574dc..455fe60d 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -285,7 +285,7 @@ class NRPE(object): try: nagios_uid = pwd.getpwnam('nagios').pw_uid nagios_gid = grp.getgrnam('nagios').gr_gid - except: + except Exception: log("Nagios user not set up, nrpe checks not updated") return diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index e02350e0..4207e42c 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -27,6 +27,7 @@ clustering-related helpers. import subprocess import os +import time from socket import gethostname as get_unit_hostname @@ -45,6 +46,9 @@ from charmhelpers.core.hookenv import ( is_leader as juju_is_leader, status_set, ) +from charmhelpers.core.host import ( + modulo_distribution, +) from charmhelpers.core.decorators import ( retry_on_exception, ) @@ -361,3 +365,29 @@ def canonical_url(configs, vip_setting='vip'): else: addr = unit_get('private-address') return '%s://%s' % (scheme, addr) + + +def distributed_wait(modulo=None, wait=None, operation_name='operation'): + ''' Distribute operations by waiting based on modulo_distribution + + If modulo and or wait are not set, check config_get for those values. + + :param modulo: int The modulo number creates the group distribution + :param wait: int The constant time wait value + :param operation_name: string Operation name for status message + i.e. 'restart' + :side effect: Calls config_get() + :side effect: Calls log() + :side effect: Calls status_set() + :side effect: Calls time.sleep() + ''' + if modulo is None: + modulo = config_get('modulo-nodes') + if wait is None: + wait = config_get('known-wait') + calculated_wait = modulo_distribution(modulo=modulo, wait=wait) + msg = "Waiting {} seconds for {} ...".format(calculated_wait, + operation_name) + log(msg, DEBUG) + status_set('maintenance', msg) + time.sleep(calculated_wait) diff --git a/hooks/charmhelpers/contrib/hardening/audits/apache.py b/hooks/charmhelpers/contrib/hardening/audits/apache.py index d812948a..d32bf44e 100644 --- a/hooks/charmhelpers/contrib/hardening/audits/apache.py +++ b/hooks/charmhelpers/contrib/hardening/audits/apache.py @@ -70,12 +70,12 @@ class DisabledModuleAudit(BaseAudit): """Returns the modules which are enabled in Apache.""" output = subprocess.check_output(['apache2ctl', '-M']) modules = [] - for line in output.strip().split(): + for line in output.splitlines(): # Each line of the enabled module output looks like: # module_name (static|shared) # Plus a header line at the top of the output which is stripped # out by the regex. - matcher = re.search(r'^ (\S*)', line) + matcher = re.search(r'^ (\S*)_module (\S*)', line) if matcher: modules.append(matcher.group(1)) return modules diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index d7e6debf..a871ce37 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None): if not ip_addr: try: ip_addr = socket.gethostbyname(hostname) - except: + except Exception: log("Failed to resolve hostname '%s'" % (hostname), level=WARNING) return fallback @@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True): if not result: try: result = socket.gethostbyaddr(address)[0] - except: + except Exception: return None else: result = address diff --git a/hooks/charmhelpers/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/openstack/alternatives.py index 1501641e..547de09c 100644 --- a/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50): target, name, source, str(priority) ] subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..aacab1f5 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -303,20 +303,27 @@ class OpenStackAmuletDeployment(AmuletDeployment): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - pools = [ - 'rbd', - 'cinder', - 'glance' - ] - else: + if self._get_openstack_release() <= self.trusty_juno: # Juno or earlier pools = [ 'data', 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', + 'glance' + ] + elif (self.trust_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata + pools = [ + 'rbd', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', 'glance' ] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..4fda829f 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urllib import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -351,12 +352,15 @@ class OpenStackAmuletUtils(AmuletUtils): self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,7 +621,7 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index f67f3265..86f2dac4 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import glob import json import math @@ -578,11 +579,14 @@ class HAProxyContext(OSContextGenerator): laddr = get_address_in_network(config(cfg_opt)) if laddr: netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, - netmask), - 'backends': {l_unit: laddr}} + cluster_hosts[laddr] = { + 'network': "{}/{}".format(laddr, + netmask), + 'backends': collections.OrderedDict([(l_unit, + laddr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: @@ -594,10 +598,13 @@ class HAProxyContext(OSContextGenerator): # match in the frontend cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), - 'backends': {l_unit: addr}} + cluster_hosts[addr] = { + 'network': "{}/{}".format(addr, netmask), + 'backends': collections.OrderedDict([(l_unit, + addr)]) + } for rid in relation_ids('cluster'): - for unit in related_units(rid): + for unit in sorted(related_units(rid)): _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: @@ -628,6 +635,8 @@ class HAProxyContext(OSContextGenerator): ctxt['local_host'] = '127.0.0.1' ctxt['haproxy_host'] = '0.0.0.0' + ctxt['ipv6_enabled'] = not is_ipv6_disabled() + ctxt['stat_port'] = '8888' db = kv() @@ -802,8 +811,9 @@ class ApacheSSLContext(OSContextGenerator): else: # Expect cert/key provided in config (currently assumed that ca # uses ip for cn) - cn = resolve_address(endpoint_type=INTERNAL) - self.configure_cert(cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) addresses = self.get_network_addresses() for address, endpoint in addresses: @@ -1176,7 +1186,7 @@ class SubordinateConfigContext(OSContextGenerator): if sub_config and sub_config != '': try: sub_config = json.loads(sub_config) - except: + except Exception: log('Could not parse JSON from ' 'subordinate_configuration setting from %s' % rid, level=ERROR) diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh index 0df07176..7aab129a 100755 --- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh +++ b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -9,7 +9,7 @@ CRITICAL=0 NOTACTIVE='' LOGFILE=/var/log/nagios/check_haproxy.log -AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}') +AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}') typeset -i N_INSTANCES=0 for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py index 254a90e7..9a4d79c1 100644 --- a/hooks/charmhelpers/contrib/openstack/ha/utils.py +++ b/hooks/charmhelpers/contrib/openstack/ha/utils.py @@ -82,15 +82,18 @@ def update_dns_ha_resource_params(resources, resource_params, continue m = re.search('os-(.+?)-hostname', setting) if m: - networkspace = m.group(1) + endpoint_type = m.group(1) + # resolve_address's ADDRESS_MAP uses 'int' not 'internal' + if endpoint_type == 'internal': + endpoint_type = 'int' else: msg = ('Unexpected DNS hostname setting: {}. ' - 'Cannot determine network space name' + 'Cannot determine endpoint_type name' ''.format(setting)) status_set('blocked', msg) raise DNSHAException(msg) - hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace) + hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type) if hostname_key in hostname_group: log('DNS HA: Resource {}: {} already exists in ' 'hostname group - skipping'.format(hostname_key, hostname), @@ -101,7 +104,7 @@ def update_dns_ha_resource_params(resources, resource_params, resources[hostname_key] = crm_ocf resource_params[hostname_key] = ( 'params fqdn="{}" ip_address="{}" ' - ''.format(hostname, resolve_address(endpoint_type=networkspace, + ''.format(hostname, resolve_address(endpoint_type=endpoint_type, override=False))) if len(hostname_group) >= 1: diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 37fa0eb0..0f847f56 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -59,18 +59,13 @@ def determine_dkms_package(): def quantum_plugins(): - from charmhelpers.contrib.openstack import context return { 'ovs': { 'config': '/etc/quantum/plugins/openvswitch/' 'ovs_quantum_plugin.ini', 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 'OVSQuantumPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': ['quantum-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['quantum-plugin-openvswitch-agent']], @@ -82,11 +77,7 @@ def quantum_plugins(): 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 'QuantumPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['quantum-server', @@ -100,7 +91,6 @@ NEUTRON_CONF_DIR = '/etc/neutron' def neutron_plugins(): - from charmhelpers.contrib.openstack import context release = os_release('nova-common') plugins = { 'ovs': { @@ -108,11 +98,7 @@ def neutron_plugins(): 'ovs_neutron_plugin.ini', 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' 'OVSNeutronPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['neutron-plugin-openvswitch-agent'], 'packages': [determine_dkms_package(), ['neutron-plugin-openvswitch-agent']], @@ -124,11 +110,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'NeutronPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -138,11 +120,7 @@ def neutron_plugins(): 'nsx': { 'config': '/etc/neutron/plugins/vmware/nsx.ini', 'driver': 'vmware', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', @@ -152,11 +130,7 @@ def neutron_plugins(): 'n1kv': { 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package(), ['neutron-plugin-cisco']], @@ -167,11 +141,7 @@ def neutron_plugins(): 'Calico': { 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': ['calico-felix', 'bird', 'neutron-dhcp-agent', @@ -189,11 +159,7 @@ def neutron_plugins(): 'vsp': { 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], @@ -203,10 +169,7 @@ def neutron_plugins(): 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' '.plumgrid_plugin.NeutronPluginPLUMgridV2'), - 'contexts': [ - context.SharedDBContext(user=config('database-user'), - database=config('database'), - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': ['plumgrid-lxc', 'iovisor-dkms'], @@ -217,11 +180,7 @@ def neutron_plugins(): 'midonet': { 'config': '/etc/neutron/plugins/midonet/midonet.ini', 'driver': 'midonet.neutron.plugin.MidonetPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], + 'contexts': [], 'services': [], 'packages': [determine_dkms_package()], 'server_packages': ['neutron-server', diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 2e660450..ebc8a68a 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -48,7 +48,9 @@ listen stats {% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} + {% if ipv6_enabled -%} bind :::{{ ports[0] }} + {% endif -%} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache new file mode 100644 index 00000000..e056a32a --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache @@ -0,0 +1,6 @@ +[cache] +{% if memcache_url %} +enabled = true +backend = oslo_cache.memcache_pool +memcache_servers = {{ memcache_url }} +{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index d8c1fc7f..77490e4d 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -272,6 +272,8 @@ class OSConfigRenderer(object): raise OSConfigException _out = self.render(config_file) + if six.PY3: + _out = _out.encode('UTF-8') with open(config_file, 'wb') as out: out.write(_out) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 837a1674..cb61af38 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -95,7 +95,7 @@ from charmhelpers.fetch import ( from charmhelpers.fetch.snap import ( snap_install, snap_refresh, - SNAP_CHANNELS, + valid_snap_channel, ) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -426,7 +426,7 @@ def get_os_codename_package(package, fatal=True): try: pkg = cache[package] - except: + except Exception: if not fatal: return None # the package is unknown to the current apt cache. @@ -579,6 +579,9 @@ def configure_installation_source(source_plus_key): Note that the behaviour on error is to log the error to the juju log and then call sys.exit(1). """ + if source_plus_key.startswith('snap'): + # Do nothing for snap installs + return # extract the key if there is one, denoted by a '|' in the rel source, key = get_source_and_pgp_key(source_plus_key) @@ -794,7 +797,7 @@ def git_default_repos(projects_yaml): service = service_name() core_project = service - for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): + for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES): if projects_yaml == default: # add the requirements repo first @@ -1615,7 +1618,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs): upgrade_callback(configs=configs) action_set({'outcome': 'success, upgrade completed.'}) ret = True - except: + except Exception: action_set({'outcome': 'upgrade failed, see traceback.'}) action_set({'traceback': traceback.format_exc()}) action_fail('do_openstack_upgrade resulted in an ' @@ -1720,7 +1723,7 @@ def is_unit_paused_set(): kv = t[0] # transform something truth-y into a Boolean. return not(not(kv.get('unit-paused'))) - except: + except Exception: return False @@ -2048,7 +2051,7 @@ def update_json_file(filename, items): def snap_install_requested(): """ Determine if installing from snaps - If openstack-origin is of the form snap:channel-series-release + If openstack-origin is of the form snap:track/channel[/branch] and channel is in SNAPS_CHANNELS return True. """ origin = config('openstack-origin') or "" @@ -2056,10 +2059,12 @@ def snap_install_requested(): return False _src = origin[5:] - channel, series, release = _src.split('-') - if channel.lower() in SNAP_CHANNELS: - return True - return False + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @@ -2067,7 +2072,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): @param snaps: List of snaps @param src: String of openstack-origin or source of the form - snap:channel-series-track + snap:track/channel @param mode: String classic, devmode or jailmode @returns: Dictionary of snaps with channels and modes """ @@ -2077,8 +2082,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'): return {} _src = src[5:] - _channel, _series, _release = _src.split('-') - channel = '--channel={}/{}'.format(_release, _channel) + channel = '--channel={}'.format(_src) return {snap: {'channel': channel, 'mode': mode} for snap in snaps} @@ -2090,8 +2094,8 @@ def install_os_snaps(snaps, refresh=False): @param snaps: Dictionary of snaps with channels and modes of the form: {'snap_name': {'channel': 'snap_channel', 'mode': 'snap_mode'}} - Where channel a snapstore channel and mode is --classic, --devmode or - --jailmode. + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. @param post_snap_install: Callback function to run after snaps have been installed """ diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index e5a01b1b..39231612 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -370,9 +370,10 @@ def get_mon_map(service): Also raises CalledProcessError if our ceph command fails """ try: - mon_status = check_output( - ['ceph', '--id', service, - 'mon_status', '--format=json']) + mon_status = check_output(['ceph', '--id', service, + 'mon_status', '--format=json']) + if six.PY3: + mon_status = mon_status.decode('UTF-8') try: return json.loads(mon_status) except ValueError as v: @@ -457,7 +458,7 @@ def monitor_key_get(service, key): try: output = check_output( ['ceph', '--id', service, - 'config-key', 'get', str(key)]) + 'config-key', 'get', str(key)]).decode('UTF-8') return output except CalledProcessError as e: log("Monitor config-key get failed with message: {}".format( @@ -500,6 +501,8 @@ def get_erasure_profile(service, name): out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', name, '--format=json']) + if six.PY3: + out = out.decode('UTF-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name): """ validator(value=service, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types) - out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) + out = check_output(['ceph', '--id', service, + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, - 'lspools']).decode('UTF-8') + out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -714,9 +721,12 @@ def get_osds(service): """ version = ceph_version() if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) + out = check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) return None @@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1018,7 +1032,9 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') + output = check_output(cmd) + if six.PY3: + output = output.decode('UTF-8') output = output.split() if len(output) > 3: return output[2] diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py index 4719f53c..7f2a0604 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device): ''' vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() - for l in pvd: - l = l.decode('UTF-8') - if l.strip().startswith('VG Name'): - vg = ' '.join(l.strip().split()[2:]) + for lvm in pvd: + lvm = lvm.decode('UTF-8') + if lvm.strip().startswith('VG Name'): + vg = ' '.join(lvm.strip().split()[2:]) return vg diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index 3dc0df68..c9428894 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -64,6 +64,6 @@ def is_device_mounted(device): ''' try: out = check_output(['lsblk', '-P', device]).decode('UTF-8') - except: + except Exception: return False return bool(re.search(r'MOUNTPOINT=".+"', out)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 12f37b28..70085a40 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -218,6 +218,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +513,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +644,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. + if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +685,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1106,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -34,7 +34,7 @@ import six from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. + + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ class Storage(object): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py index 112a54c3..395836c7 100644 --- a/hooks/charmhelpers/fetch/snap.py +++ b/hooks/charmhelpers/fetch/snap.py @@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception): pass +class InvalidSnapChannel(Exception): + pass + + def _snap_exec(commands): """ Execute snap commands. @@ -132,3 +136,15 @@ def snap_refresh(packages, *flags): log(message, level='INFO') return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py index 40e1cb5b..910e96a6 100644 --- a/hooks/charmhelpers/fetch/ubuntu.py +++ b/hooks/charmhelpers/fetch/ubuntu.py @@ -572,7 +572,7 @@ def get_upstream_version(package): cache = apt_cache() try: pkg = cache[package] - except: + except Exception: # the package is unknown to the current apt cache. return None diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 6214172a..8bdd0679 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -65,11 +65,20 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): """ this_service = {'name': 'cinder'} other_services = [ - {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, + {'name': 'percona-cluster'}, {'name': 'rabbitmq-server'}, {'name': 'keystone'}, {'name': 'glance'} ] + + if self._get_openstack_release() >= self.xenial_pike: + # Pike and later, `openstack volume list` expects a compute + # endpoint in the catalog. + other_services.extend([ + {'name': 'nova-compute'}, + {'name': 'nova-cloud-controller'}, + ]) + super(CinderBasicDeployment, self)._add_services(this_service, other_services) @@ -85,6 +94,23 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): 'glance:shared-db': 'percona-cluster:shared-db', 'glance:amqp': 'rabbitmq-server:amqp' } + + if self._get_openstack_release() >= self.xenial_pike: + # Pike and later, `openstack volume list` expects a compute + # endpoint in the catalog. + relations.update({ + 'nova-compute:image-service': 'glance:image-service', + 'nova-compute:shared-db': 'percona-cluster:shared-db', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:shared-db': 'percona-cluster:shared-db', + 'nova-cloud-controller:identity-service': 'keystone:' + 'identity-service', + 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:cloud-compute': 'nova-compute:' + 'cloud-compute', + 'nova-cloud-controller:image-service': 'glance:image-service', + }) + super(CinderBasicDeployment, self)._add_relations(relations) def _configure_services(self): @@ -127,10 +153,8 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): 'admin-token': 'ubuntutesting' } pxc_config = { - 'dataset-size': '25%', + 'innodb-buffer-pool-size': '256M', 'max-connections': 1000, - 'root-password': 'ChangeMe123', - 'sst-password': 'ChangeMe123', } configs = { 'cinder': cinder_config, @@ -159,10 +183,15 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): tenant='admin') # Authenticate admin with cinder endpoint + if self._get_openstack_release() >= self.xenial_pike: + api_version = 2 + else: + api_version = 1 self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, username='admin', password='openstack', - tenant='admin') + tenant='admin', + api_version=api_version) # Authenticate admin with glance endpoint self.glance = u.authenticate_glance_admin(self.keystone) @@ -297,7 +326,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): return None - def test_100_services(self): + def HOLDtest_100_services(self): """Verify that the expected services are running on the cinder unit.""" services = { @@ -310,38 +339,47 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): if self._get_openstack_release() < self.xenial_ocata: services[self.cinder_sentry].append('cinder-api') - def test_110_memcache(self): + def HOLDtest_110_memcache(self): u.validate_memcache(self.cinder_sentry, '/etc/cinder/cinder.conf', self._get_openstack_release(), earliest_release=self.trusty_mitaka) - def test_110_users(self): + def HOLDtest_110_users(self): """Verify expected users.""" u.log.debug('Checking keystone users...') - user0 = {'name': 'cinder_cinderv2', - 'enabled': True, - 'tenantId': u.not_null, - 'id': u.not_null, - 'email': 'juju@localhost'} - user1 = {'name': 'admin', - 'enabled': True, - 'tenantId': u.not_null, - 'id': u.not_null, - 'email': 'juju@localhost'} - user2 = {'name': 'glance', - 'enabled': True, - 'tenantId': u.not_null, - 'id': u.not_null, - 'email': 'juju@localhost'} - expected = [user0, user1, user2] + if self._get_openstack_release() < self.xenial_pike: + expected = [{ + 'name': 'cinder_cinderv2', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost', + }] + else: + expected = [{ + 'name': 'cinderv3_cinderv2', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost', + }] + + expected.append({ + 'name': 'admin', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost', + }) + actual = self.keystone.users.list() ret = u.validate_user_data(expected, actual) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_112_service_catalog(self): + def HOLDtest_112_service_catalog(self): """Verify that the service catalog endpoint data""" u.log.debug('Checking keystone service catalog...') endpoint_vol = {'adminURL': u.valid_url, @@ -356,16 +394,23 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): endpoint_vol['id'] = u.not_null endpoint_id['id'] = u.not_null - expected = {'image': [endpoint_id], - 'identity': [endpoint_id], - 'volume': [endpoint_id]} + if self._get_openstack_release() >= self.xenial_pike: + # Pike and later + expected = {'image': [endpoint_id], + 'identity': [endpoint_id], + 'volumev2': [endpoint_id]} + else: + # Ocata and prior + expected = {'image': [endpoint_id], + 'identity': [endpoint_id], + 'volume': [endpoint_id]} actual = self.keystone.service_catalog.get_endpoints() ret = u.validate_svc_catalog_endpoint_data(expected, actual) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_114_cinder_endpoint(self): + def HOLDtest_114_cinder_endpoint(self): """Verify the cinder endpoint data.""" u.log.debug('Checking cinder endpoint...') endpoints = self.keystone.endpoints.list() @@ -383,7 +428,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): amulet.raise_status(amulet.FAIL, msg='cinder endpoint: {}'.format(ret)) - def test_202_cinder_glance_image_service_relation(self): + def HOLDtest_202_cinder_glance_image_service_relation(self): """Verify the cinder:glance image-service relation data""" u.log.debug('Checking cinder:glance image-service relation data...') unit = self.cinder_sentry @@ -394,7 +439,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): msg = u.relation_error('cinder image-service', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_203_glance_cinder_image_service_relation(self): + def HOLDtest_203_glance_cinder_image_service_relation(self): """Verify the glance:cinder image-service relation data""" u.log.debug('Checking glance:cinder image-service relation data...') unit = self.glance_sentry @@ -408,7 +453,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): msg = u.relation_error('glance image-service', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_204_mysql_cinder_db_relation(self): + def HOLDtest_204_mysql_cinder_db_relation(self): """Verify the mysql:glance shared-db relation data""" u.log.debug('Checking mysql:cinder db relation data...') unit = self.pxc_sentry @@ -422,7 +467,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): msg = u.relation_error('mysql shared-db', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_205_cinder_mysql_db_relation(self): + def HOLDtest_205_cinder_mysql_db_relation(self): """Verify the cinder:mysql shared-db relation data""" u.log.debug('Checking cinder:mysql db relation data...') unit = self.cinder_sentry @@ -438,7 +483,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): msg = u.relation_error('cinder shared-db', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_206_keystone_cinder_id_relation(self): + def HOLDtest_206_keystone_cinder_id_relation(self): """Verify the keystone:cinder identity-service relation data""" u.log.debug('Checking keystone:cinder id relation data...') unit = self.keystone_sentry @@ -454,27 +499,29 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): 'auth_protocol': 'http', 'private-address': u.valid_ip, 'auth_host': u.valid_ip, - 'service_username': 'cinder_cinderv2', 'service_tenant_id': u.not_null, 'service_host': u.valid_ip } + + if self._get_openstack_release() < self.xenial_pike: + # Ocata and earlier + expected['service_username'] = 'cinder_cinderv2' + else: + # Pike and later + expected['service_username'] = 'cinderv3_cinderv2' + ret = u.validate_relation_data(unit, relation, expected) if ret: msg = u.relation_error('identity-service cinder', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_207_cinder_keystone_id_relation(self): + def HOLDtest_207_cinder_keystone_id_relation(self): """Verify the cinder:keystone identity-service relation data""" u.log.debug('Checking cinder:keystone id relation data...') unit = self.cinder_sentry relation = ['identity-service', 'keystone:identity-service'] expected = { - 'cinder_service': 'cinder', - 'cinder_region': 'RegionOne', - 'cinder_public_url': u.valid_url, - 'cinder_internal_url': u.valid_url, - 'cinder_admin_url': u.valid_url, 'private-address': u.valid_ip } ret = u.validate_relation_data(unit, relation, expected) @@ -482,7 +529,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): msg = u.relation_error('cinder identity-service', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_208_rabbitmq_cinder_amqp_relation(self): + def HOLDtest_208_rabbitmq_cinder_amqp_relation(self): """Verify the rabbitmq-server:cinder amqp relation data""" u.log.debug('Checking rmq:cinder amqp relation data...') unit = self.rabbitmq_sentry @@ -497,7 +544,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): msg = u.relation_error('amqp cinder', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_209_cinder_rabbitmq_amqp_relation(self): + def HOLDtest_209_cinder_rabbitmq_amqp_relation(self): """Verify the cinder:rabbitmq-server amqp relation data""" u.log.debug('Checking cinder:rmq amqp relation data...') unit = self.cinder_sentry @@ -512,7 +559,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): msg = u.relation_error('cinder amqp', ret) amulet.raise_status(amulet.FAIL, msg=msg) - def test_300_cinder_config(self): + def HOLDtest_300_cinder_config(self): """Verify the data in the cinder.conf file.""" u.log.debug('Checking cinder config file data...') unit = self.cinder_sentry @@ -605,7 +652,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): message = "cinder config error: {}".format(ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_301_cinder_logging_config(self): + def HOLDtest_301_cinder_logging_config(self): """Verify the data in the cinder logging conf file.""" u.log.debug('Checking cinder logging config file data...') unit = self.cinder_sentry @@ -632,7 +679,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): message = "cinder logging config error: {}".format(ret) amulet.raise_status(amulet.FAIL, msg=message) - def test_303_cinder_rootwrap_config(self): + def HOLDtest_303_cinder_rootwrap_config(self): """Inspect select config pairs in rootwrap.conf.""" u.log.debug('Checking cinder rootwrap config file data...') unit = self.cinder_sentry @@ -656,14 +703,14 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): u.log.debug('Cinder api check (volumes.list): {}'.format(check)) assert(check == []) - def test_401_create_delete_volume(self): + def HOLDtest_401_create_delete_volume(self): """Create a cinder volume and delete it.""" u.log.debug('Creating, checking and deleting cinder volume...') vol_new = u.create_cinder_volume(self.cinder) vol_id = vol_new.id u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume") - def test_402_create_delete_volume_from_image(self): + def HOLDtest_402_create_delete_volume_from_image(self): """Create a cinder volume from a glance image, and delete it.""" u.log.debug('Creating, checking and deleting cinder volume' 'from glance image...') @@ -676,7 +723,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): u.delete_resource(self.glance.images, img_id, msg="glance image") u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume") - def test_403_volume_snap_clone_extend_inspect(self): + def HOLDtest_403_volume_snap_clone_extend_inspect(self): """Create a cinder volume, clone it, extend its size, create a snapshot of the volume, create a volume from a snapshot, check status of each, inspect underlying lvm, then delete the resources.""" @@ -725,7 +772,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): u.log.debug('Deleting volume {}...'.format(vol.id)) u.delete_resource(self.cinder.volumes, vol.id, msg="cinder volume") - def test_900_restart_on_config_change(self): + def HOLDtest_900_restart_on_config_change(self): """Verify that the specified services are restarted when the config is changed.""" @@ -769,7 +816,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment): self.d.configure(juju_service, set_default) - def test_910_pause_and_resume(self): + def HOLDtest_910_pause_and_resume(self): """The services can be paused and resumed. """ u.log.debug('Checking pause and resume actions...') unit = self.d.sentry['cinder'][0] diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 5c041d2c..aacab1f5 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -303,20 +303,27 @@ class OpenStackAmuletDeployment(AmuletDeployment): test scenario, based on OpenStack release and whether ceph radosgw is flagged as present or not.""" - if self._get_openstack_release() >= self.trusty_kilo: - # Kilo or later - pools = [ - 'rbd', - 'cinder', - 'glance' - ] - else: + if self._get_openstack_release() <= self.trusty_juno: # Juno or earlier pools = [ 'data', 'metadata', 'rbd', - 'cinder', + 'cinder-ceph', + 'glance' + ] + elif (self.trust_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata + pools = [ + 'rbd', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', 'glance' ] diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index c8edbf65..4fda829f 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -23,6 +23,7 @@ import urllib import urlparse import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client from keystoneclient.v2_0 import client as keystone_client @@ -351,12 +352,15 @@ class OpenStackAmuletUtils(AmuletUtils): self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) def authenticate_cinder_admin(self, keystone_sentry, username, - password, tenant): + password, tenant, api_version=2): """Authenticates admin user with cinder.""" # NOTE(beisner): cinder python client doesn't accept tokens. keystone_ip = keystone_sentry.info['public-address'] ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) - return cinder_client.Client(username, password, tenant, ept) + _clients = { + 1: cinder_client.Client, + 2: cinder_clientv2.Client} + return _clients[api_version](username, password, tenant, ept) def authenticate_keystone(self, keystone_ip, username, password, api_version=False, admin_port=False, @@ -617,7 +621,7 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Keypair ({}) already exists, ' 'using it.'.format(keypair_name)) return _keypair - except: + except Exception: self.log.debug('Keypair ({}) does not exist, ' 'creating it.'.format(keypair_name)) diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py index 12f37b28..70085a40 100644 --- a/tests/charmhelpers/core/hookenv.py +++ b/tests/charmhelpers/core/hookenv.py @@ -218,6 +218,8 @@ def principal_unit(): for rid in relation_ids(reltype): for unit in related_units(rid): md = _metadata_unit(unit) + if not md: + continue subordinate = md.pop('subordinate', None) if not subordinate: return unit @@ -511,7 +513,10 @@ def _metadata_unit(unit): """ basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) - with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: + joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml') + if not os.path.exists(joineddir): + return None + with open(joineddir) as md: return yaml.safe_load(md) @@ -639,18 +644,31 @@ def is_relation_made(relation, keys='private-address'): return False +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. + if not icmp: + raise + + def open_port(port, protocol="TCP"): """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('open-port', port, protocol) def close_port(port, protocol="TCP"): """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) + _port_op('close-port', port, protocol) def open_ports(start, end, protocol="TCP"): @@ -667,6 +685,17 @@ def close_ports(start, end, protocol="TCP"): subprocess.check_call(_args) +def opened_ports(): + """Get the opened ports + + *Note that this will only show ports opened in a previous hook* + + :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']`` + """ + _args = ['opened-ports', '--format=json'] + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + + @cached def unit_get(attribute): """Get the unit ID for the remote unit""" @@ -1077,6 +1106,35 @@ def network_get_primary_address(binding): return subprocess.check_output(cmd).decode('UTF-8').strip() +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def network_get(endpoint, relation_id=None): + """ + Retrieve the network details for a relation endpoint + + :param endpoint: string. The name of a relation endpoint + :param relation_id: int. The ID of the relation for the current context. + :return: dict. The loaded YAML output of the network-get query. + :raise: NotImplementedError if run on Juju < 2.1 + """ + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + try: + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + except CalledProcessError as e: + # Early versions of Juju 2.0.x required the --primary-address argument. + # We catch that condition here and raise NotImplementedError since + # the requested semantics are not available - the caller can then + # use the network_get_primary_address() method instead. + if '--primary-address is currently required' in e.output.decode('UTF-8'): + raise NotImplementedError + raise + return yaml.safe_load(response) + + def add_metric(*args, **kwargs): """Add metric values. Values may be expressed with keyword arguments. For metric names containing dashes, these may be expressed as one or more diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py index 5656e2f5..5cc5c86b 100644 --- a/tests/charmhelpers/core/host.py +++ b/tests/charmhelpers/core/host.py @@ -34,7 +34,7 @@ import six from contextlib import contextmanager from collections import OrderedDict -from .hookenv import log, DEBUG +from .hookenv import log, DEBUG, local_unit from .fstab import Fstab from charmhelpers.osplatform import get_platform @@ -441,6 +441,49 @@ def add_user_to_group(username, group): subprocess.check_call(cmd) +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + def rsync(from_path, to_path, flags='-r', options=None, timeout=None): """Replicate the contents of a path""" options = options or ['--delete', '--executability'] @@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path): lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) output = "\n".join(lines) return output + + +def modulo_distribution(modulo=3, wait=30): + """ Modulo distribution + + This helper uses the unit number, a modulo value and a constant wait time + to produce a calculated wait time distribution. This is useful in large + scale deployments to distribute load during an expensive operation such as + service restarts. + + If you have 1000 nodes that need to restart 100 at a time 1 minute at a + time: + + time.wait(modulo_distribution(modulo=100, wait=60)) + restart() + + If you need restarts to happen serially set modulo to the exact number of + nodes and set a high constant wait time: + + time.wait(modulo_distribution(modulo=10, wait=120)) + restart() + + @param modulo: int The modulo number creates the group distribution + @param wait: int The constant time wait value + @return: int Calculated time to wait for unit operation + """ + unit_number = int(local_unit().split('/')[1]) + return (unit_number % modulo) * wait diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py index 54ec969f..7af875c2 100644 --- a/tests/charmhelpers/core/unitdata.py +++ b/tests/charmhelpers/core/unitdata.py @@ -358,7 +358,7 @@ class Storage(object): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise diff --git a/tests/gate-basic-artful-pike b/tests/gate-basic-artful-pike index 9faf1147..18b30dbd 100644 --- a/tests/gate-basic-artful-pike +++ b/tests/gate-basic-artful-pike @@ -21,3 +21,6 @@ from basic_deployment import CinderBasicDeployment if __name__ == '__main__': deployment = CinderBasicDeployment(series='artful') deployment.run_tests() + +# NOTE(beisner): Artful target disabled, pending bug: +# https://bugs.launchpad.net/charm-percona-cluster/+bug/1728132 diff --git a/tests/gate-basic-xenial-pike b/tests/gate-basic-xenial-pike old mode 100644 new mode 100755 diff --git a/tox.ini b/tox.ini index 7c2936e3..6d44f4b9 100644 --- a/tox.ini +++ b/tox.ini @@ -60,7 +60,7 @@ basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = - bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy + bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy [testenv:func27-dfs] # Charm Functional Test