diff --git a/Makefile b/Makefile
index 097bd93..5c2ee29 100644
--- a/Makefile
+++ b/Makefile
@@ -20,8 +20,8 @@ bin/charm_helpers_sync.py:
         > bin/charm_helpers_sync.py
 
 sync: bin/charm_helpers_sync.py
-#	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
-	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
+#	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
 
 publish: lint
 	@bzr push lp:charms/ceilometer-agent
diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml
index 987d84c..48b12f6 100644
--- a/charm-helpers-tests.yaml
+++ b/charm-helpers-tests.yaml
@@ -1,5 +1,4 @@
-#branch: lp:charm-helpers
-branch: lp:~1chb1n/charm-helpers/amulet-ceph-cinder-updates/
+branch: lp:charm-helpers
 destination: tests/charmhelpers
 include:
     - contrib.amulet
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
index 9333efc..aa0b515 100644
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -44,6 +44,7 @@ from charmhelpers.core.hookenv import (
     ERROR,
     WARNING,
     unit_get,
+    is_leader as juju_is_leader
 )
 from charmhelpers.core.decorators import (
     retry_on_exception,
@@ -52,6 +53,8 @@ from charmhelpers.core.strutils import (
     bool_from_string,
 )
 
+DC_RESOURCE_NAME = 'DC'
+
 
 class HAIncompleteConfig(Exception):
     pass
@@ -61,17 +64,30 @@ class CRMResourceNotFound(Exception):
     pass
 
 
+class CRMDCNotFound(Exception):
+    pass
+
+
 def is_elected_leader(resource):
     """
     Returns True if the charm executing this is the elected cluster leader.
 
     It relies on two mechanisms to determine leadership:
-    1. If the charm is part of a corosync cluster, call corosync to
+    1. If juju is sufficiently new and leadership election is supported,
+       the is_leader command will be used.
+    2. If the charm is part of a corosync cluster, call corosync to
        determine leadership.
-    2. If the charm is not part of a corosync cluster, the leader is
+    3. If the charm is not part of a corosync cluster, the leader is
        determined as being "the alive unit with the lowest unit numer". In
        other words, the oldest surviving unit.
""" + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + if is_clustered(): if not is_crm_leader(resource): log('Deferring action to CRM leader.', level=INFO) @@ -95,7 +111,33 @@ def is_clustered(): return False -@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) +def is_crm_dc(): + """ + Determine leadership by querying the pacemaker Designated Controller + """ + cmd = ['crm', 'status'] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + + current_dc = '' + for line in status.split('\n'): + if line.startswith('Current DC'): + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + current_dc = line.split(':')[1].split()[0] + if current_dc == get_unit_hostname(): + return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + + return False + + +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) def is_crm_leader(resource, retry=False): """ Returns True if the charm calling this is the elected corosync leader, @@ -104,6 +146,8 @@ def is_crm_leader(resource, retry=False): We allow this operation to be retried to avoid the possibility of getting a false negative. See LP #1396246 for more info. """ + if resource == DC_RESOURCE_NAME: + return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 461a702..c664c9d 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -110,7 +110,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo) = range(10) + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) releases = { ('precise', None): self.precise_essex, @@ -121,8 +122,10 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -138,6 +141,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 9c3d918..576bf0b 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -16,15 +16,15 @@ import logging import os +import six import time import urllib import glanceclient.v1.client as glance_client +import heatclient.v1.client as 
heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client -import six - from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -37,7 +37,7 @@ class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. + that is specifically for use by OpenStack charm tests. """ def __init__(self, log_level=ERROR): @@ -51,6 +51,8 @@ class OpenStackAmuletUtils(AmuletUtils): Validate actual endpoint data vs expected endpoint data. The ports are used to find the matching endpoint. """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) @@ -77,6 +79,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual service catalog endpoints vs a list of expected service catalog endpoints. """ + self.log.debug('Validating service catalog endpoint data...') self.log.debug('actual: {}'.format(repr(actual))) for k, v in six.iteritems(expected): if k in actual: @@ -93,6 +96,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual tenant data vs list of expected tenant data. """ + self.log.debug('Validating tenant data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,6 +118,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual role data vs a list of expected role data. """ + self.log.debug('Validating role data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -134,6 +139,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual user data vs a list of expected user data. """ + self.log.debug('Validating user data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -155,17 +161,20 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual flavors vs a list of expected flavors. 
""" + self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') unit = keystone_sentry service_ip = unit.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -175,6 +184,7 @@ class OpenStackAmuletUtils(AmuletUtils): def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return keystone_client.Client(username=user, password=password, @@ -182,12 +192,21 @@ class OpenStackAmuletUtils(AmuletUtils): def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', endpoint_type='adminURL') return glance_client.Client(ep, token=keystone.auth_token) + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return nova_client.Client(username=user, api_key=password, @@ -195,6 +214,7 @@ class OpenStackAmuletUtils(AmuletUtils): def create_cirros_image(self, glance, image_name): """Download the latest cirros image and upload it to glance.""" + self.log.debug('Creating glance image ({})...'.format(image_name)) http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -235,6 +255,11 @@ class OpenStackAmuletUtils(AmuletUtils): def delete_image(self, glance, image): """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) num_before = len(list(glance.images.list())) glance.images.delete(image) @@ -254,6 +279,8 @@ class OpenStackAmuletUtils(AmuletUtils): def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) image = nova.images.find(name=image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, @@ -276,6 +303,11 @@ class OpenStackAmuletUtils(AmuletUtils): def delete_instance(self, nova, instance): """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + 
self.log.debug('Deleting instance ({})...'.format(instance)) num_before = len(list(nova.servers.list())) nova.servers.delete(instance) @@ -292,3 +324,90 @@ class OpenStackAmuletUtils(AmuletUtils): return False return True + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. + + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) + return False + + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. 
+
+        :param resource: pointer to os resource type, ex: heat_client.stacks
+        :param resource_id: unique id for the openstack resource
+        :param expected_stat: status to expect resource to reach
+        :param msg: text to identify purpose in logging
+        :param max_wait: maximum wait time in seconds
+        :returns: True if successful, False if status is not reached
+        """
+
+        tries = 0
+        resource_stat = resource.get(resource_id).status
+        while resource_stat != expected_stat and tries < (max_wait / 4):
+            self.log.debug('{} status check: '
+                           '{} [{}:{}] {}'.format(msg, tries,
+                                                  resource_stat,
+                                                  expected_stat,
+                                                  resource_id))
+            time.sleep(4)
+            resource_stat = resource.get(resource_id).status
+            tries += 1
+
+        self.log.debug('{}: expected, actual status = {}, '
+                       '{}'.format(msg, resource_stat, expected_stat))
+
+        if resource_stat == expected_stat:
+            return True
+        else:
+            self.log.debug('{} never reached expected status: '
+                           '{}'.format(resource_id, expected_stat))
+            return False
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index 400eaf8..ab40006 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -240,7 +240,7 @@ class SharedDBContext(OSContextGenerator):
         if self.relation_prefix:
             password_setting = self.relation_prefix + '_password'
 
-        for rid in relation_ids('shared-db'):
+        for rid in relation_ids(self.interfaces[0]):
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
                 host = rdata.get('db_host')
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index 29bbddc..3dca6dc 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -17,6 +17,7 @@
 from charmhelpers.core.hookenv import (
     config,
     unit_get,
+    service_name,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
@@ -26,8 +27,6 @@ from charmhelpers.contrib.network.ip import (
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
-from functools import partial
-
 PUBLIC = 'public'
 INTERNAL = 'int'
 ADMIN = 'admin'
@@ -35,15 +34,18 @@ ADMIN = 'admin'
 ADDRESS_MAP = {
     PUBLIC: {
         'config': 'os-public-network',
-        'fallback': 'public-address'
+        'fallback': 'public-address',
+        'override': 'os-public-hostname',
     },
     INTERNAL: {
         'config': 'os-internal-network',
-        'fallback': 'private-address'
+        'fallback': 'private-address',
+        'override': 'os-internal-hostname',
     },
     ADMIN: {
         'config': 'os-admin-network',
-        'fallback': 'private-address'
+        'fallback': 'private-address',
+        'override': 'os-admin-hostname',
     }
 }
 
@@ -57,15 +59,50 @@ def canonical_url(configs, endpoint_type=PUBLIC):
     :param endpoint_type: str endpoint type to resolve.
     :param returns: str base URL for services on the current service unit.
     """
-    scheme = 'http'
-    if 'https' in configs.complete_contexts():
-        scheme = 'https'
+    scheme = _get_scheme(configs)
+
     address = resolve_address(endpoint_type)
     if is_ipv6(address):
         address = "[{}]".format(address)
+
     return '%s://%s' % (scheme, address)
 
 
+def _get_scheme(configs):
+    """Returns the scheme to use for the url (either http or https)
+    depending upon whether https is in the configs value.
+
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :returns: either 'http' or 'https' depending on whether https is
+              configured within the configs context.
+ """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. + :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + def resolve_address(endpoint_type=PUBLIC): """Return unit address depending on net config. @@ -77,7 +114,10 @@ def resolve_address(endpoint_type=PUBLIC): :param endpoint_type: Network endpoing type """ - resolved_address = None + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address + vips = config('vip') if vips: vips = vips.split() @@ -109,38 +149,3 @@ def resolve_address(endpoint_type=PUBLIC): "clustered=%s)" % (net_type, clustered)) return resolved_address - - -def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, - override=None): - """Returns the correct endpoint URL to advertise to Keystone. - - This method provides the correct endpoint URL which should be advertised to - the keystone charm for endpoint creation. This method allows for the url to - be overridden to force a keystone endpoint to have specific URL for any of - the defined scopes (admin, internal, public). - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :param url_template: str format string for creating the url template. Only - two values will be passed - the scheme+hostname - returned by the canonical_url and the port. - :param endpoint_type: str endpoint type to resolve. - :param override: str the name of the config option which overrides the - endpoint URL defined by the charm itself. None will - disable any overrides (default). - """ - if override: - # Return any user-defined overrides for the keystone endpoint URL. 
-        user_value = config(override)
-        if user_value:
-            return user_value.strip()
-
-    return url_template % (canonical_url(configs, endpoint_type), port)
-
-
-public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
-
-internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
-
-admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py
index 02c92e9..f7b7235 100644
--- a/hooks/charmhelpers/contrib/openstack/neutron.py
+++ b/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -172,14 +172,16 @@ def neutron_plugins():
                 'services': ['calico-felix',
                              'bird',
                              'neutron-dhcp-agent',
-                             'nova-api-metadata'],
+                             'nova-api-metadata',
+                             'etcd'],
                 'packages': [[headers_package()] + determine_dkms_package(),
                              ['calico-compute',
                               'bird',
                               'neutron-dhcp-agent',
-                              'nova-api-metadata']],
-                'server_packages': ['neutron-server', 'calico-control'],
-                'server_services': ['neutron-server']
+                              'nova-api-metadata',
+                              'etcd']],
+                'server_packages': ['neutron-server', 'calico-control', 'etcd'],
+                'server_services': ['neutron-server', 'etcd']
             },
             'vsp': {
                 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
@@ -256,11 +258,14 @@ def network_manager():
 def parse_mappings(mappings):
     parsed = {}
     if mappings:
-        mappings = mappings.split(' ')
+        mappings = mappings.split()
        for m in mappings:
             p = m.partition(':')
-            if p[1] == ':':
-                parsed[p[0].strip()] = p[2].strip()
+            key = p[0].strip()
+            if p[1]:
+                parsed[key] = p[2].strip()
+            else:
+                parsed[key] = ''
 
     return parsed
 
@@ -283,13 +288,13 @@ def parse_data_port_mappings(mappings, default_bridge='br-data'):
     Returns dict of the form {bridge:port}.
     """
     _mappings = parse_mappings(mappings)
-    if not _mappings:
+    if not _mappings or list(_mappings.values()) == ['']:
         if not mappings:
             return {}
 
         # For backwards-compatibility we need to support port-only provided in
         # config.
-        _mappings = {default_bridge: mappings.split(' ')[0]}
+        _mappings = {default_bridge: mappings.split()[0]}
 
     bridges = _mappings.keys()
     ports = _mappings.values()
@@ -309,6 +314,8 @@ def parse_vlan_range_mappings(mappings):
 
     Mappings must be a space-delimited list of provider:start:end mappings.
 
+    The start:end range is optional and may be omitted.
+
     Returns dict of the form {provider: (start, end)}.
""" _mappings = parse_mappings(mappings) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index f90a028..28532c9 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -53,9 +53,13 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr ) +from charmhelpers.contrib.python.packages import ( + pip_create_virtualenv, + pip_install, +) + from charmhelpers.core.host import lsb_release, mounts, umount from charmhelpers.fetch import apt_install, apt_cache, install_remote -from charmhelpers.contrib.python.packages import pip_install from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -75,6 +79,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) @@ -87,6 +92,7 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2014.1', 'icehouse'), ('2014.2', 'juno'), ('2015.1', 'kilo'), + ('2015.2', 'liberty'), ]) # The ugly duckling @@ -109,6 +115,7 @@ SWIFT_CODENAMES = OrderedDict([ ('2.2.0', 'juno'), ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), + ('2.3.0', 'liberty'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -317,6 +324,9 @@ def configure_installation_source(rel): 'kilo': 'trusty-updates/kilo', 'kilo/updates': 'trusty-updates/kilo', 'kilo/proposed': 'trusty-proposed/kilo', + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', } try: @@ -497,7 +507,17 @@ def git_install_requested(): requirements_dir = None -def git_clone_and_install(projects_yaml, core_project): +def _git_yaml_load(projects_yaml): + """ + Load the specified yaml into a dictionary. + """ + if not projects_yaml: + return None + + return yaml.load(projects_yaml) + + +def git_clone_and_install(projects_yaml, core_project, depth=1): """ Clone/install all specified OpenStack repositories. @@ -510,23 +530,22 @@ def git_clone_and_install(projects_yaml, core_project): repository: 'git://git.openstack.org/openstack/requirements.git', branch: 'stable/icehouse'} directory: /mnt/openstack-git - http_proxy: http://squid.internal:3128 - https_proxy: https://squid.internal:3128 + http_proxy: squid-proxy-url + https_proxy: squid-proxy-url The directory, http_proxy, and https_proxy keys are optional. """ global requirements_dir parent_dir = '/mnt/openstack-git' + http_proxy = None - if not projects_yaml: - return - - projects = yaml.load(projects_yaml) + projects = _git_yaml_load(projects_yaml) _git_validate_projects_yaml(projects, core_project) old_environ = dict(os.environ) if 'http_proxy' in projects.keys(): + http_proxy = projects['http_proxy'] os.environ['http_proxy'] = projects['http_proxy'] if 'https_proxy' in projects.keys(): os.environ['https_proxy'] = projects['https_proxy'] @@ -534,15 +553,24 @@ def git_clone_and_install(projects_yaml, core_project): if 'directory' in projects.keys(): parent_dir = projects['directory'] + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) + + # Upgrade setuptools from default virtualenv version. The default version + # in trusty breaks update.py in global requirements master branch. 
+    pip_install('setuptools', upgrade=True, proxy=http_proxy,
+                venv=os.path.join(parent_dir, 'venv'))
+
     for p in projects['repositories']:
         repo = p['repository']
         branch = p['branch']
         if p['name'] == 'requirements':
-            repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
+            repo_dir = _git_clone_and_install_single(repo, branch, depth,
+                                                     parent_dir, http_proxy,
                                                      update_requirements=False)
             requirements_dir = repo_dir
         else:
-            repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
+            repo_dir = _git_clone_and_install_single(repo, branch, depth,
+                                                     parent_dir, http_proxy,
                                                      update_requirements=True)
 
     os.environ = old_environ
@@ -574,7 +602,8 @@ def _git_ensure_key_exists(key, keys):
         error_out('openstack-origin-git key \'{}\' is missing'.format(key))
 
 
-def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
+def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
+                                  update_requirements):
     """
     Clone and install a single git repository.
     """
@@ -587,23 +616,29 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements)
     if not os.path.exists(dest_dir):
         juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
-        repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
+        repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
+                                  depth=depth)
     else:
         repo_dir = dest_dir
 
+    venv = os.path.join(parent_dir, 'venv')
+
     if update_requirements:
         if not requirements_dir:
             error_out('requirements repo must be cloned before '
                       'updating from global requirements.')
-        _git_update_requirements(repo_dir, requirements_dir)
+        _git_update_requirements(venv, repo_dir, requirements_dir)
 
     juju_log('Installing git repo from dir: {}'.format(repo_dir))
-    pip_install(repo_dir)
+    if http_proxy:
+        pip_install(repo_dir, proxy=http_proxy, venv=venv)
+    else:
+        pip_install(repo_dir, venv=venv)
 
     return repo_dir
 
 
-def _git_update_requirements(package_dir, reqs_dir):
+def _git_update_requirements(venv, package_dir, reqs_dir):
     """
     Update from global requirements.
 
@@ -612,25 +647,38 @@
     """
     orig_dir = os.getcwd()
     os.chdir(reqs_dir)
-    cmd = ['python', 'update.py', package_dir]
+    python = os.path.join(venv, 'bin/python')
+    cmd = [python, 'update.py', package_dir]
     try:
         subprocess.check_call(cmd)
     except subprocess.CalledProcessError:
         package = os.path.basename(package_dir)
-        error_out("Error updating {} from global-requirements.txt".format(package))
+        error_out("Error updating {} from "
+                  "global-requirements.txt".format(package))
     os.chdir(orig_dir)
 
 
+def git_pip_venv_dir(projects_yaml):
+    """
+    Return the pip virtualenv path.
+    """
+    parent_dir = '/mnt/openstack-git'
+
+    projects = _git_yaml_load(projects_yaml)
+
+    if 'directory' in projects.keys():
+        parent_dir = projects['directory']
+
+    return os.path.join(parent_dir, 'venv')
+
+
 def git_src_dir(projects_yaml, project):
     """
     Return the directory where the specified project's source is located.
     """
     parent_dir = '/mnt/openstack-git'
 
-    if not projects_yaml:
-        return
-
-    projects = yaml.load(projects_yaml)
+    projects = _git_yaml_load(projects_yaml)
 
     if 'directory' in projects.keys():
         parent_dir = projects['directory']
@@ -640,3 +688,15 @@ def git_src_dir(projects_yaml, project):
             return os.path.join(parent_dir, os.path.basename(p['repository']))
 
     return None
+
+
+def git_yaml_value(projects_yaml, key):
+    """
+    Return the value in projects_yaml for the specified key.
+ """ + projects = _git_yaml_load(projects_yaml) + + if key in projects.keys(): + return projects[key] + + return None diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py index 8659516..10b32e3 100644 --- a/hooks/charmhelpers/contrib/python/packages.py +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -17,8 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import os +import subprocess + from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import log +from charmhelpers.core.hookenv import charm_dir, log try: from pip import main as pip_execute @@ -33,6 +36,8 @@ __author__ = "Jorge Niedbalski " def parse_options(given, available): """Given a set of options, check if available""" for key, value in sorted(given.items()): + if not value: + continue if key in available: yield "--{0}={1}".format(key, value) @@ -51,11 +56,15 @@ def pip_install_requirements(requirements, **options): pip_execute(command) -def pip_install(package, fatal=False, upgrade=False, **options): +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): """Install a python package""" - command = ["install"] + if venv: + venv_python = os.path.join(venv, 'bin/pip') + command = [venv_python, "install"] + else: + command = ["install"] - available_options = ('proxy', 'src', 'log', "index-url", ) + available_options = ('proxy', 'src', 'log', 'index-url', ) for option in parse_options(options, available_options): command.append(option) @@ -69,7 +78,10 @@ def pip_install(package, fatal=False, upgrade=False, **options): log("Installing {} package with options: {}".format(package, command)) - pip_execute(command) + if venv: + subprocess.check_call(command) + else: + pip_execute(command) def pip_uninstall(package, **options): @@ -94,3 +106,16 @@ def pip_list(): """Returns the list of current python installed packages """ return pip_execute(["list"]) + + +def pip_create_virtualenv(path=None): + """Create an isolated Python environment.""" + apt_install('python-virtualenv') + + if path: + venv_path = path + else: + venv_path = os.path.join(charm_dir(), 'venv') + + if not os.path.exists(venv_path): + subprocess.check_call(['virtualenv', venv_path]) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 86f805f..0add16d 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -21,12 +21,16 @@ # Charm Helpers Developers from __future__ import print_function +from distutils.version import LooseVersion +from functools import wraps +import glob import os import json import yaml import subprocess import sys import errno +import tempfile from subprocess import CalledProcessError import six @@ -58,15 +62,17 @@ def cached(func): will cache the result of unit_get + 'test' for future calls. """ + @wraps(func) def wrapper(*args, **kwargs): global cache key = str((func, args, kwargs)) try: return cache[key] except KeyError: - res = func(*args, **kwargs) - cache[key] = res - return res + pass # Drop out of the exception handler scope. 
+        res = func(*args, **kwargs)
+        cache[key] = res
+        return res
     return wrapper
 
 
@@ -178,7 +184,7 @@ def local_unit():
 
 def remote_unit():
     """The remote unit for the current relation hook"""
-    return os.environ['JUJU_REMOTE_UNIT']
+    return os.environ.get('JUJU_REMOTE_UNIT', None)
 
 
 def service_name():
@@ -238,23 +244,7 @@ class Config(dict):
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
         if os.path.exists(self.path):
             self.load_previous()
-
-    def __getitem__(self, key):
-        """For regular dict lookups, check the current juju config first,
-        then the previous (saved) copy. This ensures that user-saved values
-        will be returned by a dict lookup.
-
-        """
-        try:
-            return dict.__getitem__(self, key)
-        except KeyError:
-            return (self._prev_dict or {})[key]
-
-    def keys(self):
-        prev_keys = []
-        if self._prev_dict is not None:
-            prev_keys = self._prev_dict.keys()
-        return list(set(prev_keys + list(dict.keys(self))))
+        atexit(self._implicit_save)
 
     def load_previous(self, path=None):
         """Load previous copy of config from disk.
@@ -273,6 +263,9 @@ class Config(dict):
         self.path = path or self.path
         with open(self.path) as f:
             self._prev_dict = json.load(f)
+        for k, v in self._prev_dict.items():
+            if k not in self:
+                self[k] = v
 
     def changed(self, key):
         """Return True if the current value for this key is different from
@@ -304,13 +297,13 @@ class Config(dict):
         instance.
 
         """
-        if self._prev_dict:
-            for k, v in six.iteritems(self._prev_dict):
-                if k not in self:
-                    self[k] = v
         with open(self.path, 'w') as f:
             json.dump(self, f)
 
+    def _implicit_save(self):
+        if self.implicit_save:
+            self.save()
+
 
 @cached
 def config(scope=None):
@@ -353,18 +346,49 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
     relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
+    accepts_file = "--file" in subprocess.check_output(
+        relation_cmd_line + ["--help"], universal_newlines=True)
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
-    for k, v in (list(relation_settings.items()) + list(kwargs.items())):
-        if v is None:
-            relation_cmd_line.append('{}='.format(k))
-        else:
-            relation_cmd_line.append('{}={}'.format(k, v))
-    subprocess.check_call(relation_cmd_line)
+    settings = relation_settings.copy()
+    settings.update(kwargs)
+    for key, value in settings.items():
+        # Force value to be a string: it always should, but some call
+        # sites pass in things like dicts or numbers.
+        if value is not None:
+            settings[key] = "{}".format(value)
+    if accepts_file:
+        # --file was introduced in Juju 1.23.2. Use it by default if
+        # available, since otherwise we'll break if the relation data is
+        # too big. Ideally we should tell relation-set to read the data from
+        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
+        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+        subprocess.check_call(
+            relation_cmd_line + ["--file", settings_file.name])
+        os.remove(settings_file.name)
+    else:
+        for key, value in settings.items():
+            if value is None:
+                relation_cmd_line.append('{}='.format(key))
+            else:
+                relation_cmd_line.append('{}={}'.format(key, value))
+        subprocess.check_call(relation_cmd_line)
     # Flush cache of any relation-gets for local unit
     flush(local_unit())
 
 
+def relation_clear(r_id=None):
+    ''' Clears any relation data already set on relation r_id '''
+    settings = relation_get(rid=r_id,
+                            unit=local_unit())
+    for setting in settings:
+        if setting not in ['public-address', 'private-address']:
+            settings[setting] = None
+    relation_set(relation_id=r_id,
+                 **settings)
+
+
 @cached
 def relation_ids(reltype=None):
     """A list of relation_ids"""
@@ -509,6 +533,11 @@ def unit_get(attribute):
         return None
 
 
+def unit_public_ip():
+    """Get this unit's public IP address"""
+    return unit_get('public-address')
+
+
 def unit_private_ip():
     """Get this unit's private IP address"""
     return unit_get('private-address')
@@ -541,10 +570,14 @@ class Hooks(object):
             hooks.execute(sys.argv)
     """
 
-    def __init__(self, config_save=True):
+    def __init__(self, config_save=None):
         super(Hooks, self).__init__()
         self._hooks = {}
-        self._config_save = config_save
+
+        # For unknown reasons, we allow the Hooks constructor to override
+        # config().implicit_save.
+        if config_save is not None:
+            config().implicit_save = config_save
 
     def register(self, name, function):
         """Register a hook"""
@@ -552,13 +585,16 @@ class Hooks(object):
 
     def execute(self, args):
         """Execute a registered hook based on args[0]"""
+        _run_atstart()
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
-            self._hooks[hook_name]()
-            if self._config_save:
-                cfg = config()
-                if cfg.implicit_save:
-                    cfg.save()
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
         else:
             raise UnregisteredHookError(hook_name)
 
@@ -605,3 +641,160 @@ def action_fail(message):
 
     The results set by action_set are preserved."""
     subprocess.check_call(['action-fail', message])
+
+
+def status_set(workload_state, message):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message unstead.
+
+    workload_state -- valid juju workload state.
+    message        -- status update message
+    """
+    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
+    if workload_state not in valid_states:
+        raise ValueError(
+            '{!r} is not a valid workload state'.format(workload_state)
+        )
+    cmd = ['status-set', workload_state, message]
+    try:
+        ret = subprocess.call(cmd)
+        if ret == 0:
+            return
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'status-set failed: {} {}'.format(workload_state,
+                                                    message)
+    log(log_message, level='INFO')
+
+
+def status_get():
+    """Retrieve the previously set juju workload state
+
+    If the status-set command is not found then assume this is juju < 1.23 and
+    return 'unknown'
+    """
+    cmd = ['status-get']
+    try:
+        raw_status = subprocess.check_output(cmd, universal_newlines=True)
+        status = raw_status.rstrip()
+        return status
+    except OSError as e:
+        if e.errno == errno.ENOENT:
+            return 'unknown'
+        else:
+            raise
+
+
+def translate_exc(from_exc, to_exc):
+    def inner_translate_exc1(f):
+        def inner_translate_exc2(*args, **kwargs):
+            try:
+                return f(*args, **kwargs)
+            except from_exc:
+                raise to_exc
+
+        return inner_translate_exc2
+
+    return inner_translate_exc1
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+    """Does the current unit hold the juju leadership
+
+    Uses juju to determine whether the current unit is the leader of its peers
+    """
+    cmd = ['is-leader', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+    """Juju leader get value(s)"""
+    cmd = ['leader-get', '--format=json'] + [attribute or '-']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+    """Juju leader set value(s)"""
+    # Don't log secrets.
+    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+    cmd = ['leader-set']
+    settings = settings or {}
+    settings.update(kwargs)
+    for k, v in settings.items():
+        if v is None:
+            cmd.append('{}='.format(k))
+        else:
+            cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+@cached
+def juju_version():
+    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+    return subprocess.check_output([jujud, 'version'],
+                                   universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+    """Return True if the Juju version is at least the provided version"""
+    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+    '''Schedule a callback to run before the main hook.
+
+    Callbacks are run in the order they were added.
+
+    This is useful for modules and classes to perform initialization
+    and inject behavior. In particular:
+        - Run common code before all of your hooks, such as logging
+          the hook name or interesting relation data.
+        - Defer object or module initialization that requires a hook
+          context until we know there actually is a hook context,
+          making testing easier.
+        - Rather than requiring charm authors to include boilerplate to
+          invoke your helper's behavior, have it run automatically if
+          your object is instantiated or module imported.
+
+    This is not at all useful after your hook framework as been launched.
+    '''
+    global _atstart
+    _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+    '''Schedule a callback to run on successful hook completion.
+
+    Callbacks are run in the reverse order that they were added.'''
+    _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+    '''Hook frameworks must invoke this before running the main hook body.'''
+    global _atstart
+    for callback, args, kwargs in _atstart:
+        callback(*args, **kwargs)
+    del _atstart[:]
+
+
+def _run_atexit():
+    '''Hook frameworks must invoke this after the main hook body has
+    successfully completed. Do not invoke it if the hook fails.'''
+    global _atexit
+    for callback, args, kwargs in reversed(_atexit):
+        callback(*args, **kwargs)
+    del _atexit[:]
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 830822a..901a4cf 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -24,6 +24,7 @@
 import os
 import re
 import pwd
+import glob
 import grp
 import random
 import string
@@ -90,7 +91,7 @@ def service_available(service_name):
             ['service', service_name, 'status'],
             stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError as e:
-        return 'unrecognized service' not in e.output
+        return b'unrecognized service' not in e.output
     else:
         return True
 
@@ -269,6 +270,21 @@ def file_hash(path, hash_type='md5'):
         return None
 
 
+def path_hash(path):
+    """
+    Generate a hash checksum of all files matching 'path'. Standard wildcards
+    like '*' and '?' are supported, see documentation for the 'glob' module for
+    more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+        Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
 def check_hash(path, checksum, hash_type='md5'):
     """
     Validate a file using a cryptographic checksum.
@@ -296,23 +312,25 @@ def restart_on_change(restart_map, stopstart=False):
 
         @restart_on_change({
             '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
             })
-        def ceph_client_changed():
+        def config_changed():
             pass  # your code here
 
         In this example, the cinder-api and cinder-volume services
         would be restarted if /etc/ceph/ceph.conf is changed by the
-        ceph_client_changed function.
+        ceph_client_changed function. The apache2 service would be
+        restarted if any file matching the pattern got changed, created
+        or removed. Standard wildcards are supported, see documentation
+        for the 'glob' module for more information.
     """
     def wrap(f):
         def wrapped_f(*args, **kwargs):
-            checksums = {}
-            for path in restart_map:
-                checksums[path] = file_hash(path)
+            checksums = {path: path_hash(path) for path in restart_map}
             f(*args, **kwargs)
             restarts = []
             for path in restart_map:
-                if checksums[path] != file_hash(path):
+                if path_hash(path) != checksums[path]:
                     restarts += restart_map[path]
             services_list = list(OrderedDict.fromkeys(restarts))
             if not stopstart:
diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py
index c5534e4..a42660c 100644
--- a/hooks/charmhelpers/core/services/base.py
+++ b/hooks/charmhelpers/core/services/base.py
@@ -15,9 +15,9 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
-import re
 import json
-from collections import Iterable
+from inspect import getargspec
+from collections import Iterable, OrderedDict
 
 from charmhelpers.core import host
 from charmhelpers.core import hookenv
@@ -119,7 +119,7 @@ class ServiceManager(object):
         """
         self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
         self._ready = None
-        self.services = {}
+        self.services = OrderedDict()
         for service in services or []:
             service_name = service['service']
             self.services[service_name] = service
@@ -128,15 +128,18 @@ class ServiceManager(object):
         """
         Handle the current hook by doing The Right Thing with the registered services.
         """
-        hook_name = hookenv.hook_name()
-        if hook_name == 'stop':
-            self.stop_services()
-        else:
-            self.provide_data()
-            self.reconfigure_services()
-        cfg = hookenv.config()
-        if cfg.implicit_save:
-            cfg.save()
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()
 
     def provide_data(self):
         """
@@ -145,15 +148,36 @@ class ServiceManager(object):
         A provider must have a `name` attribute, which indicates which relation
         to set data on, and a `provide_data()` method, which returns a dict of
         data to set.
+
+        The `provide_data()` method can optionally accept two parameters:
+
+        * ``remote_service`` The name of the remote service that the data will
+          be provided to. The `provide_data()` method will be called once
+          for each connected service (not unit). This allows the method to
+          tailor its data to the given service.
+        * ``service_ready`` Whether or not the service definition had all of
+          its requirements met, and thus the ``data_ready`` callbacks run.
+
+        Note that the ``provided_data`` methods are now called **after** the
+        ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
+        a chance to generate any data necessary for the providing to the remote
+        services.
""" - hook_name = hookenv.hook_name() - for service in self.services.values(): + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) for provider in service.get('provided_data', []): - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): - data = provider.provide_data() - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data - if _ready: - hookenv.relation_set(None, data) + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) def reconfigure_services(self, *service_names): """ diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 792e629..9a1a251 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -158,7 +158,7 @@ def filter_installed_packages(packages): def apt_cache(in_memory=True): """Build and return an apt cache""" - import apt_pkg + from apt import apt_pkg apt_pkg.init() if in_memory: apt_pkg.config.set("Dir::Cache::pkgcache", "") diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py index 93aae87..ddc25b7 100644 --- a/hooks/charmhelpers/fetch/giturl.py +++ b/hooks/charmhelpers/fetch/giturl.py @@ -45,14 +45,16 @@ class GitUrlFetchHandler(BaseFetchHandler): else: return True - def clone(self, source, dest, branch): + def clone(self, source, dest, branch, depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - repo = Repo.clone_from(source, dest) - repo.git.checkout(branch) + if depth: + Repo.clone_from(source, dest, branch=branch, depth=depth) + else: + Repo.clone_from(source, dest, branch=branch) - def install(self, source, branch="master", dest=None): + def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -63,7 +65,7 @@ class GitUrlFetchHandler(BaseFetchHandler): if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: - self.clone(source, dest_dir, branch) + self.clone(source, dest_dir, branch, depth) except GitCommandError as e: raise UnhandledSource(e.message) except OSError as e: