From b9be6d3fb6a6a111a9d5bd693c036b302887e7d5 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 2 Jul 2014 15:20:13 +0000 Subject: [PATCH 1/3] Move charm-helpers.yaml to charm-helpers-hooks.yaml and add charm-helpers-tests.yaml. --- Makefile | 3 ++- charm-helpers.yaml => charm-helpers-hooks.yaml | 0 charm-helpers-tests.yaml | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) rename charm-helpers.yaml => charm-helpers-hooks.yaml (100%) create mode 100644 charm-helpers-tests.yaml diff --git a/Makefile b/Makefile index 540fd86..86a48df 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,8 @@ test: @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests sync: - @charm-helper-sync -c charm-helpers.yaml + @charm-helper-sync -c charm-helpers-hooks.yaml + @charm-helper-sync -c charm-helpers-tests.yaml publish: lint test bzr push lp:charms/swift-proxy diff --git a/charm-helpers.yaml b/charm-helpers-hooks.yaml similarity index 100% rename from charm-helpers.yaml rename to charm-helpers-hooks.yaml diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml new file mode 100644 index 0000000..48b12f6 --- /dev/null +++ b/charm-helpers-tests.yaml @@ -0,0 +1,5 @@ +branch: lp:charm-helpers +destination: tests/charmhelpers +include: + - contrib.amulet + - contrib.openstack.amulet From 04dfb3895660414c03d87908ac31d89cda9dc5a5 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 10 Jul 2014 20:50:01 +0000 Subject: [PATCH 2/3] Sync with charm-helpers --- .../charmhelpers/contrib/hahelpers/cluster.py | 1 + .../contrib/openstack/amulet/__init__.py | 0 .../contrib/openstack/amulet/deployment.py | 57 ++++ .../contrib/openstack/amulet/utils.py | 253 ++++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 58 +++- .../charmhelpers/contrib/openstack/neutron.py | 14 + .../contrib/openstack/templating.py | 37 ++- hooks/charmhelpers/contrib/openstack/utils.py | 7 +- .../contrib/storage/linux/ceph.py | 2 +- .../contrib/storage/linux/utils.py | 1 + hooks/charmhelpers/core/fstab.py | 116 ++++++++ hooks/charmhelpers/core/hookenv.py | 9 +- hooks/charmhelpers/core/host.py | 40 ++- hooks/charmhelpers/fetch/__init__.py | 40 +-- hooks/charmhelpers/fetch/bzrurl.py | 3 +- tests/charmhelpers/__init__.py | 0 tests/charmhelpers/contrib/__init__.py | 0 tests/charmhelpers/contrib/amulet/__init__.py | 0 .../charmhelpers/contrib/amulet/deployment.py | 63 +++++ tests/charmhelpers/contrib/amulet/utils.py | 157 +++++++++++ .../contrib/openstack/__init__.py | 0 .../contrib/openstack/amulet/__init__.py | 0 .../contrib/openstack/amulet/deployment.py | 57 ++++ .../contrib/openstack/amulet/utils.py | 253 ++++++++++++++++++ 24 files changed, 1100 insertions(+), 68 deletions(-) create mode 100644 hooks/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 hooks/charmhelpers/contrib/openstack/amulet/utils.py create mode 100644 hooks/charmhelpers/core/fstab.py create mode 100644 tests/charmhelpers/__init__.py create mode 100644 tests/charmhelpers/contrib/__init__.py create mode 100644 tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 tests/charmhelpers/contrib/amulet/utils.py create mode 100644 tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 
tests/charmhelpers/contrib/openstack/amulet/utils.py diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index bf832f7..4e1a473 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -170,6 +170,7 @@ def canonical_url(configs, vip_setting='vip'): :configs : OSTemplateRenderer: A config tempating object to inspect for a complete https context. + :vip_setting: str: Setting in charm config that specifies VIP address. ''' diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 0000000..de2a6bf --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,57 @@ +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms.""" + + def __init__(self, series, openstack=None, source=None): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin.""" + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + name = 0 + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + + if self.openstack: + for svc in services: + charm_name = self._get_charm_name(svc[name]) + if charm_name not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc[name], config) + + if self.source: + for svc in services: + charm_name = self._get_charm_name(svc[name]) + if charm_name in use_source: + config = {'source': self.source} + self.d.configure(svc[name], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Return an integer representing the enum value of the openstack + release.""" + self.precise_essex, self.precise_folsom, self.precise_grizzly, \ + self.precise_havana, self.precise_icehouse, \ + self.trusty_icehouse = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 0000000..806bd21 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,253 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from charmhelpers.contrib.amulet.utils import ( + 
AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms.""" + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint.""" + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if admin_port in ep.adminurl and internal_port in ep.internalurl \ + and public_port in ep.publicurl: + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints.""" + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in expected.iteritems(): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate a list of actual tenant data vs list of expected tenant + data.""" + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate a list of actual role data vs a list of expected role + data.""" + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate a list of actual user data vs a list of expected user + data.""" + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate a list of actual flavors vs a list of expected flavors.""" + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): 
+ """Return True if tenant exists""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + + if not os.path.exists(cirros_img): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, cirros_img) + f.close() + + with open(cirros_img) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = 
nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 474d51e..eff2bd3 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -243,23 +243,31 @@ class IdentityServiceContext(OSContextGenerator): class AMQPContext(OSContextGenerator): - interfaces = ['amqp'] - def __init__(self, ssl_dir=None): + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): self.ssl_dir = ssl_dir + self.rel_name = rel_name + self.relation_prefix = relation_prefix + self.interfaces = [rel_name] def __call__(self): log('Generating template context for amqp') conf = config() + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' + if self.relation_prefix: + user_setting = self.relation_prefix + '-rabbit-user' + vhost_setting = self.relation_prefix + '-rabbit-vhost' + try: - username = conf['rabbit-user'] - vhost = conf['rabbit-vhost'] + username = conf[user_setting] + vhost = conf[vhost_setting] except KeyError as e: log('Could not generate shared_db context. ' 'Missing required charm config options: %s.' % e) raise OSContextError ctxt = {} - for rid in relation_ids('amqp'): + for rid in relation_ids(self.rel_name): ha_vip_only = False for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): @@ -418,12 +426,13 @@ class ApacheSSLContext(OSContextGenerator): """ Generates a context for an apache vhost configuration that configures HTTPS reverse proxying for one or many endpoints. Generated context - looks something like: - { - 'namespace': 'cinder', - 'private_address': 'iscsi.mycinderhost.com', - 'endpoints': [(8776, 8766), (8777, 8767)] - } + looks something like:: + + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } The endpoints list consists of a tuples mapping external ports to internal ports. 
@@ -541,6 +550,26 @@ class NeutronContext(OSContextGenerator): return nvp_ctxt + def n1kv_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_ctxt = { + 'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': config( + 'n1kv_restrict_policy_profiles'), + } + + return n1kv_ctxt + def neutron_ctxt(self): if https(): proto = 'https' @@ -572,6 +601,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.ovs_ctxt()) elif self.plugin in ['nvp', 'nsx']: ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -611,7 +642,7 @@ class SubordinateConfigContext(OSContextGenerator): The subordinate interface allows subordinates to export their configuration requirements to the principle for multiple config files and multiple serivces. Ie, a subordinate that has interfaces - to both glance and nova may export to following yaml blob as json: + to both glance and nova may export to following yaml blob as json:: glance: /etc/glance/glance-api.conf: @@ -630,7 +661,8 @@ class SubordinateConfigContext(OSContextGenerator): It is then up to the principle charms to subscribe this context to the service+config file it is interestd in. Configuration data will - be available in the template context, in glance's case, as: + be available in the template context, in glance's case, as:: + ctxt = { ... other context ... 'subordinate_config': { diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index ba97622..84d97bc 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -128,6 +128,20 @@ def neutron_plugins(): 'server_packages': ['neutron-server', 'neutron-plugin-vmware'], 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 4595778..f544271 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -30,17 +30,17 @@ def get_loader(templates_dir, os_release): loading dir. A charm may also ship a templates dir with this module - and it will be appended to the bottom of the search list, eg: - hooks/charmhelpers/contrib/openstack/templates. + and it will be appended to the bottom of the search list, eg:: - :param templates_dir: str: Base template directory containing release - sub-directories. - :param os_release : str: OpenStack release codename to construct template - loader. 
+ hooks/charmhelpers/contrib/openstack/templates - :returns : jinja2.ChoiceLoader constructed with a list of - jinja2.FilesystemLoaders, ordered in descending - order by OpenStack release. + :param templates_dir (str): Base template directory containing release + sub-directories. + :param os_release (str): OpenStack release codename to construct template + loader. + :returns: jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) for rel in OPENSTACK_CODENAMES.itervalues()] @@ -111,7 +111,8 @@ class OSConfigRenderer(object): and ease the burden of managing config templates across multiple OpenStack releases. - Basic usage: + Basic usage:: + # import some common context generates from charmhelpers from charmhelpers.contrib.openstack import context @@ -131,21 +132,19 @@ class OSConfigRenderer(object): # write out all registered configs configs.write_all() - Details: + **OpenStack Releases and template loading** - OpenStack Releases and template loading - --------------------------------------- When the object is instantiated, it is associated with a specific OS release. This dictates how the template loader will be constructed. The constructed loader attempts to load the template from several places in the following order: - - from the most recent OS release-specific template dir (if one exists) - - the base templates_dir - - a template directory shipped in the charm with this helper file. + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. + For the example above, '/tmp/templates' contains the following structure:: - For the example above, '/tmp/templates' contains the following structure: /tmp/templates/nova.conf /tmp/templates/api-paste.ini /tmp/templates/grizzly/api-paste.ini @@ -169,8 +168,8 @@ class OSConfigRenderer(object): $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows us to ship common templates (haproxy, apache) with the helpers. - Context generators - --------------------------------------- + **Context generators** + Context generators are used to generate template contexts during hook execution. Doing so may require inspecting service relations, charm config, etc. When registered, a config file is associated with a list diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 1a44ab1..127b03f 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -3,7 +3,6 @@ # Common python helper functions used for OpenStack charms. 
from collections import OrderedDict -import apt_pkg as apt import subprocess import os import socket @@ -85,6 +84,8 @@ def get_os_codename_install_source(src): '''Derive OpenStack release codename from a given installation source.''' ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] rel = '' + if src is None: + return rel if src in ['distro', 'distro-proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] @@ -132,6 +133,7 @@ def get_os_version_codename(codename): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt apt.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -189,7 +191,7 @@ def get_os_version_package(pkg, fatal=True): for version, cname in vers_map.iteritems(): if cname == codename: return version - #e = "Could not determine OpenStack version for package: %s" % pkg + # e = "Could not determine OpenStack version for package: %s" % pkg # error_out(e) @@ -325,6 +327,7 @@ def openstack_upgrade_available(package): """ + import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) available_vers = get_os_version_install_source(src) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1241741..768438a 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -303,7 +303,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, blk_device, fstype, system_services=[]): """ NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. + the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, and the device is formatted and mounted at the given mount_point. diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index b87ef26..8d0f611 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -37,6 +37,7 @@ def zap_disk(block_device): check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + def is_device_mounted(device): '''Given a device path, return True if that device is mounted, and False if it isn't. diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py new file mode 100644 index 0000000..cfaf0a6 --- /dev/null +++ b/hooks/charmhelpers/core/fstab.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import os + + +class Fstab(file): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = d + self.p = p + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + file.__init__(self, self._path, 'r+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + try: + if not line.startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write(str(entry) + '\n') + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = self.readlines() + + found = False + for index, line in enumerate(lines): + if not line.startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines)) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index c2e66f6..c953043 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -25,7 +25,7 @@ cache = {} def cached(func): """Cache return values for multiple executions of func + args - For example: + For example:: @cached def unit_get(attribute): @@ -445,18 +445,19 @@ class UnregisteredHookError(Exception): class Hooks(object): """A convenient handler for hook functions. - Example: + Example:: + hooks = Hooks() # register a hook, taking its name from the function name @hooks.hook() def install(): - ... + pass # your code here # register a hook, providing a custom hook name @hooks.hook("config-changed") def config_changed(): - ... 
+ pass # your code here if __name__ == "__main__": # execute a hook based on the name the program is called by diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 186147f..8b617a4 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -12,11 +12,11 @@ import random import string import subprocess import hashlib -import apt_pkg from collections import OrderedDict from hookenv import log +from fstab import Fstab def service_start(service_name): @@ -35,7 +35,8 @@ def service_restart(service_name): def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if reload fails""" + """Reload a system service, optionally falling back to restart if + reload fails""" service_result = service('reload', service_name) if not service_result and restart_on_failure: service_result = service('restart', service_name) @@ -144,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def mount(device, mountpoint, options=None, persist=False): +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): """Mount a filesystem at a particular mountpoint""" cmd_args = ['mount'] if options is not None: @@ -155,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False): except subprocess.CalledProcessError, e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_add(device, mountpoint, filesystem, options=options) return True @@ -169,9 +182,9 @@ def umount(mountpoint, persist=False): except subprocess.CalledProcessError, e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False + if persist: - # TODO: update fstab - pass + return fstab_remove(mountpoint) return True @@ -198,13 +211,13 @@ def file_hash(path): def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing - This function is used a decorator, for example + This function is used a decorator, for example:: @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] }) def ceph_client_changed(): - ... 
+ pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the @@ -300,10 +313,13 @@ def get_nic_hwaddr(nic): def cmp_pkgrevno(package, revno, pkgcache=None): '''Compare supplied revno with the revno of the installed package - 1 => Installed revno is greater than supplied arg - 0 => Installed revno is the same as supplied arg - -1 => Installed revno is less than supplied arg + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + ''' + import apt_pkg if not pkgcache: apt_pkg.init() pkgcache = apt_pkg.Cache() diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index e8e837a..5be512c 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,6 @@ from charmhelpers.core.hookenv import ( config, log, ) -import apt_pkg import os @@ -117,6 +116,7 @@ class BaseFetchHandler(object): def filter_installed_packages(packages): """Returns a list of packages that require installation""" + import apt_pkg apt_pkg.init() # Tell apt to build an in-memory cache to prevent race conditions (if @@ -235,31 +235,39 @@ def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """ - Configure multiple sources from charm configuration + Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The frament needs to be included as a string. Example config: - install_sources: + install_sources: | - "ppa:foo" - "http://example.com/repo precise main" - install_keys: + install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. 
""" - sources = safe_load(config(sources_var)) - keys = config(keys_var) - if keys is not None: - keys = safe_load(keys) - if isinstance(sources, basestring) and ( - keys is None or isinstance(keys, basestring)): - add_source(sources, keys) + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, basestring): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) else: - if not len(sources) == len(keys): - msg = 'Install sources and keys lists are different lengths' - raise SourceConfigError(msg) - for src_num in range(len(sources)): - add_source(sources[src_num], keys[src_num]) + if isinstance(keys, basestring): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) if update: apt_update(fatal=True) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py index db5dd9a..0e580e4 100644 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -39,7 +39,8 @@ class BzrUrlFetchHandler(BaseFetchHandler): def install(self, source): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0755) try: diff --git a/tests/charmhelpers/__init__.py b/tests/charmhelpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/charmhelpers/contrib/__init__.py b/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/charmhelpers/contrib/amulet/__init__.py b/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 0000000..9e19432 --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,63 @@ +import amulet +import re + + +class AmuletDeployment(object): + """This class provides generic Amulet deployment and test runner + methods.""" + + def __init__(self, series): + """Initialize the deployment environment.""" + self.series = series + self.d = amulet.Deployment(series=self.series) + + def _get_charm_name(self, service_name): + """Gets the charm name from the service name. Unique service names can + be specified with a '-service#' suffix (e.g. 
mysql-service1).""" + if re.match(r"^.*-service\d{1,3}$", service_name): + charm_name = re.sub('\-service\d{1,3}$', '', service_name) + else: + charm_name = service_name + return charm_name + + def _add_services(self, this_service, other_services): + """Add services to the deployment where this_service is the local charm + that we're focused on testing and other_services are the other + charms that come from the charm store.""" + name, units = range(2) + + charm_name = self._get_charm_name(this_service[name]) + self.d.add(this_service[name], + units=this_service[units]) + + for svc in other_services: + charm_name = self._get_charm_name(svc[name]) + self.d.add(svc[name], + charm='cs:{}/{}'.format(self.series, charm_name), + units=svc[units]) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in relations.iteritems(): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup() + self.d.sentry.wait() + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 0000000..03e4a81 --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,157 @@ +import ConfigParser +import io +import logging +import re +import sys +from time import sleep + + +class AmuletUtils(object): + """This class provides common utility functions that are used by Amulet + tests.""" + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = \ + log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' 
+ r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def validate_services(self, commands): + """Verify the specified services are running on the corresponding + service units.""" + for k, v in commands.iteritems(): + for cmd in v: + output, code = k.run(cmd) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + config = ConfigParser.ConfigParser() + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, expected): + """Verify that the specified section of the config file contains + the expected option key:value pairs.""" + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + if config.get(section, k) != expected[k]: + return "section [{}] {}:{} != expected {}:{}".format(section, + k, config.get(section, k), k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluate a variable and returns a + bool.""" + for k, v in expected.iteritems(): + if k in actual: + if isinstance(v, basestring) or \ + isinstance(v, bool) or \ + isinstance(v, (int, long)): + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + self.log.debug('actual: {}'.format(repr(actual))) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string != None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): + """Determine start time of the process based on the last modification + time of the /proc/pid directory. 
If pgrep_full is True, the process + name is matched against the full command line.""" + if pgrep_full: + cmd = 'pgrep -o -f {}'.format(service) + else: + cmd = 'pgrep -o {}'.format(service) + proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=False, sleep_time=20): + """Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted.""" + sleep(sleep_time) + if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \ + self._get_file_mtime(sentry_unit, filename): + return True + else: + return False + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) diff --git a/tests/charmhelpers/contrib/openstack/__init__.py b/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 0000000..de2a6bf --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,57 @@ +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms.""" + + def __init__(self, series, openstack=None, source=None): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin.""" + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + name = 0 + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + + if self.openstack: + for svc in services: + charm_name = self._get_charm_name(svc[name]) + if charm_name not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc[name], config) + + if self.source: + for svc in services: + charm_name = self._get_charm_name(svc[name]) + if charm_name in use_source: + config = {'source': self.source} + self.d.configure(svc[name], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Return an integer representing the enum value of the openstack + release.""" + self.precise_essex, self.precise_folsom, self.precise_grizzly, \ + self.precise_havana, self.precise_icehouse, \ + self.trusty_icehouse = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): 
self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 0000000..806bd21 --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,253 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms.""" + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint.""" + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if admin_port in ep.adminurl and internal_port in ep.internalurl \ + and public_port in ep.publicurl: + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints.""" + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in expected.iteritems(): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate a list of actual tenant data vs list of expected tenant + data.""" + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate a list of actual role data vs a list of expected role + data.""" + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate a list of actual user data vs a list of expected user + data.""" + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': 
act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate a list of actual flavors vs a list of expected flavors.""" + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + + if not os.path.exists(cirros_img): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, cirros_img) + f.close() + + with open(cirros_img) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + 
self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True From 3bf5ea44a9689f348dbe48843ad357e64270d826 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Fri, 11 Jul 2014 16:57:37 +0000 Subject: [PATCH 3/3] Add Amulet basic tests --- Makefile | 13 +- tests/00-setup | 11 + tests/10-basic-precise-essex | 9 + tests/11-basic-precise-folsom | 11 + tests/12-basic-precise-grizzly | 11 + tests/13-basic-precise-havana | 11 + tests/14-basic-precise-icehouse | 11 + tests/15-basic-trusty-icehouse | 9 + tests/README | 52 ++ tests/basic_deployment.py | 827 ++++++++++++++++++++++++++++++++ 10 files changed, 962 insertions(+), 3 deletions(-) create mode 100755 tests/00-setup create mode 100755 tests/10-basic-precise-essex create mode 100755 tests/11-basic-precise-folsom create mode 100755 tests/12-basic-precise-grizzly create mode 100755 tests/13-basic-precise-havana create mode 100755 tests/14-basic-precise-icehouse create mode 100755 tests/15-basic-trusty-icehouse create mode 100644 tests/README create mode 100644 tests/basic_deployment.py diff --git a/Makefile b/Makefile index 86a48df..79899a5 100644 --- a/Makefile +++ b/Makefile @@ -3,13 +3,20 @@ PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers --ignore=E125 hooks - @flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests + @flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests tests @charm proof -test: - @echo Starting tests... +unit_test: + @echo Starting unit tests... @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests +test: + @echo Starting Amulet tests... 
+ # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY + sync: @charm-helper-sync -c charm-helpers-hooks.yaml @charm-helper-sync -c charm-helpers-tests.yaml diff --git a/tests/00-setup b/tests/00-setup new file mode 100755 index 0000000..f40cdd7 --- /dev/null +++ b/tests/00-setup @@ -0,0 +1,11 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet +sudo apt-get install --yes python-swiftclient +sudo apt-get install --yes python-glanceclient +sudo apt-get install --yes python-keystoneclient +sudo apt-get install --yes python-novaclient diff --git a/tests/10-basic-precise-essex b/tests/10-basic-precise-essex new file mode 100755 index 0000000..ad40ab6 --- /dev/null +++ b/tests/10-basic-precise-essex @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic swift-proxy deployment on precise-essex.""" + +from basic_deployment import SwiftProxyBasicDeployment + +if __name__ == '__main__': + deployment = SwiftProxyBasicDeployment(series='precise') + deployment.run_tests() diff --git a/tests/11-basic-precise-folsom b/tests/11-basic-precise-folsom new file mode 100755 index 0000000..82fe6a6 --- /dev/null +++ b/tests/11-basic-precise-folsom @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic swift-proxy deployment on precise-folsom.""" + +from basic_deployment import SwiftProxyBasicDeployment + +if __name__ == '__main__': + deployment = SwiftProxyBasicDeployment(series='precise', + openstack='cloud:precise-folsom', + source='cloud:precise-updates/folsom') + deployment.run_tests() diff --git a/tests/12-basic-precise-grizzly b/tests/12-basic-precise-grizzly new file mode 100755 index 0000000..c1a434b --- /dev/null +++ b/tests/12-basic-precise-grizzly @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic swift-proxy deployment on precise-grizzly.""" + +from basic_deployment import SwiftProxyBasicDeployment + +if __name__ == '__main__': + deployment = SwiftProxyBasicDeployment(series='precise', + openstack='cloud:precise-grizzly', + source='cloud:precise-updates/grizzly') + deployment.run_tests() diff --git a/tests/13-basic-precise-havana b/tests/13-basic-precise-havana new file mode 100755 index 0000000..0a3780a --- /dev/null +++ b/tests/13-basic-precise-havana @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic swift-proxy deployment on precise-havana.""" + +from basic_deployment import SwiftProxyBasicDeployment + +if __name__ == '__main__': + deployment = SwiftProxyBasicDeployment(series='precise', + openstack='cloud:precise-havana', + source='cloud:precise-updates/havana') + deployment.run_tests() diff --git a/tests/14-basic-precise-icehouse b/tests/14-basic-precise-icehouse new file mode 100755 index 0000000..4fc883a --- /dev/null +++ b/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic swift-proxy deployment on precise-icehouse.""" + +from basic_deployment import SwiftProxyBasicDeployment + +if __name__ == '__main__': + deployment = SwiftProxyBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/tests/15-basic-trusty-icehouse b/tests/15-basic-trusty-icehouse new file mode 100755 index 0000000..93b685b --- /dev/null +++ b/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ 
+#!/usr/bin/python + +"""Amulet tests on a basic swift-proxy deployment on trusty-icehouse.""" + +from basic_deployment import SwiftProxyBasicDeployment + +if __name__ == '__main__': + deployment = SwiftProxyBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/tests/README b/tests/README new file mode 100644 index 0000000..3a9b785 --- /dev/null +++ b/tests/README @@ -0,0 +1,52 @@ +This directory provides Amulet tests that focus on verification of swift-proxy +deployments. + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. + + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should be able to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. + +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne + + * Sample swift command: + swift -A $OS_AUTH_URL --os-tenant-name services --os-username swift \ + --os-password password list + (where tenant/user names and password are in swift-proxy's proxy-server.conf file) diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py new file mode 100644 index 0000000..292475b --- /dev/null +++ b/tests/basic_deployment.py @@ -0,0 +1,827 @@ +#!/usr/bin/python + +import amulet +import swiftclient + +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) + +from charmhelpers.contrib.openstack.amulet.utils import ( + OpenStackAmuletUtils, + DEBUG, # flake8: noqa + ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class SwiftProxyBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic swift-proxy deployment.""" + + def __init__(self, series, openstack=None, source=None): + """Deploy the entire test environment.""" + super(SwiftProxyBasicDeployment, self).__init__(series, openstack, + source) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add the services that we're testing, including the number of units, + where swift-proxy is local, and the other charms are from + the charm store.""" +
this_service = ('swift-proxy', 1) + other_services = [('mysql', 1), + ('keystone', 1), ('glance', 1), ('swift-storage', 1)] + super(SwiftProxyBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'keystone:shared-db': 'mysql:shared-db', + 'swift-proxy:identity-service': 'keystone:identity-service', + 'swift-storage:swift-storage': 'swift-proxy:swift-storage', + 'glance:identity-service': 'keystone:identity-service', + 'glance:shared-db': 'mysql:shared-db', + 'glance:object-store': 'swift-proxy:object-store' + } + super(SwiftProxyBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + swift_proxy_config = {'zone-assignment': 'manual', + 'replicas': '1', + 'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae', + 'use-https': 'no'} + swift_storage_config = {'zone': '1', + 'block-device': 'vdb', + 'overwrite': 'true'} + configs = {'keystone': keystone_config, + 'swift-proxy': swift_proxy_config, + 'swift-storage': swift_storage_config} + super(SwiftProxyBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.glance_sentry = self.d.sentry.unit['glance/0'] + self.swift_proxy_sentry = self.d.sentry.unit['swift-proxy/0'] + self.swift_storage_sentry = self.d.sentry.unit['swift-storage/0'] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + # Authenticate admin with glance endpoint + self.glance = u.authenticate_glance_admin(self.keystone) + + # Authenticate swift user + keystone_relation = self.keystone_sentry.relation('identity-service', + 'swift-proxy:identity-service') + ep = self.keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + self.swift = swiftclient.Connection(authurl=ep, + user=keystone_relation['service_username'], + key=keystone_relation['service_password'], + tenant_name=keystone_relation['service_tenant'], + auth_version='2.0') + + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone, self.demo_tenant): + tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone.roles.create(name=self.demo_role) + self.keystone.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate demo user with keystone + self.keystone_demo = \ + u.authenticate_keystone_user(self.keystone, user=self.demo_user, + password='password', + tenant=self.demo_tenant) + + def test_services(self): + """Verify the expected services are running on the corresponding + service units.""" + swift_storage_services = ['status swift-account', + 'status swift-account-auditor', + 'status swift-account-reaper', + 'status swift-account-replicator', + 'status swift-container', + 'status swift-container-auditor', + 'status swift-container-replicator', + 'status swift-container-updater', + 'status swift-object', + 'status 
swift-object-auditor', + 'status swift-object-replicator', + 'status swift-object-updater'] + if self._get_openstack_release() >= self.precise_icehouse: + swift_storage_services.append('status swift-container-sync') + + commands = { + self.mysql_sentry: ['status mysql'], + self.keystone_sentry: ['status keystone'], + self.glance_sentry: ['status glance-registry', 'status glance-api'], + self.swift_proxy_sentry: ['status swift-proxy'], + self.swift_storage_sentry: swift_storage_services + } + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_users(self): + """Verify all existing users.""" + user1 = {'name': 'demoUser', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'demo@demo.com'} + user2 = {'name': 'admin', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': 'juju@localhost'} + user3 = {'name': 'glance', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': u'juju@localhost'} + user4 = {'name': 'swift', + 'enabled': True, + 'tenantId': u.not_null, + 'id': u.not_null, + 'email': u'juju@localhost'} + expected = [user1, user2, user3, user4] + actual = self.keystone.users.list() + + ret = u.validate_user_data(expected, actual) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_service_catalog(self): + """Verify that the service catalog endpoint data is valid.""" + endpoint_vol = {'adminURL': u.valid_url, + 'region': 'RegionOne', + 'publicURL': u.valid_url, + 'internalURL': u.valid_url} + endpoint_id = {'adminURL': u.valid_url, + 'region': 'RegionOne', + 'publicURL': u.valid_url, + 'internalURL': u.valid_url} + if self._get_openstack_release() >= self.precise_folsom: + endpoint_vol['id'] = u.not_null + endpoint_id['id'] = u.not_null + expected = {'image': [endpoint_id], 'object-store': [endpoint_id], + 'identity': [endpoint_id]} + actual = self.keystone_demo.service_catalog.get_endpoints() + + ret = u.validate_svc_catalog_endpoint_data(expected, actual) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_openstack_object_store_endpoint(self): + """Verify the swift object-store endpoint data.""" + endpoints = self.keystone.endpoints.list() + admin_port = internal_port = public_port = '8080' + expected = {'id': u.not_null, + 'region': 'RegionOne', + 'adminurl': u.valid_url, + 'internalurl': u.valid_url, + 'publicurl': u.valid_url, + 'service_id': u.not_null} + + ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, + public_port, expected) + if ret: + message = 'object-store endpoint: {}'.format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_swift_proxy_identity_service_relation(self): + """Verify the swift-proxy to keystone identity-service relation data.""" + unit = self.swift_proxy_sentry + relation = ['identity-service', 'keystone:identity-service'] + expected = { + 'service': 'swift', + 'region': 'RegionOne', + 'public_url': u.valid_url, + 'internal_url': u.valid_url, + 'private-address': u.valid_ip, + 'requested_roles': 'Member,Admin', + 'admin_url': u.valid_url + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('swift-proxy identity-service', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_keystone_identity_service_relation(self): + """Verify the keystone to swift-proxy identity-service relation data.""" + unit = self.keystone_sentry + relation = ['identity-service', 'swift-proxy:identity-service'] + expected = { +
'service_protocol': 'http', + 'service_tenant': 'services', + 'admin_token': 'ubuntutesting', + 'service_password': u.not_null, + 'service_port': '5000', + 'auth_port': '35357', + 'auth_protocol': 'http', + 'private-address': u.valid_ip, + 'https_keystone': 'False', + 'auth_host': u.valid_ip, + 'service_username': 'swift', + 'service_tenant_id': u.not_null, + 'service_host': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('keystone identity-service', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_swift_storage_swift_storage_relation(self): + """Verify the swift-storage to swift-proxy swift-storage relation + data.""" + unit = self.swift_storage_sentry + relation = ['swift-storage', 'swift-proxy:swift-storage'] + expected = { + 'account_port': '6002', + 'zone': '1', + 'object_port': '6000', + 'container_port': '6001', + 'private-address': u.valid_ip, + 'device': 'vdb' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('swift-storage swift-storage', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_swift_proxy_swift_storage_relation(self): + """Verify the swift-proxy to swift-storage swift-storage relation + data.""" + unit = self.swift_proxy_sentry + relation = ['swift-storage', 'swift-storage:swift-storage'] + expected = { + 'private-address': u.valid_ip, + 'trigger': u.not_null, + 'rings_url': u.valid_url, + 'swift_hash': u.not_null + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('swift-proxy swift-storage', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_glance_object_store_relation(self): + """Verify the glance to swift-proxy object-store relation data.""" + unit = self.glance_sentry + relation = ['object-store', 'swift-proxy:object-store'] + expected = { 'private-address': u.valid_ip } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('glance object-store', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_swift_proxy_object_store_relation(self): + """Verify the swift-proxy to glance object-store relation data.""" + unit = self.swift_proxy_sentry + relation = ['object-store', 'glance:object-store'] + expected = {'private-address': u.valid_ip} + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('swift-proxy object-store', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_restart_on_config_change(self): + """Verify that the specified services are restarted when the config + is changed.""" + svc = 'swift-proxy' + self.d.configure('swift-proxy', {'node-timeout': '90'}) + + if not u.service_restarted(self.swift_proxy_sentry, svc, + '/etc/swift/proxy-server.conf'): + msg = "service {} didn't restart after config change".format(svc) + amulet.raise_status(amulet.FAIL, msg=msg) + + self.d.configure('swift-proxy', {'node-timeout': '60'}) + + def test_swift_config(self): + """Verify the data in the swift config file.""" + unit = self.swift_proxy_sentry + conf = '/etc/swift/swift.conf' + swift_proxy_relation = unit.relation('swift-storage', + 'swift-storage:swift-storage') + expected = { + 'swift_hash_path_suffix': swift_proxy_relation['swift_hash'] + } + + ret = u.validate_config_data(unit, conf, 'swift-hash', expected) + if ret: + message = "swift config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def 
test_proxy_server_icehouse_config(self): + """Verify the data in the proxy-server config file.""" + if self._get_openstack_release() < self.precise_icehouse: + return + + unit = self.swift_proxy_sentry + conf = '/etc/swift/proxy-server.conf' + keystone_relation = self.keystone_sentry.relation('identity-service', + 'swift-proxy:identity-service') + swift_proxy_relation = unit.relation('identity-service', + 'keystone:identity-service') + swift_proxy_ip = swift_proxy_relation['private-address'] + auth_host = keystone_relation['auth_host'] + auth_protocol = keystone_relation['auth_protocol'] + + expected = { + 'DEFAULT': { + 'bind_port': '8080', + 'workers': '0', + 'user': 'swift' + }, + 'pipeline:main': { + 'pipeline': 'gatekeeper healthcheck cache swift3 s3token ' + 'container_sync bulk tempurl slo dlo formpost ' + 'authtoken keystoneauth staticweb ' + 'container-quotas account-quotas proxy-server' + }, + 'app:proxy-server': { + 'use': 'egg:swift#proxy', + 'allow_account_management': 'true', + 'account_autocreate': 'true', + 'node_timeout': '60', + 'recoverable_node_timeout': '30' + }, + 'filter:tempauth': { + 'use': 'egg:swift#tempauth', + 'user_system_root': 'testpass .admin https://{}:8080/v1/' + 'AUTH_system'.format(swift_proxy_ip) + }, + 'filter:healthcheck': {'use': 'egg:swift#healthcheck'}, + 'filter:cache': { + 'use': 'egg:swift#memcache', + 'memcache_servers': '{}:11211'.format(swift_proxy_ip) + }, + 'filter:account-quotas': {'use': 'egg:swift#account_quotas'}, + 'filter:container-quotas': {'use': 'egg:swift#container_quotas'}, + 'filter:staticweb': {'use': 'egg:swift#staticweb'}, + 'filter:bulk': {'use': 'egg:swift#bulk'}, + 'filter:slo': {'use': 'egg:swift#slo'}, + 'filter:dlo': {'use': 'egg:swift#dlo'}, + 'filter:formpost': {'use': 'egg:swift#formpost'}, + 'filter:tempurl': {'use': 'egg:swift#tempurl'}, + 'filter:container_sync': {'use': 'egg:swift#container_sync'}, + 'filter:gatekeeper': {'use': 'egg:swift#gatekeeper'}, + 'filter:keystoneauth': { + 'use': 'egg:swift#keystoneauth', + 'operator_roles': 'Member,Admin' + }, + 'filter:authtoken': { + 'paste.filter_factory': 'keystoneclient.middleware.' + 'auth_token:filter_factory', + 'auth_host': auth_host, + 'auth_port': keystone_relation['auth_port'], + 'auth_protocol': auth_protocol, + 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host, + keystone_relation['service_port']), + 'admin_tenant_name': keystone_relation['service_tenant'], + 'admin_user': keystone_relation['service_username'], + 'admin_password': keystone_relation['service_password'], + 'delay_auth_decision': 'true', + 'signing_dir': '/etc/swift', + 'cache': 'swift.cache' + }, + 'filter:s3token': { + 'paste.filter_factory': 'keystoneclient.middleware.' 
+ 's3_token:filter_factory', + 'service_host': keystone_relation['service_host'], + 'service_port': keystone_relation['service_port'], + 'auth_port': keystone_relation['auth_port'], + 'auth_host': keystone_relation['auth_host'], + 'auth_protocol': keystone_relation['auth_protocol'], + 'auth_token': keystone_relation['admin_token'], + 'admin_token': keystone_relation['admin_token'] + }, + 'filter:swift3': {'use': 'egg:swift3#swift3'} + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "proxy-server config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_proxy_server_havana_config(self): + """Verify the data in the proxy-server config file.""" + if self._get_openstack_release() != self.precise_havana: + return + + unit = self.swift_proxy_sentry + conf = '/etc/swift/proxy-server.conf' + keystone_relation = self.keystone_sentry.relation('identity-service', + 'swift-proxy:identity-service') + swift_proxy_relation = unit.relation('identity-service', + 'keystone:identity-service') + swift_proxy_ip = swift_proxy_relation['private-address'] + auth_host = keystone_relation['auth_host'] + auth_protocol = keystone_relation['auth_protocol'] + + expected = { + 'DEFAULT': { + 'bind_port': '8080', + 'workers': '0', + 'user': 'swift' + }, + 'pipeline:main': { + 'pipeline': 'healthcheck cache swift3 authtoken ' + 'keystoneauth container-quotas account-quotas ' + 'proxy-server' + }, + 'app:proxy-server': { + 'use': 'egg:swift#proxy', + 'allow_account_management': 'true', + 'account_autocreate': 'true', + 'node_timeout': '60', + 'recoverable_node_timeout': '30' + }, + 'filter:tempauth': { + 'use': 'egg:swift#tempauth', + 'user_system_root': 'testpass .admin https://{}:8080/v1/' + 'AUTH_system'.format(swift_proxy_ip) + }, + 'filter:healthcheck': {'use': 'egg:swift#healthcheck'}, + 'filter:cache': { + 'use': 'egg:swift#memcache', + 'memcache_servers': '{}:11211'.format(swift_proxy_ip) + }, + 'filter:account-quotas': {'use': 'egg:swift#account_quotas'}, + 'filter:container-quotas': {'use': 'egg:swift#container_quotas'}, + 'filter:keystoneauth': { + 'use': 'egg:swift#keystoneauth', + 'operator_roles': 'Member,Admin' + }, + 'filter:authtoken': { + 'paste.filter_factory': 'keystoneclient.middleware.' 
+ 'auth_token:filter_factory', + 'auth_host': auth_host, + 'auth_port': keystone_relation['auth_port'], + 'auth_protocol': auth_protocol, + 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host, + keystone_relation['service_port']), + 'admin_tenant_name': keystone_relation['service_tenant'], + 'admin_user': keystone_relation['service_username'], + 'admin_password': keystone_relation['service_password'], + 'delay_auth_decision': 'true', + 'signing_dir': '/etc/swift', + 'cache': 'swift.cache' + }, + 'filter:s3token': { + 'paste.filter_factory': 'keystone.middleware.s3_token:' + 'filter_factory', + 'service_host': keystone_relation['service_host'], + 'service_port': keystone_relation['service_port'], + 'auth_port': keystone_relation['auth_port'], + 'auth_host': keystone_relation['auth_host'], + 'auth_protocol': keystone_relation['auth_protocol'], + 'auth_token': keystone_relation['admin_token'], + 'admin_token': keystone_relation['admin_token'], + 'service_protocol': keystone_relation['service_protocol'] + }, + 'filter:swift3': {'use': 'egg:swift3#swift3'} + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "proxy-server config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_proxy_server_grizzly_config(self): + """Verify the data in the proxy-server config file.""" + if self._get_openstack_release() != self.precise_grizzly: + return + + unit = self.swift_proxy_sentry + conf = '/etc/swift/proxy-server.conf' + keystone_relation = self.keystone_sentry.relation('identity-service', + 'swift-proxy:identity-service') + swift_proxy_relation = unit.relation('identity-service', + 'keystone:identity-service') + swift_proxy_ip = swift_proxy_relation['private-address'] + auth_host = keystone_relation['auth_host'] + auth_protocol = keystone_relation['auth_protocol'] + + expected = { + 'DEFAULT': { + 'bind_port': '8080', + 'workers': '0', + 'user': 'swift' + }, + 'pipeline:main': { + 'pipeline': 'healthcheck cache swift3 s3token authtoken ' + 'keystone container-quotas account-quotas ' + 'proxy-server' + }, + 'app:proxy-server': { + 'use': 'egg:swift#proxy', + 'allow_account_management': 'true', + 'account_autocreate': 'true', + 'node_timeout': '60', + 'recoverable_node_timeout': '30' + }, + 'filter:tempauth': { + 'use': 'egg:swift#tempauth', + 'user_system_root': 'testpass .admin https://{}:8080/v1/' + 'AUTH_system'.format(swift_proxy_ip) + }, + 'filter:healthcheck': {'use': 'egg:swift#healthcheck'}, + 'filter:cache': { + 'use': 'egg:swift#memcache', + 'memcache_servers': '{}:11211'.format(swift_proxy_ip) + }, + 'filter:account-quotas': {'use': 'egg:swift#account_quotas'}, + 'filter:container-quotas': {'use': 'egg:swift#container_quotas'}, + 'filter:keystone': { + 'paste.filter_factory': 'swift.common.middleware.' 
+ 'keystoneauth:filter_factory', + 'operator_roles': 'Member,Admin' + }, + 'filter:authtoken': { + 'paste.filter_factory': 'keystone.middleware.auth_token:' + 'filter_factory', + 'auth_host': auth_host, + 'auth_port': keystone_relation['auth_port'], + 'auth_protocol': auth_protocol, + 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host, + keystone_relation['service_port']), + 'admin_tenant_name': keystone_relation['service_tenant'], + 'admin_user': keystone_relation['service_username'], + 'admin_password': keystone_relation['service_password'], + 'delay_auth_decision': 'true', + 'signing_dir': '/etc/swift' + }, + 'filter:s3token': { + 'paste.filter_factory': 'keystone.middleware.s3_token:' + 'filter_factory', + 'service_host': keystone_relation['service_host'], + 'service_port': keystone_relation['service_port'], + 'auth_port': keystone_relation['auth_port'], + 'auth_host': keystone_relation['auth_host'], + 'auth_protocol': keystone_relation['auth_protocol'], + 'auth_token': keystone_relation['admin_token'], + 'admin_token': keystone_relation['admin_token'], + 'service_protocol': keystone_relation['service_protocol'] + }, + 'filter:swift3': {'use': 'egg:swift3#swift3'} + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "proxy-server config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_proxy_server_folsom_config(self): + """Verify the data in the proxy-server config file.""" + if self._get_openstack_release() != self.precise_folsom: + return + + unit = self.swift_proxy_sentry + conf = '/etc/swift/proxy-server.conf' + keystone_relation = self.keystone_sentry.relation('identity-service', + 'swift-proxy:identity-service') + swift_proxy_relation = unit.relation('identity-service', + 'keystone:identity-service') + swift_proxy_ip = swift_proxy_relation['private-address'] + auth_host = keystone_relation['auth_host'] + auth_protocol = keystone_relation['auth_protocol'] + + expected = { + 'DEFAULT': { + 'bind_port': '8080', + 'workers': '0', + 'user': 'swift' + }, + 'pipeline:main': { + 'pipeline': 'healthcheck cache swift3 s3token authtoken ' + 'keystone proxy-server' + }, + 'app:proxy-server': { + 'use': 'egg:swift#proxy', + 'allow_account_management': 'true', + 'account_autocreate': 'true', + 'node_timeout': '60', + 'recoverable_node_timeout': '30' + }, + 'filter:tempauth': { + 'use': 'egg:swift#tempauth', + 'user_system_root': 'testpass .admin https://{}:8080/v1/' + 'AUTH_system'.format(swift_proxy_ip) + }, + 'filter:healthcheck': {'use': 'egg:swift#healthcheck'}, + 'filter:cache': { + 'use': 'egg:swift#memcache', + 'memcache_servers': '{}:11211'.format(swift_proxy_ip) + }, + 'filter:keystone': { + 'paste.filter_factory': 'keystone.middleware.swift_auth:' + 'filter_factory', + 'operator_roles': 'Member,Admin' + }, + 'filter:authtoken': { + 'paste.filter_factory': 'keystone.middleware.auth_token:' + 'filter_factory', + 'auth_host': auth_host, + 'auth_port': keystone_relation['auth_port'], + 'auth_protocol': auth_protocol, + 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host, + keystone_relation['service_port']), + 'admin_tenant_name': keystone_relation['service_tenant'], + 'admin_user': keystone_relation['service_username'], + 'admin_password': keystone_relation['service_password'], + 'delay_auth_decision': '1' + }, + 'filter:s3token': { + 'paste.filter_factory': 'keystone.middleware.s3_token:' + 'filter_factory', + 'service_host': keystone_relation['service_host'], + 
'service_port': keystone_relation['service_port'], + 'auth_port': keystone_relation['auth_port'], + 'auth_host': keystone_relation['auth_host'], + 'auth_protocol': keystone_relation['auth_protocol'], + 'auth_token': keystone_relation['admin_token'], + 'admin_token': keystone_relation['admin_token'], + 'service_protocol': keystone_relation['service_protocol'] + }, + 'filter:swift3': {'use': 'egg:swift#swift3'} + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "proxy-server config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_proxy_server_essex_config(self): + """Verify the data in the proxy-server config file.""" + if self._get_openstack_release() != self.precise_essex: + return + + unit = self.swift_proxy_sentry + conf = '/etc/swift/proxy-server.conf' + keystone_relation = self.keystone_sentry.relation('identity-service', + 'swift-proxy:identity-service') + swift_proxy_relation = unit.relation('identity-service', + 'keystone:identity-service') + swift_proxy_ip = swift_proxy_relation['private-address'] + auth_host = keystone_relation['auth_host'] + auth_protocol = keystone_relation['auth_protocol'] + + expected = { + 'DEFAULT': { + 'bind_port': '8080', + 'workers': '0', + 'user': 'swift' + }, + 'pipeline:main': { + 'pipeline': 'healthcheck cache swift3 s3token authtoken ' + 'keystone proxy-server' + }, + 'app:proxy-server': { + 'use': 'egg:swift#proxy', + 'allow_account_management': 'true', + 'account_autocreate': 'true', + 'node_timeout': '60', + 'recoverable_node_timeout': '30' + }, + 'filter:tempauth': { + 'use': 'egg:swift#tempauth', + 'user_system_root': 'testpass .admin https://{}:8080/v1/' + 'AUTH_system'.format(swift_proxy_ip) + }, + 'filter:healthcheck': {'use': 'egg:swift#healthcheck'}, + 'filter:cache': { + 'use': 'egg:swift#memcache', + 'memcache_servers': '{}:11211'.format(swift_proxy_ip) + }, + 'filter:keystone': { + 'paste.filter_factory': 'keystone.middleware.swift_auth:' + 'filter_factory', + 'operator_roles': 'Member,Admin' + }, + 'filter:authtoken': { + 'paste.filter_factory': 'keystone.middleware.auth_token:' + 'filter_factory', + 'auth_host': auth_host, + 'auth_port': keystone_relation['auth_port'], + 'auth_protocol': auth_protocol, + 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host, + keystone_relation['service_port']), + 'admin_tenant_name': keystone_relation['service_tenant'], + 'admin_user': keystone_relation['service_username'], + 'admin_password': keystone_relation['service_password'], + 'delay_auth_decision': '1' + }, + 'filter:s3token': { + 'paste.filter_factory': 'keystone.middleware.s3_token:' + 'filter_factory', + 'service_host': keystone_relation['service_host'], + 'service_port': keystone_relation['service_port'], + 'auth_port': keystone_relation['auth_port'], + 'auth_host': keystone_relation['auth_host'], + 'auth_protocol': keystone_relation['auth_protocol'], + 'auth_token': keystone_relation['admin_token'], + 'admin_token': keystone_relation['admin_token'], + 'service_protocol': keystone_relation['service_protocol'] + }, + 'filter:swift3': {'use': 'egg:swift#swift3'} + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "proxy-server config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_image_create(self): + """Create an image in glance, which is backed by swift, and validate + that some of the metadata for the 
image match in glance and swift.""" + # NOTE(coreycb): Skipping failing test on folsom until resolved. On + # folsom only, uploading an image to glance gets 400 Bad + # Request - Error uploading image: (error): [Errno 111] + # ECONNREFUSED (HTTP 400) + if self._get_openstack_release() == self.precise_folsom: + u.log.error("Skipping failing test until resolved") + return + + # Create glance image + image = u.create_cirros_image(self.glance, "cirros-image") + if not image: + amulet.raise_status(amulet.FAIL, msg="Image create failed") + + # Validate that cirros image exists in glance and get its checksum/size + images = list(self.glance.images.list()) + if len(images) != 1: + msg = "Expected 1 glance image, found {}".format(len(images)) + amulet.raise_status(amulet.FAIL, msg=msg) + + if images[0].name != 'cirros-image': + message = "cirros image does not exist" + amulet.raise_status(amulet.FAIL, msg=message) + + glance_image_md5 = image.checksum + glance_image_size = image.size + + # Validate that swift object's checksum/size match that from glance + headers, containers = self.swift.get_account() + if len(containers) != 1: + msg = "Expected 1 swift container, found {}".format(len(containers)) + amulet.raise_status(amulet.FAIL, msg=msg) + + container_name = containers[0].get('name') + + headers, objects = self.swift.get_container(container_name) + if len(objects) != 1: + msg = "Expected 1 swift object, found {}".format(len(objects)) + amulet.raise_status(amulet.FAIL, msg=msg) + + swift_object_size = objects[0].get('bytes') + swift_object_md5 = objects[0].get('hash') + + if glance_image_size != swift_object_size: + msg = "Glance image size {} != swift object size {}".format( \ + glance_image_size, swift_object_size) + amulet.raise_status(amulet.FAIL, msg=msg) + + if glance_image_md5 != swift_object_md5: + msg = "Glance image hash {} != swift object hash {}".format( \ + glance_image_md5, swift_object_md5) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Cleanup + u.delete_image(self.glance, image)
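The config and relation checks above all go through the u.validate_* helpers; the only end-to-end data-path coverage is the glance-backed image test. As an illustrative sketch only (not part of the patches above), an additional object round-trip check could reuse the swiftclient connection created in _initialize_tests(). The method name, container name, and object contents below are hypothetical:

    def test_object_round_trip(self):
        """Sketch: store and retrieve an object through swift-proxy."""
        container = 'amulet-round-trip'          # arbitrary, illustrative name
        contents = 'swift-proxy round trip data'

        # put_container/put_object/get_object/delete_* are standard
        # python-swiftclient Connection methods; self.swift is already
        # authenticated against keystone in _initialize_tests().
        self.swift.put_container(container)
        self.swift.put_object(container, 'sample.txt', contents=contents,
                              content_type='text/plain')
        headers, body = self.swift.get_object(container, 'sample.txt')

        if body != contents:
            msg = 'object round trip failed: {} != {}'.format(body, contents)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Clean up so the module stays idempotent, per tests/README.
        self.swift.delete_object(container, 'sample.txt')
        self.swift.delete_container(container)

If added to SwiftProxyBasicDeployment, such a method would run under the same 'juju test -v -p AMULET_HTTP_PROXY' invocation as the existing test_* checks, with no extra deployment steps.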