From c283a1c922a903133ef31627203a56b951129324 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Mon, 7 Mar 2016 09:10:53 +0000
Subject: [PATCH] Enable Keystone v3 API

This change enables the Keystone v3 API. It can be toggled on and off
via the preferred-api-version option. When services join the
identity-service relation they are presented with a new parameter,
api_version, which is the maximum API version the keystone charm
supports and matches what was set via preferred-api-version.

If preferred-api-version is set to 3 then the charm will render a new
policy.json which adds support for domains etc. when keystone checks
authorisation. The new policy.json requires an admin domain to be
created and specifies that a user is classed as an admin of the whole
cloud if they have the admin role against that admin domain. The admin
domain, called admin_domain, is created by the charm. The name of this
domain is currently not user-configurable. The role that enables a
user to be classed as an admin is specified by the existing charm
option admin-role. The charm grants admin-role to the admin-user
against the admin_domain.

Switching a deployed cloud from preferred-api-version 2 to
preferred-api-version 3 is supported. Switching from
preferred-api-version 3 back to preferred-api-version 2 should work
from the charm's point of view, but it may cause problems if there are
duplicate users between domains and may have unintended consequences,
such as escalating the privileges of some users, so it is not
recommended.

Change-Id: I8eec2a90e0acbf56ee72cb5036a0a21f4a77a2c3
---
 charm-helpers-tests.yaml                      |   2 +
 charmhelpers/contrib/storage/linux/ceph.py    | 132 ++-
 config.yaml                                   |   6 +
 hooks/keystone_context.py                     |  10 +-
 hooks/keystone_hooks.py                       |   6 +
 hooks/keystone_utils.py                       | 345 ++-
 hooks/manager.py                              | 225 +++-
 templates/liberty/policy.json                 | 382 +++++++
 templates/liberty/policy.json.v2              | 184 ++++
 tests/basic_deployment.py                     | 252 ++++-
 .../contrib/openstack/amulet/utils.py         |  46 +-
 tests/charmhelpers/core/__init__.py           |  15 +
 tests/charmhelpers/core/decorators.py         |  57 +
 tests/charmhelpers/core/hookenv.py            | 978 ++++++++++++++++++
 tox.ini                                       |   2 +-
 unit_tests/test_actions.py                    |  21 +-
 unit_tests/test_actions_git_reinstall.py      |   3 +-
 unit_tests/test_actions_openstack_upgrade.py  |  11 +-
 unit_tests/test_keystone_hooks.py             |  19 +-
 unit_tests/test_keystone_utils.py             |  59 +-
 20 files changed, 2545 insertions(+), 210 deletions(-)
 create mode 100644 templates/liberty/policy.json
 create mode 100644 templates/liberty/policy.json.v2
 create mode 100644 tests/charmhelpers/core/__init__.py
 create mode 100644 tests/charmhelpers/core/decorators.py
 create mode 100644 tests/charmhelpers/core/hookenv.py

diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml
index 48b12f6f..7e5325b7 100644
--- a/charm-helpers-tests.yaml
+++ b/charm-helpers-tests.yaml
@@ -3,3 +3,5 @@ destination: tests/charmhelpers
 include:
     - contrib.amulet
     - contrib.openstack.amulet
+    - core.hookenv
+    - core.decorators
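Background note on the relation change described above: a charm on the other
end of the identity-service relation can read the new api_version setting and
fall back to v2 when an older keystone never publishes it. A minimal
consumer-side sketch; the helper name and the fallback default are
illustrative assumptions, not something this patch ships:

    # Hypothetical consumer-side helper; not part of this patch.
    from charmhelpers.core.hookenv import relation_get

    def identity_api_version():
        # Older keystone charms never set api_version, so treat a
        # missing key as the old v2-only behaviour (assumed fallback).
        return int(relation_get('api_version') or 2)
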
diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py
index fb1bee34..1974e1ef 100644
--- a/charmhelpers/contrib/storage/linux/ceph.py
+++ b/charmhelpers/contrib/storage/linux/ceph.py
@@ -24,6 +24,8 @@
 #  Adam Gandelman
 #
 import bisect
+import errno
+import hashlib
 import six
 import os
@@ -163,7 +165,7 @@ class Pool(object):
         :return: None
         """
         # read-only is easy, writeback is much harder
-        mode = get_cache_mode(cache_pool)
+        mode = get_cache_mode(self.service, cache_pool)
         if mode == 'readonly':
             check_call(['ceph', '--id', self.service, 'osd', 'tier',
                         'cache-mode', cache_pool, 'none'])
             check_call(['ceph', '--id', self.service, 'osd', 'tier',
                         'remove', self.name, cache_pool])
@@ -259,6 +261,134 @@ class ErasurePool(Pool):
         Returns json formatted output"""


+def get_mon_map(service):
+    """
+    Returns the current monitor map.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :return: dict. The parsed monmap.
+    :raise: ValueError if the monmap fails to parse. Also raises
+        CalledProcessError if our ceph command fails
+    """
+    try:
+        mon_status = check_output(
+            ['ceph', '--id', service,
+             'mon_status', '--format=json'])
+        try:
+            return json.loads(mon_status)
+        except ValueError as v:
+            log("Unable to parse mon_status json: {}. Error: {}".format(
+                mon_status, v.message))
+            raise
+    except CalledProcessError as e:
+        log("mon_status command failed with message: {}".format(
+            e.message))
+        raise
+
+
+def hash_monitor_names(service):
+    """
+    Uses the get_mon_map() function to get information about the monitor
+    cluster, hashes the name of each monitor and returns a sorted list of
+    the hashes in ascending order.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :rtype: list. Sorted list of sha224 hex digests of the monitor names.
+    """
+    try:
+        hash_list = []
+        monitor_list = get_mon_map(service=service)
+        if monitor_list['monmap']['mons']:
+            for mon in monitor_list['monmap']['mons']:
+                hash_list.append(
+                    hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
+            return sorted(hash_list)
+        else:
+            return None
+    except (ValueError, CalledProcessError):
+        raise
+
+
+def monitor_key_delete(service, key):
+    """
+    Deletes a key and value pair from the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to delete.
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'del', str(key)])
+    except CalledProcessError as e:
+        log("Monitor config-key del failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_set(service, key, value):
+    """
+    Sets a key value pair on the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to set.
+    :param value: The value to set. This will be converted to a string
+        before setting
+    """
+    try:
+        check_output(
+            ['ceph', '--id', service,
+             'config-key', 'put', str(key), str(value)])
+    except CalledProcessError as e:
+        log("Monitor config-key put failed with message: {}".format(
+            e.output))
+        raise
+
+
+def monitor_key_get(service, key):
+    """
+    Gets the value of an existing key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to search for.
+    :return: Returns the value of that key or None if not found.
+    """
+    try:
+        output = check_output(
+            ['ceph', '--id', service,
+             'config-key', 'get', str(key)])
+        return output
+    except CalledProcessError as e:
+        log("Monitor config-key get failed with message: {}".format(
+            e.output))
+        return None
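Taken together, the monitor_key_* helpers above amount to a small key/value
store on top of `ceph config-key`. A rough usage sketch, assuming a Ceph user
named 'admin' with rights to run config-key; names and values are placeholders:

    monitor_key_set('admin', 'unit-state', 'ready')
    state = monitor_key_get('admin', 'unit-state')  # raw command output, or None
    monitor_key_delete('admin', 'unit-state')
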
+def monitor_key_exists(service, key):
+    """
+    Searches for the existence of a key in the monitor cluster.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param key: six.string_types. The key to search for.
+    :return: Returns True if the key exists, False if not.
+    :raise: CalledProcessError if an unknown error occurs
+    """
+    try:
+        check_call(
+            ['ceph', '--id', service,
+             'config-key', 'exists', str(key)])
+        # A zero exit status means the key exists; Ceph signals a missing
+        # key with an ENOENT return code, handled below.
+        return True
+    except CalledProcessError as e:
+        if e.returncode == errno.ENOENT:
+            return False
+        else:
+            log("Unknown error from ceph config-key exists: {} {}".format(
+                e.returncode, e.output))
+            raise
+
+
 def get_erasure_profile(service, name):
     """
     :param service: six.string_types. The Ceph user name to run the command under
diff --git a/config.yaml b/config.yaml
index 5ca9359d..4519a100 100644
--- a/config.yaml
+++ b/config.yaml
@@ -298,6 +298,12 @@ options:
     description: |
       A comma-separated list of nagios servicegroups. If left empty, the
       nagios_context will be used as the servicegroup
+  preferred-api-version:
+    default: 2
+    type: int
+    description: |
+      Use this keystone API version for keystone endpoints and advertise
+      this version to identity client charms.
   action-managed-upgrade:
     type: boolean
     default: False
diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py
index f9765a5b..1174a7ac 100644
--- a/hooks/keystone_context.py
+++ b/hooks/keystone_context.py
@@ -190,9 +190,15 @@ class KeystoneContext(context.OSContextGenerator):
         from keystone_utils import (
             api_port, set_admin_token, endpoint_url, resolve_address,
             PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths,
+            get_admin_domain_id
         )
         ctxt = {}
         ctxt['token'] = set_admin_token(config('admin-token'))
+        ctxt['api_version'] = int(config('preferred-api-version'))
+        ctxt['admin_role'] = config('admin-role')
+        if ctxt['api_version'] > 2:
+            ctxt['admin_domain_id'] = (
+                get_admin_domain_id() or 'admin_domain_id')
         ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
                                                 singlenode_mode=True)
         ctxt['public_port'] = determine_api_port(api_port('keystone-public'),
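The rstrip-to-replace change in the next hunk fixes a real bug rather than
being cosmetic: str.rstrip() treats its argument as a set of characters to
strip, not a suffix, so once an endpoint may lack the /v2.0 suffix it can eat
trailing port digits. Illustrative values:

    'http://10.0.0.1:5000'.rstrip('v2.0')       # -> 'http://10.0.0.1:5' (!)
    'http://10.0.0.1:5000'.replace('v2.0', '')  # -> 'http://10.0.0.1:5000'
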
@@ -233,10 +239,10 @@
         # correct auth URL.
         ctxt['public_endpoint'] = endpoint_url(
             resolve_address(PUBLIC),
-            api_port('keystone-public')).rstrip('v2.0')
+            api_port('keystone-public')).replace('v2.0', '')
         ctxt['admin_endpoint'] = endpoint_url(
             resolve_address(ADMIN),
-            api_port('keystone-admin')).rstrip('v2.0')
+            api_port('keystone-admin')).replace('v2.0', '')
         return ctxt

diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py
index 05594377..fa0b10ee 100755
--- a/hooks/keystone_hooks.py
+++ b/hooks/keystone_hooks.py
@@ -47,6 +47,7 @@ from charmhelpers.contrib.openstack.utils import (
     git_install_requested,
     openstack_upgrade_available,
     sync_db_with_multi_ipv6_addresses,
+    os_release,
 )

 from keystone_utils import (
@@ -64,6 +65,7 @@ from keystone_utils import (
     services,
     CLUSTER_RES,
     KEYSTONE_CONF,
+    POLICY_JSON,
     SSH_USER,
     setup_ipv6,
     send_notifications,
@@ -309,6 +311,8 @@ def db_changed():
     else:
         CONFIGS.write(KEYSTONE_CONF)
         leader_init_db_if_ready(use_current_context=True)
+        if os_release('keystone-common') >= 'liberty':
+            CONFIGS.write(POLICY_JSON)


 @hooks.hook('pgsql-db-relation-changed')
@@ -320,6 +324,8 @@ def pgsql_db_changed():
     else:
         CONFIGS.write(KEYSTONE_CONF)
         leader_init_db_if_ready(use_current_context=True)
+        if os_release('keystone-common') >= 'liberty':
+            CONFIGS.write(POLICY_JSON)


 @hooks.hook('identity-service-relation-changed')
diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py
index f9ee0777..6c8c65f5 100644
--- a/hooks/keystone_utils.py
+++ b/hooks/keystone_utils.py
@@ -166,6 +166,7 @@ KEYSTONE_LOGGER_CONF = "/etc/keystone/logging.conf"
 KEYSTONE_CONF_DIR = os.path.dirname(KEYSTONE_CONF)
 STORED_PASSWD = "/var/lib/keystone/keystone.passwd"
 STORED_TOKEN = "/var/lib/keystone/keystone.token"
+STORED_ADMIN_DOMAIN_ID = "/var/lib/keystone/keystone.admin_domain_id"
 SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd'
 HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
@@ -184,6 +185,10 @@ SSH_USER = 'juju_keystone'
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 SSL_SYNC_SEMAPHORE = threading.Semaphore()
 SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]
+ADMIN_DOMAIN = 'admin_domain'
+DEFAULT_DOMAIN = 'Default'
+POLICY_JSON = '/etc/keystone/policy.json'
+
 BASE_RESOURCE_MAP = OrderedDict([
     (KEYSTONE_CONF, {
         'services': BASE_SERVICES,
@@ -212,6 +217,10 @@ BASE_RESOURCE_MAP = OrderedDict([
         'contexts': [keystone_context.ApacheSSLContext()],
         'services': ['apache2'],
     }),
+    (POLICY_JSON, {
+        'contexts': [keystone_context.KeystoneContext()],
+        'services': BASE_SERVICES,
+    }),
 ])

 valid_services = {
@@ -329,6 +338,8 @@ def resource_map():
     """
     resource_map = deepcopy(BASE_RESOURCE_MAP)
+    if os_release('keystone') < 'liberty':
+        resource_map.pop(POLICY_JSON)
     if os.path.exists('/etc/apache2/conf-available'):
         resource_map.pop(APACHE_CONF)
     else:
@@ -452,18 +463,26 @@ def migrate_database():

 # OLD
-def get_local_endpoint():
+def get_api_suffix():
+    return 'v2.0' if get_api_version() == 2 else 'v3'
+
+
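get_api_suffix() is the pivot for the URL handling below: version 2 keeps the
historical /v2.0 paths and version 3 switches to /v3. A sketch of the
resulting local endpoint shapes; host and port are placeholders:

    # preferred-api-version: 2 -> 'http://localhost:35347/v2.0/'
    # preferred-api-version: 3 -> 'http://localhost:35347/v3/'
    local_endpoint = 'http://localhost:{}/{}/'.format(keystone_port,
                                                      get_api_suffix())
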
+def get_local_endpoint(api_suffix=None):
     """Returns the URL for the local end-point bypassing haproxy/ssl"""
+    if not api_suffix:
+        api_suffix = get_api_suffix()
+    keystone_port = determine_api_port(api_port('keystone-admin'),
+                                       singlenode_mode=True)
     if config('prefer-ipv6'):
         ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
-        endpoint_url = 'http://[%s]:{}/v2.0/' % ipv6_addr
-        local_endpoint = endpoint_url.format(
-            determine_api_port(api_port('keystone-admin'),
-                               singlenode_mode=True))
+        local_endpoint = 'http://[{}]:{}/{}/'.format(
+            ipv6_addr,
+            keystone_port,
+            api_suffix)
     else:
-        local_endpoint = 'http://localhost:{}/v2.0/'.format(
-            determine_api_port(api_port('keystone-admin'),
-                               singlenode_mode=True))
+        local_endpoint = 'http://localhost:{}/{}/'.format(
+            keystone_port,
+            api_suffix)
     return local_endpoint

@@ -506,18 +525,14 @@ def get_admin_token():

 def is_service_present(service_name, service_type):
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
+    manager = get_manager()
     service_id = manager.resolve_service_id(service_name, service_type)
     return service_id is not None


 def delete_service_entry(service_name, service_type):
     """ Delete a service from keystone"""
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
+    manager = get_manager()
     service_id = manager.resolve_service_id(service_name, service_type)
     if service_id:
         manager.api.services.delete(service_id)
@@ -526,28 +541,34 @@ def delete_service_entry(service_name, service_type):
 def create_service_entry(service_name, service_type, service_desc, owner=None):
     """ Add a new service entry to keystone if one does not already exist """
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
+    manager = get_manager()
     for service in [s._info for s in manager.api.services.list()]:
         if service['name'] == service_name:
             log("Service entry for '%s' already exists." % service_name,
                 level=DEBUG)
             return
-    manager.api.services.create(name=service_name,
-                                service_type=service_type,
+    manager.api.services.create(service_name,
+                                service_type,
                                 description=service_desc)
     log("Created new service entry '%s'" % service_name, level=DEBUG)


 def create_endpoint_template(region, service, publicurl, adminurl,
                              internalurl):
+    manager = get_manager()
+    if manager.api_version == 2:
+        create_endpoint_template_v2(manager, region, service, publicurl,
+                                    adminurl, internalurl)
+    else:
+        create_endpoint_template_v3(manager, region, service, publicurl,
+                                    adminurl, internalurl)
+
+
+def create_endpoint_template_v2(manager, region, service, publicurl, adminurl,
+                                internalurl):
     """ Create a new endpoint template for service if one does not already
     exist matching name *and* region """
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
     service_id = manager.resolve_service_id(service)
     for ep in [e._info for e in manager.api.endpoints.list()]:
         if ep['service_id'] == service_id and ep['region'] == region:
@@ -566,67 +587,131 @@
             log("Updating endpoint template with new endpoint urls.")
             manager.api.endpoints.delete(ep['id'])

-    manager.api.endpoints.create(region=region,
-                                 service_id=service_id,
-                                 publicurl=publicurl,
-                                 adminurl=adminurl,
-                                 internalurl=internalurl)
+    manager.create_endpoints(region=region,
+                             service_id=service_id,
+                             publicurl=publicurl,
+                             adminurl=adminurl,
+                             internalurl=internalurl)
     log("Created new endpoint template for '%s' in '%s'" % (region, service),
         level=DEBUG)

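The v2 path above keeps keystone's single endpoint-template record carrying
all three URLs; the v3 path that follows manages one endpoint record per
interface. A rough equivalence sketch using the python-keystoneclient v3 API,
where `ks` is assumed to be an already-constructed v3 client:

    # v3 splits one logical endpoint into three records:
    for interface, url in [('public', publicurl),
                           ('admin', adminurl),
                           ('internal', internalurl)]:
        ks.endpoints.create(service_id, url, interface=interface,
                            region=region)
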
+def create_endpoint_template_v3(manager, region, service, publicurl, adminurl,
+                                internalurl):
+    service_id = manager.resolve_service_id(service)
+    endpoints = {
+        'public': publicurl,
+        'admin': adminurl,
+        'internal': internalurl,
+    }
+    for ep_type in endpoints.keys():
+        # Delete the endpoint if it has changed
+        ep_deleted = manager.delete_old_endpoint_v3(
+            ep_type,
+            service_id,
+            region,
+            endpoints[ep_type]
+        )
+        ep_exists = manager.find_endpoint_v3(
+            ep_type,
+            service_id,
+            region
+        )
+        if ep_deleted or not ep_exists:
+            manager.api.endpoints.create(
+                service_id,
+                endpoints[ep_type],
+                interface=ep_type,
+                region=region
+            )
+
+
 def create_tenant(name):
     """Creates a tenant if it does not already exist"""
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
-    tenants = [t._info for t in manager.api.tenants.list()]
-    if not tenants or name not in [t['name'] for t in tenants]:
-        manager.api.tenants.create(tenant_name=name,
-                                   description='Created by Juju')
+    manager = get_manager()
+    tenant = manager.resolve_tenant_id(name)
+    if not tenant:
+        manager.create_tenant(tenant_name=name,
+                              description='Created by Juju')
         log("Created new tenant: %s" % name, level=DEBUG)
         return
     log("Tenant '%s' already exists." % name, level=DEBUG)


-def user_exists(name):
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
-    users = [u._info for u in manager.api.users.list()]
-    if not users or name not in [u['name'] for u in users]:
-        return False
-
-    return True
+def create_or_show_domain(name):
+    """Creates a domain if it does not already exist"""
+    manager = get_manager()
+    domain_id = manager.resolve_domain_id(name)
+    if domain_id:
+        log("Domain '%s' already exists." % name, level=DEBUG)
+    else:
+        manager.create_domain(domain_name=name,
+                              description='Created by Juju')
+        log("Created new domain: %s" % name, level=DEBUG)
+        domain_id = manager.resolve_domain_id(name)
+    return domain_id


-def create_user(name, password, tenant):
+def user_exists(name, domain=None):
+    manager = get_manager()
+    if domain:
+        domain_id = manager.resolve_domain_id(domain)
+        if not domain_id:
+            error_out('Could not resolve domain_id for {} when checking if '
                      'user {} exists'.format(domain, name))
+    for user in manager.api.users.list():
+        if user.name == name:
+            # In v3, domains are separate user namespaces, so check that the
+            # domain matches if one was provided
+            if domain:
+                if domain_id == user.domain_id:
+                    return True
+            else:
+                return True
+
+    return False
+
+
+def create_user(name, password, tenant=None, domain=None):
     """Creates a user if it doesn't already exist, as a member of tenant"""
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
-    if user_exists(name):
+    manager = get_manager()
+    if user_exists(name, domain=domain):
         log("A user named '%s' already exists" % name, level=DEBUG)
         return

-    tenant_id = manager.resolve_tenant_id(tenant)
-    if not tenant_id:
-        error_out('Could not resolve tenant_id for tenant %s' % tenant)
+    tenant_id = None
+    if tenant:
+        tenant_id = manager.resolve_tenant_id(tenant)
+        if not tenant_id:
+            error_out('Could not resolve tenant_id for tenant %s' % tenant)

-    manager.api.users.create(name=name,
-                             password=password,
-                             email='juju@localhost',
-                             tenant_id=tenant_id)
+    domain_id = None
+    if domain:
+        domain_id = manager.resolve_domain_id(domain)
+        if not domain_id:
+            error_out('Could not resolve domain_id for domain %s when creating'
+                      ' user %s' % (domain, name))
+
+    manager.create_user(name=name,
+                        password=password,
+                        email='juju@localhost',
+                        tenant_id=tenant_id,
+                        domain_id=domain_id)
     log("Created new user '%s' tenant: %s" % (name, tenant_id), level=DEBUG)

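get_manager(), defined just below, replaces the repeated
`import manager; manager.KeystoneManager(...)` boilerplate and keeps the rest
of this module version-agnostic. A hedged usage sketch; the endpoint and token
values are placeholders:

    from manager import get_keystone_manager

    mgr = get_keystone_manager('http://localhost:35347/v3/',
                               'some-admin-token', api_version=3)
    mgr.resolve_domain_id('admin_domain')  # -> domain id, or None if absent
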
-def create_role(name, user=None, tenant=None):
+def get_manager(api_version=None):
+    """Return a KeystoneManager for the correct API version"""
+    from manager import get_keystone_manager
+    return get_keystone_manager(get_local_endpoint(), get_admin_token(),
+                                api_version)
+
+
+def create_role(name, user=None, tenant=None, domain=None):
     """Creates a role if it doesn't already exist. Grants role to user"""
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
+    manager = get_manager()
     roles = [r._info for r in manager.api.roles.list()]
     if not roles or name not in [r['name'] for r in roles]:
         manager.api.roles.create(name=name)
@@ -640,31 +725,45 @@
     # NOTE(adam_g): Keystone client requires id's for add_user_role, not names
     user_id = manager.resolve_user_id(user)
     role_id = manager.resolve_role_id(name)
-    tenant_id = manager.resolve_tenant_id(tenant)
-    if None in [user_id, role_id, tenant_id]:
-        error_out("Could not resolve [%s, %s, %s]" %
-                  (user_id, role_id, tenant_id))
+    if None in [user_id, role_id]:
+        error_out("Could not resolve [%s, %s]" %
+                  (user_id, role_id))

-    grant_role(user, name, tenant)
+    grant_role(user, name, tenant, domain)


-def grant_role(user, role, tenant):
+def grant_role(user, role, tenant=None, domain=None, user_domain=None):
     """Grant user and tenant a specific role"""
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
+    manager = get_manager()
     log("Granting user '%s' role '%s' on tenant '%s'" %
         (user, role, tenant))
-    user_id = manager.resolve_user_id(user)
-    role_id = manager.resolve_role_id(role)
-    tenant_id = manager.resolve_tenant_id(tenant)
-    cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id)
+    user_id = manager.resolve_user_id(user, user_domain=user_domain)
+    role_id = manager.resolve_role_id(role)
+    if None in [user_id, role_id]:
+        error_out("Could not resolve [%s, %s]" %
+                  (user_id, role_id))
+
+    tenant_id = None
+    if tenant:
+        tenant_id = manager.resolve_tenant_id(tenant)
+        if not tenant_id:
+            error_out('Could not resolve tenant_id for tenant %s' % tenant)
+
+    domain_id = None
+    if domain:
+        domain_id = manager.resolve_domain_id(domain)
+        if not domain_id:
+            error_out('Could not resolve domain_id for domain %s' % domain)
+
+    cur_roles = manager.roles_for_user(user_id, tenant_id=tenant_id,
+                                       domain_id=domain_id)
     if not cur_roles or role_id not in [r.id for r in cur_roles]:
-        manager.api.roles.add_user_role(user=user_id,
-                                        role=role_id,
-                                        tenant=tenant_id)
+        manager.add_user_role(user=user_id,
+                              role=role_id,
+                              tenant=tenant_id,
+                              domain=domain_id)
         log("Granted user '%s' role '%s' on tenant '%s'" %
             (user, role, tenant), level=DEBUG)
     else:
@@ -677,6 +776,11 @@ def store_admin_passwd(passwd):
         fd.writelines("%s\n" % passwd)


+def store_admin_domain_id(domain_id):
+    with open(STORED_ADMIN_DOMAIN_ID, 'w+') as fd:
+        fd.writelines("%s\n" % domain_id)
+
+
 def get_admin_passwd():
     passwd = config("admin-password")
     if passwd and passwd.lower() != "none":
         return passwd
@@ -708,6 +812,13 @@
     return passwd


+def get_api_version():
+    api_version = config('preferred-api-version')
+    if api_version not in [2, 3]:
+        raise ValueError('Bad preferred-api-version')
+    return api_version
+
+
 def ensure_initial_admin(config):
     # Allow retry on fail since leader may not be ready yet.
     # NOTE(hopem): ks client may not be installed at module import time so we
         """
         create_tenant("admin")
         create_tenant(config("service-tenant"))
+        if get_api_version() > 2:
+            domain_id = create_or_show_domain(ADMIN_DOMAIN)
+            store_admin_domain_id(domain_id)
         # User is managed by ldap backend when using ldap identity
         if not (config('identity-backend') == 'ldap' and config('ldap-readonly')):
             passwd = get_admin_passwd()
             if passwd:
-                create_user_credentials(config('admin-user'), 'admin', passwd,
-                                        new_roles=[config('admin-role')])
+                if get_api_version() > 2:
+                    create_user_credentials(config('admin-user'), passwd,
+                                            domain=ADMIN_DOMAIN)
+                    create_role(config('admin-role'), config('admin-user'),
+                                domain=ADMIN_DOMAIN)
+                    grant_role(config('admin-user'), config('admin-role'),
+                               tenant='admin', user_domain=ADMIN_DOMAIN)
+                    grant_role(config('admin-user'), config('admin-role'),
+                               domain=ADMIN_DOMAIN, user_domain=ADMIN_DOMAIN)
+                else:
+                    create_user_credentials(config('admin-user'), passwd,
+                                            tenant='admin',
+                                            new_roles=[config('admin-role')])

         create_service_entry("keystone", "identity",
                              "Keystone Identity Service")
@@ -756,34 +881,39 @@
     return _ensure_initial_admin(config)


-def endpoint_url(ip, port):
+def endpoint_url(ip, port, suffix=None):
     proto = 'http'
     if https():
         proto = 'https'
     if is_ipv6(ip):
         ip = "[{}]".format(ip)
-    return "%s://%s:%s/v2.0" % (proto, ip, port)
+    if suffix:
+        ep = "%s://%s:%s/%s" % (proto, ip, port, suffix)
+    else:
+        ep = "%s://%s:%s" % (proto, ip, port)
+    return ep


 def create_keystone_endpoint(public_ip, service_port,
                              internal_ip, admin_ip, auth_port, region):
-    create_endpoint_template(region, "keystone",
-                             endpoint_url(public_ip, service_port),
-                             endpoint_url(admin_ip, auth_port),
-                             endpoint_url(internal_ip, service_port))
+    api_suffix = get_api_suffix()
+    create_endpoint_template(
+        region, "keystone",
+        endpoint_url(public_ip, service_port, suffix=api_suffix),
+        endpoint_url(admin_ip, auth_port, suffix=api_suffix),
+        endpoint_url(internal_ip, service_port, suffix=api_suffix),
+    )


 def update_user_password(username, password):
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token=get_admin_token())
+    manager = get_manager()
     log("Updating password for user '%s'" % username)

     user_id = manager.resolve_user_id(username)
     if user_id is None:
         error_out("Could not resolve user id for '%s'" % username)

-    manager.api.users.update_password(user=user_id, password=password)
+    manager.update_password(user=user_id, password=password)
     log("Successfully updated password for user '%s'" % username)

@@ -1361,22 +1491,23 @@ def relation_list(rid):
     return result


-def create_user_credentials(user, tenant, passwd, new_roles=None, grants=None):
+def create_user_credentials(user, passwd, tenant=None, new_roles=None,
+                            grants=None, domain=None):
     """Create user credentials.
     Optionally adds role grants to user and/or creates new roles.
""" log("Creating service credentials for '%s'" % user, level=DEBUG) - if user_exists(user): + if user_exists(user, domain=domain): log("User '%s' already exists - updating password" % (user), level=DEBUG) update_user_password(user, passwd) else: - create_user(user, passwd, tenant) + create_user(user, passwd, tenant, domain) if grants: for role in grants: - grant_role(user, role, tenant) + grant_role(user, role, tenant, domain) else: log("No role grants requested for user '%s'" % (user), level=DEBUG) @@ -1385,7 +1516,7 @@ def create_user_credentials(user, tenant, passwd, new_roles=None, grants=None): # Currently used by Swift and Ceilometer. for role in new_roles: log("Creating requested role '%s'" % role, level=DEBUG) - create_role(role, user, tenant) + create_role(role, user, tenant, domain) return passwd @@ -1400,15 +1531,18 @@ def create_service_credentials(user, new_roles=None): if not tenant: raise Exception("No service tenant provided in config") - return create_user_credentials(user, tenant, get_service_password(user), - new_roles=new_roles, - grants=[config('admin-role')]) + if get_api_version() == 2: + domain = None + else: + domain = DEFAULT_DOMAIN + return create_user_credentials(user, get_service_password(user), + tenant=tenant, new_roles=new_roles, + grants=[config('admin-role')], + domain=domain) def add_service_to_keystone(relation_id=None, remote_unit=None): - import manager - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), - token=get_admin_token()) + manager = get_manager() settings = relation_get(rid=relation_id, unit=remote_unit) # the minimum settings needed per endpoint single = set(['service', 'region', 'public_url', 'admin_url', @@ -1419,7 +1553,6 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): protocol = 'https' else: protocol = 'http' - if single.issubset(settings): # other end of relation advertised only one endpoint if 'None' in settings.itervalues(): @@ -1546,6 +1679,8 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): # we return a token, information about our API endpoints, and the generated # service credentials service_tenant = config('service-tenant') + domain_name = 'Default' if manager.api_version == 3 else None + grant_role(service_username, 'Admin', service_tenant, domain_name) # NOTE(dosaboy): we use __null__ to represent settings that are to be # routed to relations via the cluster relation and set to None. @@ -1565,6 +1700,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): "ca_cert": '__null__', "auth_protocol": protocol, "service_protocol": protocol, + "api_version": get_api_version(), } # generate or get a new cert/key for service if set to manage certs. @@ -1863,7 +1999,6 @@ def assess_status(configs): @param configs: a templating.OSConfigRenderer() object """ - if is_paused(): status_set("maintenance", "Paused. 
        status_set("maintenance", "Paused. "
                   "Use 'resume' action to resume normal service.")
@@ -1873,3 +2008,13 @@
     set_os_workload_status(
         configs, REQUIRED_INTERFACES, charm_func=check_optional_relations,
         services=services(), ports=determine_ports())
+
+
+def get_admin_domain_id():
+    domain_id = None
+    if os.path.isfile(STORED_ADMIN_DOMAIN_ID):
+        log("Loading stored domain id from %s" % STORED_ADMIN_DOMAIN_ID,
+            level=INFO)
+        with open(STORED_ADMIN_DOMAIN_ID, 'r') as fd:
+            domain_id = fd.readline().strip('\n')
+    return domain_id
diff --git a/hooks/manager.py b/hooks/manager.py
index 17c6c029..3da4f63a 100644
--- a/hooks/manager.py
+++ b/hooks/manager.py
@@ -1,12 +1,70 @@
 #!/usr/bin/python

 from keystoneclient.v2_0 import client
+from keystoneclient.v3 import client as keystoneclient_v3
+from keystoneclient.auth import token_endpoint
+from keystoneclient import session
+
+
+def _get_keystone_manager_class(endpoint, token, api_version):
+    """Return KeystoneManager class for the given API version
+    @param endpoint: the keystone endpoint to point client at
+    @param token: the keystone admin_token
+    @param api_version: version of the keystone API the client should use
+    @returns KeystoneManager instance used for interrogating keystone
+    """
+    if api_version == 2:
+        return KeystoneManager2(endpoint, token)
+    if api_version == 3:
+        return KeystoneManager3(endpoint, token)
+    raise ValueError('No manager found for api version {}'.format(api_version))
+
+
+def get_keystone_manager(endpoint, token, api_version=None):
+    """Return a KeystoneManager for the correct API version
+
+    If api_version has not been set, create a manager based on the endpoint,
+    use it to query the catalogue and determine which API version should
+    actually be used, then return the correct client based on that.
+
+    XXX I think the keystone client should be able to do this version
+    detection automatically, so the code below could be greatly simplified.
+
+    @param endpoint: the keystone endpoint to point client at
+    @param token: the keystone admin_token
+    @param api_version: version of the keystone API the client should use
+    @returns KeystoneManager instance used for interrogating keystone
+    """
+    if api_version:
+        return _get_keystone_manager_class(endpoint, token, api_version)
+    else:
+        if 'v2.0' in endpoint.split('/'):
+            manager = _get_keystone_manager_class(endpoint, token, 2)
+        else:
+            manager = _get_keystone_manager_class(endpoint, token, 3)
+        if endpoint.endswith('/'):
+            base_ep = endpoint.rsplit('/', 2)[0]
+        else:
+            base_ep = endpoint.rsplit('/', 1)[0]
+        svc_id = None
+        for svc in manager.api.services.list():
+            if svc.type == 'identity':
+                svc_id = svc.id
+        version = None
+        for ep in manager.api.endpoints.list():
+            if ep.service_id == svc_id and hasattr(ep, 'adminurl'):
+                version = ep.adminurl.split('/')[-1]
+        if version and version == 'v2.0':
+            new_ep = base_ep + "/" + 'v2.0'
+            return _get_keystone_manager_class(new_ep, token, 2)
+        elif version and version == 'v3':
+            new_ep = base_ep + "/" + 'v3'
+            return _get_keystone_manager_class(new_ep, token, 3)
+        else:
+            return manager


 class KeystoneManager(object):
-    def __init__(self, endpoint, token):
-        self.api = client.Client(endpoint=endpoint, token=token)
-
     def resolve_tenant_id(self, name):
         """Find the tenant_id of a given tenant"""
         tenants = [t._info for t in self.api.tenants.list()]
@@ -14,6 +72,9 @@
             if name == t['name']:
                 return t['id']

+    def resolve_domain_id(self, name):
+        pass
+
     def resolve_role_id(self, name):
         """Find the role_id of a given role"""
         roles = [r._info for r in self.api.roles.list()]
@@ -21,13 +82,6 @@
             if name == r['name']:
                 return r['id']

-    def resolve_user_id(self, name):
-        """Find the user_id of a given user"""
-        users = [u._info for u in self.api.users.list()]
-        for u in users:
-            if name == u['name']:
-                return u['id']
-
     def resolve_service_id(self, name, service_type=None):
         """Find the service_id of a given service"""
         services = [s._info for s in self.api.services.list()]
@@ -45,3 +99,154 @@
         for s in services:
             if type == s['type']:
                 return s['id']
+
+
+class KeystoneManager2(KeystoneManager):
+
+    def __init__(self, endpoint, token):
+        self.api_version = 2
+        self.api = client.Client(endpoint=endpoint, token=token)
+
+    def resolve_user_id(self, name, user_domain=None):
+        """Find the user_id of a given user"""
+        # user_domain is ignored; the v2 API has no user domains.
+        users = [u._info for u in self.api.users.list()]
+        for u in users:
+            if name == u['name']:
+                return u['id']
+
+    def create_endpoints(self, region, service_id, publicurl, adminurl,
+                         internalurl):
+        self.api.endpoints.create(region=region, service_id=service_id,
+                                  publicurl=publicurl, adminurl=adminurl,
+                                  internalurl=internalurl)
+
+    def tenants_list(self):
+        return self.api.tenants.list()
+
+    def create_tenant(self, tenant_name, description, domain='default'):
+        # domain is ignored; the v2 API has no domains.
+        self.api.tenants.create(tenant_name=tenant_name,
+                                description=description)
+
+    def delete_tenant(self, tenant_id):
+        self.api.tenants.delete(tenant_id)
+
+    def create_user(self, name, password, email, tenant_id=None,
+                    domain_id=None):
+        # domain_id is ignored; the v2 API has no domains.
+        self.api.users.create(name=name,
+                              password=password,
+                              email=email,
+                              tenant_id=tenant_id)
+
+    def update_password(self, user, password):
+        self.api.users.update_password(user=user, password=password)
+
+    def roles_for_user(self, user_id, tenant_id=None, domain_id=None):
+        return self.api.roles.roles_for_user(user_id, tenant_id)
+
+    def add_user_role(self, user, role, tenant, domain):
+        # domain is ignored; the v2 API has no domains.
+        self.api.roles.add_user_role(user=user, role=role, tenant=tenant)
+
+
+class KeystoneManager3(KeystoneManager):
+
+    def __init__(self, endpoint, token):
+        self.api_version = 3
+        keystone_auth_v3 = token_endpoint.Token(endpoint=endpoint, token=token)
+        keystone_session_v3 = session.Session(auth=keystone_auth_v3)
+        self.api = keystoneclient_v3.Client(session=keystone_session_v3)
+
+    def resolve_tenant_id(self, name):
+        """Find the tenant_id of a given tenant"""
+        tenants = [t._info for t in self.api.projects.list()]
+        for t in tenants:
+            if name == t['name']:
+                return t['id']
+
+    def resolve_domain_id(self, name):
+        """Find the domain_id of a given domain"""
+        domains = [d._info for d in self.api.domains.list()]
+        for d in domains:
+            if name == d['name']:
+                return d['id']
+
+    def resolve_user_id(self, name, user_domain=None):
+        """Find the user_id of a given user"""
+        if user_domain:
+            domain_id = self.resolve_domain_id(user_domain)
+        for user in self.api.users.list():
+            if name == user.name:
+                if user_domain:
+                    if domain_id == user.domain_id:
+                        return user.id
+                else:
+                    return user.id
+
+    def create_endpoints(self, region, service_id, publicurl, adminurl,
+                         internalurl):
+        self.api.endpoints.create(service_id, publicurl, interface='public',
+                                  region=region)
+        self.api.endpoints.create(service_id, adminurl, interface='admin',
+                                  region=region)
+        self.api.endpoints.create(service_id, internalurl,
+                                  interface='internal', region=region)
+
+    def tenants_list(self):
+        return self.api.projects.list()
+
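Worth noting before the v3 grant/list methods that follow: a v3 role
assignment is scoped to either a project or a domain, never both, and
python-keystoneclient expresses that as mutually exclusive keyword arguments.
A sketch, with `ks` an assumed v3 client and the ids already resolved:

    ks.roles.grant(role_id, user=user_id, project=project_id)  # project scope
    ks.roles.grant(role_id, user=user_id, domain=domain_id)    # domain scope
    ks.roles.list(user_id, project=project_id)
    ks.roles.list(user_id, domain=domain_id)
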
+    def create_domain(self, domain_name, description):
+        self.api.domains.create(domain_name, description=description)
+
+    def create_tenant(self, tenant_name, description, domain='default'):
+        self.api.projects.create(tenant_name, domain, description=description)
+
+    def delete_tenant(self, tenant_id):
+        self.api.projects.delete(tenant_id)
+
+    def create_user(self, name, password, email, tenant_id=None,
+                    domain_id=None):
+        if not domain_id:
+            domain_id = self.resolve_domain_id('default')
+        if tenant_id:
+            self.api.users.create(name,
+                                  domain=domain_id,
+                                  password=password,
+                                  email=email,
+                                  project=tenant_id)
+        else:
+            self.api.users.create(name,
+                                  domain=domain_id,
+                                  password=password,
+                                  email=email)
+
+    def update_password(self, user, password):
+        self.api.users.update(user, password=password)
+
+    def roles_for_user(self, user_id, tenant_id=None, domain_id=None):
+        # Specify either a domain or project, not both
+        if domain_id:
+            return self.api.roles.list(user_id, domain=domain_id)
+        else:
+            return self.api.roles.list(user_id, project=tenant_id)
+
+    def add_user_role(self, user, role, tenant, domain):
+        # Specify either a domain or project, not both
+        if domain:
+            self.api.roles.grant(role, user=user, domain=domain)
+        if tenant:
+            self.api.roles.grant(role, user=user, project=tenant)
+
+    def find_endpoint_v3(self, interface, service_id, region):
+        found_eps = []
+        for ep in self.api.endpoints.list():
+            if ep.service_id == service_id and ep.region == region and \
+                    ep.interface == interface:
+                found_eps.append(ep)
+        return found_eps
+
+    def delete_old_endpoint_v3(self, interface, service_id, region, url):
+        eps = self.find_endpoint_v3(interface, service_id, region)
+        for ep in eps:
+            if getattr(ep, 'url') != url:
+                self.api.endpoints.delete(ep.id)
+                return True
+        return False
diff --git a/templates/liberty/policy.json b/templates/liberty/policy.json
new file mode 100644
index 00000000..8821ba06
--- /dev/null
+++ b/templates/liberty/policy.json
@@ -0,0 +1,382 @@
+{% if api_version == 3 -%}
+{
+    "admin_required": "role:{{ admin_role }}",
+    "cloud_admin": "rule:admin_required and domain_id:{{ admin_domain_id }}",
+    "service_role": "role:service",
+    "service_or_admin": "rule:admin_required or rule:service_role",
+    "owner" : "user_id:%(user_id)s or user_id:%(target.token.user_id)s",
+    "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner",
+    "admin_or_cloud_admin": "rule:admin_required or rule:cloud_admin",
+    "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s",
+    "service_admin_or_owner": "rule:service_or_admin or rule:owner",
+
+    "default": "rule:admin_required",
+
+    "identity:get_region": "",
+    "identity:list_regions": "",
+    "identity:create_region": "rule:cloud_admin",
+    "identity:update_region": "rule:cloud_admin",
+    "identity:delete_region": "rule:cloud_admin",
+
+    "identity:get_service": "rule:admin_or_cloud_admin",
+    "identity:list_services": "rule:admin_or_cloud_admin",
+    "identity:create_service": "rule:cloud_admin",
+    "identity:update_service": "rule:cloud_admin",
+    "identity:delete_service": "rule:cloud_admin",
+
+    "identity:get_endpoint": "rule:admin_or_cloud_admin",
+    "identity:list_endpoints": "rule:admin_or_cloud_admin",
+    "identity:create_endpoint": "rule:cloud_admin",
+    "identity:update_endpoint": "rule:cloud_admin",
+    "identity:delete_endpoint": "rule:cloud_admin",
+
+    "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id",
+    "identity:list_domains": "rule:cloud_admin",
+    "identity:create_domain": "rule:cloud_admin",
"identity:update_domain": "rule:cloud_admin", + "identity:delete_domain": "rule:cloud_admin", + + "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s", + "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s", + "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", + "identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id", + "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id", + "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id", + "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", + "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", + + "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s", + "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s", + "identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", + "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id", + "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id", + "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", + "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", + + "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s", + "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s", + "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id", + "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_domain_id", + "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id", + "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required or user_id:%(user_id)s", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)", + "identity:ec2_list_credentials": "rule:admin_or_cloud_admin or rule:owner", + "identity:ec2_create_credential": "rule:admin_or_cloud_admin or rule:owner", + "identity:ec2_delete_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_or_cloud_admin", + "identity:list_roles": "rule:admin_or_cloud_admin", + "identity:create_role": 
"rule:cloud_admin", + "identity:update_role": "rule:cloud_admin", + "identity:delete_role": "rule:cloud_admin", + + "domain_admin_for_grants": "rule:admin_required and (domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s)", + "project_admin_for_grants": "rule:admin_required and project_id:%(project_id)s", + "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", + "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", + "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", + "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", + + "admin_on_domain_filter" : "rule:admin_required and domain_id:%(scope.domain.id)s", + "admin_on_project_filter" : "rule:admin_required and project_id:%(scope.project.id)s", + "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter", + + "identity:get_policy": "rule:cloud_admin", + "identity:list_policies": "rule:cloud_admin", + "identity:create_policy": "rule:cloud_admin", + "identity:update_policy": "rule:cloud_admin", + "identity:delete_policy": "rule:cloud_admin", + + "identity:change_password": "rule:owner", + "identity:check_token": "rule:admin_or_owner", + "identity:validate_token": "rule:service_admin_or_owner", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_owner", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + "identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + "identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:get_endpoint_group_in_project": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:cloud_admin", + 
"identity:list_identity_providers": "rule:cloud_admin", + "identity:get_identity_providers": "rule:cloud_admin", + "identity:update_identity_provider": "rule:cloud_admin", + "identity:delete_identity_provider": "rule:cloud_admin", + + "identity:create_protocol": "rule:cloud_admin", + "identity:update_protocol": "rule:cloud_admin", + "identity:get_protocol": "rule:cloud_admin", + "identity:list_protocols": "rule:cloud_admin", + "identity:delete_protocol": "rule:cloud_admin", + + "identity:create_mapping": "rule:cloud_admin", + "identity:get_mapping": "rule:cloud_admin", + "identity:list_mappings": "rule:cloud_admin", + "identity:delete_mapping": "rule:cloud_admin", + "identity:update_mapping": "rule:cloud_admin", + + "identity:create_service_provider": "rule:cloud_admin", + "identity:list_service_providers": "rule:cloud_admin", + "identity:get_service_provider": "rule:cloud_admin", + "identity:update_service_provider": "rule:cloud_admin", + "identity:delete_service_provider": "rule:cloud_admin", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "", + + "identity:create_policy_association_for_endpoint": "rule:cloud_admin", + "identity:check_policy_association_for_endpoint": "rule:cloud_admin", + "identity:delete_policy_association_for_endpoint": "rule:cloud_admin", + "identity:create_policy_association_for_service": "rule:cloud_admin", + "identity:check_policy_association_for_service": "rule:cloud_admin", + "identity:delete_policy_association_for_service": "rule:cloud_admin", + "identity:create_policy_association_for_region_and_service": "rule:cloud_admin", + "identity:check_policy_association_for_region_and_service": "rule:cloud_admin", + "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin", + "identity:get_policy_for_endpoint": "rule:cloud_admin", + "identity:list_endpoints_for_policy": "rule:cloud_admin", + + "identity:create_domain_config": "rule:cloud_admin", + "identity:get_domain_config": "rule:cloud_admin", + "identity:update_domain_config": "rule:cloud_admin", + "identity:delete_domain_config": "rule:cloud_admin" +} +{% else -%} +{ + "admin_required": "role:admin or is_admin:1", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner" : "user_id:%(user_id)s", + "admin_or_owner": "rule:admin_required or rule:owner", + "token_subject": "user_id:%(target.token.user_id)s", + "admin_or_token_subject": "rule:admin_required or rule:token_subject", + "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:admin_required", + "identity:update_region": "rule:admin_required", + "identity:delete_region": "rule:admin_required", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:admin_required", + "identity:update_service": "rule:admin_required", + "identity:delete_service": "rule:admin_required", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": "rule:admin_required", + "identity:create_endpoint": "rule:admin_required", + "identity:update_endpoint": "rule:admin_required", + "identity:delete_endpoint": "rule:admin_required", + + "identity:get_domain": 
"rule:admin_required", + "identity:list_domains": "rule:admin_required", + "identity:create_domain": "rule:admin_required", + "identity:update_domain": "rule:admin_required", + "identity:delete_domain": "rule:admin_required", + + "identity:get_project": "rule:admin_required", + "identity:list_projects": "rule:admin_required", + "identity:list_user_projects": "rule:admin_or_owner", + "identity:create_project": "rule:admin_required", + "identity:update_project": "rule:admin_required", + "identity:delete_project": "rule:admin_required", + + "identity:get_user": "rule:admin_required", + "identity:list_users": "rule:admin_required", + "identity:create_user": "rule:admin_required", + "identity:update_user": "rule:admin_required", + "identity:delete_user": "rule:admin_required", + "identity:change_password": "rule:admin_or_owner", + + "identity:get_group": "rule:admin_required", + "identity:list_groups": "rule:admin_required", + "identity:list_groups_for_user": "rule:admin_or_owner", + "identity:create_group": "rule:admin_required", + "identity:update_group": "rule:admin_required", + "identity:delete_group": "rule:admin_required", + "identity:list_users_in_group": "rule:admin_required", + "identity:remove_user_from_group": "rule:admin_required", + "identity:check_user_in_group": "rule:admin_required", + "identity:add_user_to_group": "rule:admin_required", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + "identity:ec2_list_credentials": "rule:admin_or_owner", + "identity:ec2_create_credential": "rule:admin_or_owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:admin_required", + "identity:update_role": "rule:admin_required", + "identity:delete_role": "rule:admin_required", + + "identity:check_grant": "rule:admin_required", + "identity:list_grants": "rule:admin_required", + "identity:create_grant": "rule:admin_required", + "identity:revoke_grant": "rule:admin_required", + + "identity:list_role_assignments": "rule:admin_required", + + "identity:get_policy": "rule:admin_required", + "identity:list_policies": "rule:admin_required", + "identity:create_policy": "rule:admin_required", + "identity:update_policy": "rule:admin_required", + "identity:delete_policy": "rule:admin_required", + + "identity:check_token": "rule:admin_or_token_subject", + "identity:validate_token": "rule:service_admin_or_token_subject", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_token_subject", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + 
"identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + "identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:get_endpoint_group_in_project": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:admin_required", + "identity:list_identity_providers": "rule:admin_required", + "identity:get_identity_providers": "rule:admin_required", + "identity:update_identity_provider": "rule:admin_required", + "identity:delete_identity_provider": "rule:admin_required", + + "identity:create_protocol": "rule:admin_required", + "identity:update_protocol": "rule:admin_required", + "identity:get_protocol": "rule:admin_required", + "identity:list_protocols": "rule:admin_required", + "identity:delete_protocol": "rule:admin_required", + + "identity:create_mapping": "rule:admin_required", + "identity:get_mapping": "rule:admin_required", + "identity:list_mappings": "rule:admin_required", + "identity:delete_mapping": "rule:admin_required", + "identity:update_mapping": "rule:admin_required", + + "identity:create_service_provider": "rule:admin_required", + "identity:list_service_providers": "rule:admin_required", + "identity:get_service_provider": "rule:admin_required", + "identity:update_service_provider": "rule:admin_required", + "identity:delete_service_provider": "rule:admin_required", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "", + + "identity:create_policy_association_for_endpoint": "rule:admin_required", + "identity:check_policy_association_for_endpoint": "rule:admin_required", + "identity:delete_policy_association_for_endpoint": "rule:admin_required", + "identity:create_policy_association_for_service": "rule:admin_required", + "identity:check_policy_association_for_service": "rule:admin_required", + "identity:delete_policy_association_for_service": "rule:admin_required", + "identity:create_policy_association_for_region_and_service": "rule:admin_required", + "identity:check_policy_association_for_region_and_service": "rule:admin_required", + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", + "identity:get_policy_for_endpoint": "rule:admin_required", + 
"identity:list_endpoints_for_policy": "rule:admin_required", + + "identity:create_domain_config": "rule:admin_required", + "identity:get_domain_config": "rule:admin_required", + "identity:update_domain_config": "rule:admin_required", + "identity:delete_domain_config": "rule:admin_required" +} +{% endif -%} diff --git a/templates/liberty/policy.json.v2 b/templates/liberty/policy.json.v2 new file mode 100644 index 00000000..ebb94b02 --- /dev/null +++ b/templates/liberty/policy.json.v2 @@ -0,0 +1,184 @@ +{ + "admin_required": "role:admin or is_admin:1", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner" : "user_id:%(user_id)s", + "admin_or_owner": "rule:admin_required or rule:owner", + "token_subject": "user_id:%(target.token.user_id)s", + "admin_or_token_subject": "rule:admin_required or rule:token_subject", + "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:admin_required", + "identity:update_region": "rule:admin_required", + "identity:delete_region": "rule:admin_required", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:admin_required", + "identity:update_service": "rule:admin_required", + "identity:delete_service": "rule:admin_required", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": "rule:admin_required", + "identity:create_endpoint": "rule:admin_required", + "identity:update_endpoint": "rule:admin_required", + "identity:delete_endpoint": "rule:admin_required", + + "identity:get_domain": "rule:admin_required", + "identity:list_domains": "rule:admin_required", + "identity:create_domain": "rule:admin_required", + "identity:update_domain": "rule:admin_required", + "identity:delete_domain": "rule:admin_required", + + "identity:get_project": "rule:admin_required", + "identity:list_projects": "rule:admin_required", + "identity:list_user_projects": "rule:admin_or_owner", + "identity:create_project": "rule:admin_required", + "identity:update_project": "rule:admin_required", + "identity:delete_project": "rule:admin_required", + + "identity:get_user": "rule:admin_required", + "identity:list_users": "rule:admin_required", + "identity:create_user": "rule:admin_required", + "identity:update_user": "rule:admin_required", + "identity:delete_user": "rule:admin_required", + "identity:change_password": "rule:admin_or_owner", + + "identity:get_group": "rule:admin_required", + "identity:list_groups": "rule:admin_required", + "identity:list_groups_for_user": "rule:admin_or_owner", + "identity:create_group": "rule:admin_required", + "identity:update_group": "rule:admin_required", + "identity:delete_group": "rule:admin_required", + "identity:list_users_in_group": "rule:admin_required", + "identity:remove_user_from_group": "rule:admin_required", + "identity:check_user_in_group": "rule:admin_required", + "identity:add_user_to_group": "rule:admin_required", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + 
"identity:ec2_list_credentials": "rule:admin_or_owner", + "identity:ec2_create_credential": "rule:admin_or_owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:admin_required", + "identity:update_role": "rule:admin_required", + "identity:delete_role": "rule:admin_required", + + "identity:check_grant": "rule:admin_required", + "identity:list_grants": "rule:admin_required", + "identity:create_grant": "rule:admin_required", + "identity:revoke_grant": "rule:admin_required", + + "identity:list_role_assignments": "rule:admin_required", + + "identity:get_policy": "rule:admin_required", + "identity:list_policies": "rule:admin_required", + "identity:create_policy": "rule:admin_required", + "identity:update_policy": "rule:admin_required", + "identity:delete_policy": "rule:admin_required", + + "identity:check_token": "rule:admin_or_token_subject", + "identity:validate_token": "rule:service_admin_or_token_subject", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_token_subject", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + "identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + "identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:get_endpoint_group_in_project": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:admin_required", + "identity:list_identity_providers": "rule:admin_required", + "identity:get_identity_providers": "rule:admin_required", + "identity:update_identity_provider": "rule:admin_required", + "identity:delete_identity_provider": "rule:admin_required", + + "identity:create_protocol": "rule:admin_required", + "identity:update_protocol": 
"rule:admin_required", + "identity:get_protocol": "rule:admin_required", + "identity:list_protocols": "rule:admin_required", + "identity:delete_protocol": "rule:admin_required", + + "identity:create_mapping": "rule:admin_required", + "identity:get_mapping": "rule:admin_required", + "identity:list_mappings": "rule:admin_required", + "identity:delete_mapping": "rule:admin_required", + "identity:update_mapping": "rule:admin_required", + + "identity:create_service_provider": "rule:admin_required", + "identity:list_service_providers": "rule:admin_required", + "identity:get_service_provider": "rule:admin_required", + "identity:update_service_provider": "rule:admin_required", + "identity:delete_service_provider": "rule:admin_required", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "", + + "identity:create_policy_association_for_endpoint": "rule:admin_required", + "identity:check_policy_association_for_endpoint": "rule:admin_required", + "identity:delete_policy_association_for_endpoint": "rule:admin_required", + "identity:create_policy_association_for_service": "rule:admin_required", + "identity:check_policy_association_for_service": "rule:admin_required", + "identity:delete_policy_association_for_service": "rule:admin_required", + "identity:create_policy_association_for_region_and_service": "rule:admin_required", + "identity:check_policy_association_for_region_and_service": "rule:admin_required", + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", + "identity:get_policy_for_endpoint": "rule:admin_required", + "identity:list_endpoints_for_policy": "rule:admin_required", + + "identity:create_domain_config": "rule:admin_required", + "identity:get_domain_config": "rule:admin_required", + "identity:update_domain_config": "rule:admin_required", + "identity:delete_domain_config": "rule:admin_required" +} diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 258a7510..7ba1b3c2 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -17,6 +17,8 @@ from charmhelpers.contrib.openstack.amulet.utils import ( DEBUG, # ERROR ) +import keystoneclient +from charmhelpers.core.decorators import retry_on_exception # Use DEBUG to turn on debug logging u = OpenStackAmuletUtils(DEBUG) @@ -30,6 +32,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): """Deploy the entire test environment.""" super(KeystoneBasicDeployment, self).__init__(series, openstack, source, stable) + self.keystone_api_version = 2 self.git = git self._add_services() self._add_relations() @@ -37,8 +40,8 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): self._deploy() u.log.info('Waiting on extended status checks...') - exclude_services = ['mysql'] - self._auto_wait_for_status(exclude_services=exclude_services) + self.exclude_services = ['mysql'] + self._auto_wait_for_status(exclude_services=self.exclude_services) self._initialize_tests() @@ -72,7 +75,8 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): def _configure_services(self): """Configure all of the services.""" keystone_config = {'admin-password': 'openstack', - 'admin-token': 'ubuntutesting'} + 'admin-token': 'ubuntutesting', + 'preferred-api-version': self.keystone_api_version} if self.git: amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') @@ -109,6 +113,103 @@ class 
KeystoneBasicDeployment(OpenStackAmuletDeployment): } super(KeystoneBasicDeployment, self)._configure_services(configs) + @retry_on_exception(5, base_delay=10) + def set_api_version(self, api_version): + set_alternate = {'preferred-api-version': api_version} + + # Make config change, check for service restarts + u.log.debug('Setting preferred-api-version={}'.format(api_version)) + self.d.configure('keystone', set_alternate) + self.keystone_api_version = api_version + client = self.get_keystone_client(api_version=api_version) + # List an artefact that needs authorisation to check admin user + # has been set up. If that is still in progress + # keystoneclient.exceptions.Unauthorized will be thrown and caught by + # @retry_on_exception + if api_version == 2: + client.tenants.list() + self.keystone_v2 = self.get_keystone_client(api_version=2) + else: + client.projects.list() + self.keystone_v3 = self.get_keystone_client(api_version=3) + + def get_keystone_client(self, api_version=None): + if api_version == 2: + return u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin', + api_version=api_version, + keystone_ip=self.keystone_ip) + else: + return u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + api_version=api_version, + keystone_ip=self.keystone_ip) + + def create_users_v2(self): + # Create a demo tenant/role/user + self.demo_tenant = 'demoTenant' + self.demo_role = 'demoRole' + self.demo_user = 'demoUser' + if not u.tenant_exists(self.keystone_v2, self.demo_tenant): + tenant = self.keystone_v2.tenants.create( + tenant_name=self.demo_tenant, + description='demo tenant', + enabled=True) + self.keystone_v2.roles.create(name=self.demo_role) + self.keystone_v2.users.create(name=self.demo_user, + password='password', + tenant_id=tenant.id, + email='demo@demo.com') + + # Authenticate keystone demo + self.keystone_demo = u.authenticate_keystone_user( + self.keystone_v2, user=self.demo_user, + password='password', tenant=self.demo_tenant) + + def create_users_v3(self): + # Create a demo tenant/role/user + self.demo_project = 'demoProject' + self.demo_user_v3 = 'demoUserV3' + self.demo_domain = 'demoDomain' + try: + domain = self.keystone_v3.domains.find(name=self.demo_domain) + except keystoneclient.exceptions.NotFound: + domain = self.keystone_v3.domains.create( + self.demo_domain, + description='Demo Domain', + enabled=True + ) + + try: + self.keystone_v3.projects.find(name=self.demo_project) + except keystoneclient.exceptions.NotFound: + self.keystone_v3.projects.create( + self.demo_project, + domain, + description='Demo Project', + enabled=True, + ) + + try: + self.keystone_v3.roles.find(name=self.demo_role) + except keystoneclient.exceptions.NotFound: + self.keystone_v3.roles.create(name=self.demo_role) + + try: + self.keystone_v3.users.find(name=self.demo_user_v3) + except keystoneclient.exceptions.NotFound: + self.keystone_v3.users.create( + self.demo_user_v3, + domain=domain.id, + project=self.demo_project, + password='password', + email='demov3@demo.com', + description='Demo', + enabled=True) + def _initialize_tests(self): """Perform final initialization before tests get run.""" # Access the sentries for inspecting service units @@ -119,31 +220,14 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): self._get_openstack_release())) u.log.debug('openstack release str: {}'.format( self._get_openstack_release_string())) - + self.keystone_ip = self.keystone_sentry.relation( + 'shared-db', + 
'mysql:shared-db')['private-address'] + self.set_api_version(2) # Authenticate keystone admin - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, - user='admin', - password='openstack', - tenant='admin') - - # Create a demo tenant/role/user - self.demo_tenant = 'demoTenant' - self.demo_role = 'demoRole' - self.demo_user = 'demoUser' - if not u.tenant_exists(self.keystone, self.demo_tenant): - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, - description='demo tenant', - enabled=True) - self.keystone.roles.create(name=self.demo_role) - self.keystone.users.create(name=self.demo_user, - password='password', - tenant_id=tenant.id, - email='demo@demo.com') - - # Authenticate keystone demo - self.keystone_demo = u.authenticate_keystone_user( - self.keystone, user=self.demo_user, - password='password', tenant=self.demo_tenant) + self.keystone_v2 = self.get_keystone_client(api_version=2) + self.keystone_v3 = self.get_keystone_client(api_version=3) + self.create_users_v2() def test_100_services(self): """Verify the expected services are running on the corresponding @@ -159,7 +243,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_102_keystone_tenants(self): + def validate_keystone_tenants(self, client): """Verify all existing tenants.""" u.log.debug('Checking keystone tenants...') expected = [ @@ -176,13 +260,20 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): 'description': 'Created by Juju', 'id': u.not_null} ] - actual = self.keystone.tenants.list() + if self.keystone_api_version == 2: + actual = client.tenants.list() + else: + actual = client.projects.list() ret = u.validate_tenant_data(expected, actual) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_104_keystone_roles(self): + def test_102_keystone_tenants(self): + self.set_api_version(2) + self.validate_keystone_tenants(self.keystone_v2) + + def validate_keystone_roles(self, client): """Verify all existing roles.""" u.log.debug('Checking keystone roles...') expected = [ @@ -191,40 +282,113 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): {'name': 'Admin', 'id': u.not_null} ] - actual = self.keystone.roles.list() + actual = client.roles.list() ret = u.validate_role_data(expected, actual) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_106_keystone_users(self): + def test_104_keystone_roles(self): + self.set_api_version(2) + self.validate_keystone_roles(self.keystone_v2) + + def validate_keystone_users(self, client): """Verify all existing roles.""" u.log.debug('Checking keystone users...') - expected = [ + base = [ {'name': 'demoUser', 'enabled': True, - 'tenantId': u.not_null, 'id': u.not_null, 'email': 'demo@demo.com'}, {'name': 'admin', 'enabled': True, - 'tenantId': u.not_null, 'id': u.not_null, 'email': 'juju@localhost'}, {'name': 'cinder_cinderv2', 'enabled': True, - 'tenantId': u.not_null, 'id': u.not_null, 'email': u'juju@localhost'} ] - actual = self.keystone.users.list() - ret = u.validate_user_data(expected, actual) + expected = [] + for user_info in base: + if self.keystone_api_version == 2: + user_info['tenantId'] = u.not_null + else: + user_info['default_project_id'] = u.not_null + expected.append(user_info) + actual = client.users.list() + ret = u.validate_user_data(expected, actual, + api_version=self.keystone_api_version) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_108_service_catalog(self): + def test_106_keystone_users(self): + 
self.set_api_version(2) + self.validate_keystone_users(self.keystone_v2) + + def is_liberty_or_newer(self): + os_release = self._get_openstack_release_string() + if os_release >= 'liberty': + return True + else: + u.log.info('Skipping test, {} < liberty'.format(os_release)) + return False + + def test_112_keystone_tenants(self): + if self.is_liberty_or_newer(): + self.set_api_version(3) + self.validate_keystone_tenants(self.keystone_v3) + + def test_114_keystone_roles(self): + if self.is_liberty_or_newer(): + self.set_api_version(3) + self.validate_keystone_roles(self.keystone_v3) + + def test_116_keystone_users(self): + if self.is_liberty_or_newer(): + self.set_api_version(3) + self.validate_keystone_users(self.keystone_v3) + + def test_118_keystone_users(self): + if self.is_liberty_or_newer(): + self.set_api_version(3) + self.create_users_v3() + actual_user = self.keystone_v3.users.find(name=self.demo_user_v3) + expect = { + 'default_project_id': self.demo_project, + 'email': 'demov3@demo.com', + 'name': self.demo_user_v3, + } + for key in expect.keys(): + u.log.debug('Checking user {} {} is {}'.format( + self.demo_user_v3, + key, + expect[key]) + ) + assert expect[key] == getattr(actual_user, key) + + def test_120_keystone_domains(self): + if self.is_liberty_or_newer(): + self.set_api_version(3) + self.create_users_v3() + actual_domain = self.keystone_v3.domains.find( + name=self.demo_domain + ) + expect = { + 'name': self.demo_domain, + } + for key in expect.keys(): + u.log.debug('Checking domain {} {} is {}'.format( + self.demo_domain, + key, + expect[key]) + ) + assert expect[key] == getattr(actual_domain, key) + + def test_138_service_catalog(self): """Verify that the service catalog endpoint data is valid.""" u.log.debug('Checking keystone service catalog...') + self.set_api_version(2) endpoint_check = { 'adminURL': u.valid_url, 'id': u.not_null, @@ -236,16 +400,16 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): 'volume': [endpoint_check], 'identity': [endpoint_check] } - actual = self.keystone.service_catalog.get_endpoints() + actual = self.keystone_v2.service_catalog.get_endpoints() ret = u.validate_svc_catalog_endpoint_data(expected, actual) if ret: amulet.raise_status(amulet.FAIL, msg=ret) - def test_110_keystone_endpoint(self): + def test_140_keystone_endpoint(self): """Verify the keystone endpoint data.""" u.log.debug('Checking keystone api endpoint data...') - endpoints = self.keystone.endpoints.list() + endpoints = self.keystone_v2.endpoints.list() admin_port = '35357' internal_port = public_port = '5000' expected = { @@ -262,10 +426,10 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment): amulet.raise_status(amulet.FAIL, msg='keystone endpoint: {}'.format(ret)) - def test_112_cinder_endpoint(self): + def test_142_cinder_endpoint(self): """Verify the cinder endpoint data.""" u.log.debug('Checking cinder endpoint...') - endpoints = self.keystone.endpoints.list() + endpoints = self.keystone_v2.endpoints.list() admin_port = internal_port = public_port = '8776' expected = { 'id': u.not_null, diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 388b60e6..2995124d 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,10 @@ import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client 
+from keystoneclient.auth.identity import v3 as keystone_id_v3 +from keystoneclient import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 + import novaclient.v1_1.client as nova_client import pika import swiftclient @@ -139,7 +143,7 @@ class OpenStackAmuletUtils(AmuletUtils): return "role {} does not exist".format(e['name']) return ret - def validate_user_data(self, expected, actual): + def validate_user_data(self, expected, actual, api_version=None): """Validate user data. Validate a list of actual user data vs a list of expected user @@ -150,10 +154,15 @@ for e in expected: found = False for act in actual: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'tenantId': act.tenantId, - 'id': act.id} - if e['name'] == a['name']: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 2: + a['tenantId'] = act.tenantId + else: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') found = True ret = self._validate_dict_data(e, a) if ret: @@ -188,15 +197,30 @@ class OpenStackAmuletUtils(AmuletUtils): return cinder_client.Client(username, password, tenant, ept) def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant): + tenant=None, api_version=None, + keystone_ip=None): """Authenticates admin user with the keystone admin endpoint.""" self.log.debug('Authenticating keystone admin...') unit = keystone_sentry - service_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) + if not keystone_ip: + keystone_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) + if not api_version or api_version == 2: + ep = base_ep + "/v2.0" + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + else: + ep = base_ep + "/v3" + auth = keystone_id_v3.Password( + user_domain_name='admin_domain', + username=user, + password=password, + domain_name='admin_domain', + auth_url=ep, + ) + sess = keystone_session.Session(auth=auth) + return keystone_client_v3.Client(session=sess) def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" diff --git a/tests/charmhelpers/core/__init__.py b/tests/charmhelpers/core/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/tests/charmhelpers/core/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
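For reference, the new v3 branch of authenticate_keystone_admin above reduces to the following standalone sketch. The unit address is a hypothetical placeholder; the user, password, domain name and admin port follow the charm defaults described in the commit message (admin_domain, port 35357):

    from keystoneclient.auth.identity import v3 as keystone_id_v3
    from keystoneclient import session as keystone_session
    from keystoneclient.v3 import client as keystone_client_v3

    KEYSTONE_IP = '10.0.0.10'  # hypothetical unit address

    # Password auth against the charm-created admin_domain, scoped to that
    # same domain so the new policy.json treats the user as a cloud admin.
    auth = keystone_id_v3.Password(
        user_domain_name='admin_domain',
        username='admin',
        password='openstack',
        domain_name='admin_domain',
        auth_url='http://{}:35357/v3'.format(KEYSTONE_IP),
    )
    sess = keystone_session.Session(auth=auth)
    keystone = keystone_client_v3.Client(session=sess)
    # Any call that needs authorisation confirms the admin setup, e.g.:
    print([d.name for d in keystone.domains.list()])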
diff --git a/tests/charmhelpers/core/decorators.py b/tests/charmhelpers/core/decorators.py new file mode 100644 index 00000000..bb05620b --- /dev/null +++ b/tests/charmhelpers/core/decorators.py @@ -0,0 +1,57 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raising the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..2dd70bc9 --- /dev/null +++ b/tests/charmhelpers/core/hookenv.py @@ -0,0 +1,978 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from functools import wraps +import glob +import os +import json +import yaml +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. 
+ """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name of the service group this unit belongs to""" + return local_unit().split('/')[0] + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? 
+ >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework <services.base>` + or :meth:'@hook <Hooks.hook>' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. + + """ + with open(self.path, 'w') as f: + json.dump(self, f) + + def _implicit_save(self): + if self.implicit_save: + self.save() + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + settings = relation_settings.copy() + settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. 
+ if value is not None: + settings[key] = "{}".format(value) + if accepts_file: + # --file was introduced in Juju 1.23.2. Use it by default if + # available, since otherwise we'll break if the relation data is + # too big. Ideally we should tell relation-set to read the data from + # stdin, but that feature is broken in 1.23.2: Bug #1454678. + with tempfile.NamedTemporaryFile(delete=False) as settings_file: + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) + subprocess.check_call( + relation_cmd_line + ["--file", settings_file.name]) + os.remove(settings_file.name) + else: + for key, value in settings.items(): + if value is None: + relation_cmd_line.append('{}='.format(key)) + else: + relation_cmd_line.append('{}={}'.format(key, value)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] + return [] + + +@cached +def related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json representation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + rel_types = [] + md = metadata() + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + return rel_types + + +@cached +def peer_relation_id(): + '''Get the peers relation id if a peers relation has been joined, else None.''' + md = metadata() + section = md.get('peers') + if section: + for key in section: + relids = relation_ids(key) + if relids: + return relids[0] + return None + + 
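For reference, the relation query helpers above are typically combined as follows; a minimal sketch, assuming a Juju hook context and a hypothetical 'identity-service' relation name:

    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            # relation_get with unit and rid reads that unit's settings bag
            settings = relation_get(unit=unit, rid=rid)
            log('{} -> {}'.format(unit, settings), level=DEBUG)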
+@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. + """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). 
If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +@cached +def storage_get(attribute=None, storage_id=None): + """Get storage attributes""" + _args = ['storage-get', '--format=json'] + if storage_id: + _args.extend(('-s', storage_id)) + if attribute: + _args.append(attribute) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +@cached +def storage_list(storage_name=None): + """List the storage IDs for the unit""" + _args = ['storage-list', '--format=json'] + if storage_name: + _args.append(storage_name) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except OSError as e: + import errno + if e.errno == errno.ENOENT: + # storage-list does not exist + return [] + raise + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. + + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=None): + super(Hooks, self).__init__() + self._hooks = {} + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. 
+ if config_save is not None: + config().implicit_save = config_save + + def register(self, name, function): + """Register a hook""" + self._hooks[name] = function + + def execute(self, args): + """Execute a registered hook based on args[0]""" + _run_atstart() + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + """Decorator, registering them as hooks""" + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + """Return the root directory of the current charm""" + return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to failed and sets the error message. + + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) + + +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + +def status_set(workload_state, message): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message instead. + + workload_state -- valid juju workload state. 
+ message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" + + """ + cmd = ['status-get', "--format=json", "--include-data"] + try: + raw_status = subprocess.check_output(cmd) + except OSError as e: + if e.errno == errno.ENOENT: + return ('unknown', "") + else: + raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + @wraps(f) + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The <klass> and <pid> provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The <klass> and <pid> provided must match a payload that has been previously + registered with juju using payload-register. 
The <status> must be one of the + following: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + +@cached +def juju_version(): + """Full version string (eg. '1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. In particular: + + - Run common code before all of your hooks, such as logging + the hook name or interesting relation data. + - Defer object or module initialization that requires a hook + context until we know there actually is a hook context, + making testing easier. + - Rather than requiring charm authors to include boilerplate to + invoke your helper's behavior, have it run automatically if + your object is instantiated or module imported. + + This is not at all useful after your hook framework has been launched. + ''' + global _atstart + _atstart.append((callback, args, kwargs)) + + +def atexit(callback, *args, **kwargs): + '''Schedule a callback to run on successful hook completion. + + Callbacks are run in the reverse order that they were added.''' + _atexit.append((callback, args, kwargs)) + + +def _run_atstart(): + '''Hook frameworks must invoke this before running the main hook body.''' + global _atstart + for callback, args, kwargs in _atstart: + callback(*args, **kwargs) + del _atstart[:] + + +def _run_atexit(): + '''Hook frameworks must invoke this after the main hook body has + successfully completed. 
Do not invoke it if the hook fails.''' + global _atexit + for callback, args, kwargs in reversed(_atexit): + callback(*args, **kwargs) + del _atexit[:] diff --git a/tox.ini b/tox.ini index e8bf7cf9..20ee3778 100644 --- a/tox.ini +++ b/tox.ini @@ -18,7 +18,7 @@ deps = -r{toxinidir}/requirements.txt basepython = python2.7 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = flake8 {posargs} hooks unit_tests tests +commands = flake8 {posargs} --exclude */charmhelpers hooks unit_tests tests charm proof [testenv:venv] diff --git a/unit_tests/test_actions.py b/unit_tests/test_actions.py index c1e908a5..8346f17c 100644 --- a/unit_tests/test_actions.py +++ b/unit_tests/test_actions.py @@ -5,7 +5,8 @@ from test_utils import CharmTestCase with patch('actions.hooks.keystone_utils.is_paused') as is_paused: with patch('actions.hooks.keystone_utils.register_configs') as configs: - import actions.actions + with patch('actions.hooks.keystone_utils.os_release') as os_release: + import actions.actions class PauseTestCase(CharmTestCase): @@ -15,7 +16,8 @@ class PauseTestCase(CharmTestCase): actions.actions, ["service_pause", "HookData", "kv", "assess_status"]) - def test_pauses_services(self): + @patch('actions.hooks.keystone_utils.os_release') + def test_pauses_services(self, os_release): """Pause action pauses all Keystone services.""" pause_calls = [] @@ -29,7 +31,8 @@ class PauseTestCase(CharmTestCase): self.assertItemsEqual( pause_calls, ['haproxy', 'keystone', 'apache2']) - def test_bails_out_early_on_error(self): + @patch('actions.hooks.keystone_utils.os_release') + def test_bails_out_early_on_error(self, os_release): """Pause action fails early if there are errors stopping a service.""" pause_calls = [] @@ -46,7 +49,8 @@ class PauseTestCase(CharmTestCase): actions.actions.pause, []) self.assertEqual(pause_calls, ['haproxy']) - def test_pause_sets_value(self): + @patch('actions.hooks.keystone_utils.os_release') + def test_pause_sets_value(self, os_release): """Pause action sets the unit-paused value to True.""" self.HookData()().return_value = True @@ -61,7 +65,8 @@ class ResumeTestCase(CharmTestCase): actions.actions, ["service_resume", "HookData", "kv", "assess_status"]) - def test_resumes_services(self): + @patch('actions.hooks.keystone_utils.os_release') + def test_resumes_services(self, os_release): """Resume action resumes all Keystone services.""" resume_calls = [] @@ -73,7 +78,8 @@ class ResumeTestCase(CharmTestCase): actions.actions.resume([]) self.assertEqual(resume_calls, ['haproxy', 'keystone', 'apache2']) - def test_bails_out_early_on_error(self): + @patch('actions.hooks.keystone_utils.os_release') + def test_bails_out_early_on_error(self, os_release): """Resume action fails early if there are errors starting a service.""" resume_calls = [] @@ -90,7 +96,8 @@ class ResumeTestCase(CharmTestCase): actions.actions.resume, []) self.assertEqual(resume_calls, ['haproxy']) - def test_resume_sets_value(self): + @patch('actions.hooks.keystone_utils.os_release') + def test_resume_sets_value(self, os_release): """Resume action sets the unit-paused value to False.""" self.HookData()().return_value = True diff --git a/unit_tests/test_actions_git_reinstall.py b/unit_tests/test_actions_git_reinstall.py index 7ffe5afe..9284b691 100644 --- a/unit_tests/test_actions_git_reinstall.py +++ b/unit_tests/test_actions_git_reinstall.py @@ -1,7 +1,8 @@ from mock import patch with patch('hooks.keystone_utils.register_configs') as register_configs: - import git_reinstall + with 
patch('hooks.keystone_utils.os_release') as os_release: + import git_reinstall from test_utils import ( CharmTestCase diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py index f20d09cf..1c382a01 100644 --- a/unit_tests/test_actions_openstack_upgrade.py +++ b/unit_tests/test_actions_openstack_upgrade.py @@ -4,8 +4,9 @@ import os os.environ['JUJU_UNIT_NAME'] = 'keystone' with patch('keystone_utils.register_configs') as register_configs: - import openstack_upgrade - import keystone_hooks as hooks + with patch('keystone_utils.os_release') as os_release: + import openstack_upgrade + import keystone_hooks as hooks from test_utils import ( CharmTestCase @@ -23,13 +24,14 @@ class TestKeystoneUpgradeActions(CharmTestCase): super(TestKeystoneUpgradeActions, self).setUp(openstack_upgrade, TO_PATCH) + @patch.object(hooks, 'os_release') @patch.object(hooks, 'register_configs') @patch('charmhelpers.contrib.openstack.utils.config') @patch('charmhelpers.contrib.openstack.utils.action_set') @patch('charmhelpers.contrib.openstack.utils.git_install_requested') @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') def test_openstack_upgrade_true(self, upgrade_avail, git_requested, - action_set, config, reg_configs): + action_set, config, reg_configs, os_rel): git_requested.return_value = False upgrade_avail.return_value = True config.return_value = True @@ -40,13 +42,14 @@ class TestKeystoneUpgradeActions(CharmTestCase): self.os.execl.assert_called_with('./hooks/config-changed-postupgrade', '') + @patch.object(hooks, 'os_release') @patch.object(hooks, 'register_configs') @patch('charmhelpers.contrib.openstack.utils.config') @patch('charmhelpers.contrib.openstack.utils.action_set') @patch('charmhelpers.contrib.openstack.utils.git_install_requested') @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') def test_openstack_upgrade_false(self, upgrade_avail, git_requested, - action_set, config, reg_configs): + action_set, config, reg_configs, os_rel): git_requested.return_value = False upgrade_avail.return_value = True config.return_value = False diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index dae936f7..e20e6afa 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -73,6 +73,7 @@ TO_PATCH = [ 'git_install', 'is_service_present', 'delete_service_entry', + 'os_release', ] @@ -83,9 +84,10 @@ class KeystoneRelationTests(CharmTestCase): self.config.side_effect = self.test_config.get self.ssh_user = 'juju_keystone' + @patch.object(utils, 'os_release') @patch.object(utils, 'git_install_requested') @patch.object(unison, 'ensure_user') - def test_install_hook(self, ensure_user, git_requested): + def test_install_hook(self, ensure_user, git_requested, os_release): git_requested.return_value = False repo = 'cloud:precise-grizzly' self.test_config.set('openstack-origin', repo) @@ -100,9 +102,10 @@ class KeystoneRelationTests(CharmTestCase): 'python-six', 'unison', 'uuid'], fatal=True) self.git_install.assert_called_with(None) + @patch.object(utils, 'os_release') @patch.object(utils, 'git_install_requested') @patch.object(unison, 'ensure_user') - def test_install_hook_git(self, ensure_user, git_requested): + def test_install_hook_git(self, ensure_user, git_requested, os_release): git_requested.return_value = True repo = 'cloud:trusty-juno' openstack_origin_git = { @@ -135,6 +138,7 @@ class KeystoneRelationTests(CharmTestCase): mod_ch_openstack_utils = 
diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py
index dae936f7..e20e6afa 100644
--- a/unit_tests/test_keystone_hooks.py
+++ b/unit_tests/test_keystone_hooks.py
@@ -73,6 +73,7 @@ TO_PATCH = [
     'git_install',
     'is_service_present',
     'delete_service_entry',
+    'os_release',
 ]
 
 
@@ -83,9 +84,10 @@ class KeystoneRelationTests(CharmTestCase):
         self.config.side_effect = self.test_config.get
         self.ssh_user = 'juju_keystone'
 
+    @patch.object(utils, 'os_release')
     @patch.object(utils, 'git_install_requested')
     @patch.object(unison, 'ensure_user')
-    def test_install_hook(self, ensure_user, git_requested):
+    def test_install_hook(self, ensure_user, git_requested, os_release):
         git_requested.return_value = False
         repo = 'cloud:precise-grizzly'
         self.test_config.set('openstack-origin', repo)
@@ -100,9 +102,10 @@ class KeystoneRelationTests(CharmTestCase):
             'python-six', 'unison', 'uuid'], fatal=True)
         self.git_install.assert_called_with(None)
 
+    @patch.object(utils, 'os_release')
     @patch.object(utils, 'git_install_requested')
     @patch.object(unison, 'ensure_user')
-    def test_install_hook_git(self, ensure_user, git_requested):
+    def test_install_hook_git(self, ensure_user, git_requested, os_release):
         git_requested.return_value = True
         repo = 'cloud:trusty-juno'
         openstack_origin_git = {
@@ -135,6 +138,7 @@ class KeystoneRelationTests(CharmTestCase):
     mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils'
 
+    @patch.object(utils, 'os_release')
     @patch.object(hooks, 'config')
     @patch('%s.config' % (mod_ch_openstack_utils))
     @patch('%s.relation_set' % (mod_ch_openstack_utils))
@@ -143,7 +147,7 @@ class KeystoneRelationTests(CharmTestCase):
     @patch('%s.sync_db_with_multi_ipv6_addresses' % (mod_ch_openstack_utils))
     def test_db_joined(self, mock_sync_db_with_multi, mock_get_ipv6_addr,
                        mock_relation_ids, mock_relation_set, mock_config,
-                       mock_hooks_config):
+                       mock_hooks_config, os_release):
 
         cfg_dict = {'prefer-ipv6': False,
                     'database': 'keystone',
@@ -317,6 +321,7 @@ class KeystoneRelationTests(CharmTestCase):
                                       mock_ensure_ssl_cert_master,
                                       mock_log, mock_peer_store,
                                       mock_peer_retrieve, mock_relation_ids):
+        self.os_release.return_value = 'kilo'
         mock_relation_ids.return_value = ['peer/0']
 
         peer_settings = {}
@@ -907,6 +912,7 @@ class KeystoneRelationTests(CharmTestCase):
         cmd = ['a2dissite', 'openstack_https_frontend']
         self.check_call.assert_called_with(cmd)
 
+    @patch.object(utils, 'os_release')
     @patch.object(utils, 'git_install_requested')
     @patch.object(hooks, 'is_db_ready')
     @patch.object(hooks, 'is_db_initialised')
@@ -926,7 +932,8 @@ class KeystoneRelationTests(CharmTestCase):
                                            mock_log,
                                            mock_is_db_initialised,
                                            mock_is_db_ready,
-                                           git_requested):
+                                           git_requested,
+                                           os_release):
         mock_is_db_initialised.return_value = True
         mock_is_db_ready.return_value = True
         mock_is_elected_leader.return_value = False
@@ -949,6 +956,7 @@ class KeystoneRelationTests(CharmTestCase):
             'Firing identity_changed hook for all related services.')
         self.assertTrue(self.ensure_initial_admin.called)
 
+    @patch.object(utils, 'os_release')
     @patch.object(utils, 'git_install_requested')
     @patch('keystone_utils.log')
     @patch('keystone_utils.relation_ids')
@@ -959,7 +967,8 @@ class KeystoneRelationTests(CharmTestCase):
                                          mock_update_hash_from_path,
                                          mock_ensure_ssl_cert_master,
                                          mock_relation_ids,
-                                         mock_log, git_requested):
+                                         mock_log, git_requested,
+                                         os_release):
         mock_relation_ids.return_value = []
         mock_ensure_ssl_cert_master.return_value = False
         # Ensure always returns diff
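Every name listed in TO_PATCH, including the 'os_release' entry added above, is patched for the duration of each test and exposed as an attribute on the test case; that is why test bodies can simply pin self.os_release.return_value = 'kilo' instead of stacking yet another decorator. Roughly, the test_utils machinery behaves like this sketch (illustrative only, not the real test_utils code):

    import unittest
    from mock import patch

    class CharmTestCaseSketch(unittest.TestCase):
        def setUp(self, obj, patches):
            super(CharmTestCaseSketch, self).setUp()
            for name in patches:
                patcher = patch.object(obj, name)
                # The started mock becomes e.g. self.os_release, so a test
                # can set its return_value directly.
                setattr(self, name, patcher.start())
                self.addCleanup(patcher.stop)
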
diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py
index 11f35e85..7e744bfd 100644
--- a/unit_tests/test_keystone_utils.py
+++ b/unit_tests/test_keystone_utils.py
@@ -1,7 +1,6 @@
 from mock import patch, call, MagicMock, Mock
 from test_utils import CharmTestCase
 import os
-import manager
 
 os.environ['JUJU_UNIT_NAME'] = 'keystone'
 with patch('charmhelpers.core.hookenv.config') as config:
@@ -172,10 +171,11 @@ class TestKeystoneUtils(CharmTestCase):
         self.subprocess.check_output.assert_called_with(cmd)
         self.service_start.assert_called_with('keystone')
 
+    @patch.object(utils, 'get_manager')
     @patch.object(utils, 'resolve_address')
     @patch.object(utils, 'b64encode')
     def test_add_service_to_keystone_clustered_https_none_values(
-            self, b64encode, _resolve_address):
+            self, b64encode, _resolve_address, _get_manager):
         relation_id = 'identity-service:0'
         remote_unit = 'unit/0'
         _resolve_address.return_value = '10.10.10.10'
@@ -214,7 +214,7 @@ class TestKeystoneUtils(CharmTestCase):
     @patch.object(utils, 'resolve_address')
     @patch.object(utils, 'ensure_valid_service')
     @patch.object(utils, 'add_endpoint')
-    @patch.object(manager, 'KeystoneManager')
+    @patch.object(utils, 'get_manager')
     def test_add_service_to_keystone_no_clustered_no_https_complete_values(
             self, KeystoneManager, add_endpoint, ensure_valid_service,
             _resolve_address):
@@ -253,9 +253,12 @@ class TestKeystoneUtils(CharmTestCase):
             internalurl='192.168.1.2')
         self.assertTrue(self.get_admin_token.called)
         self.get_service_password.assert_called_with('keystone')
-        self.create_user.assert_called_with('keystone', 'password', 'tenant')
-        self.grant_role.assert_called_with('keystone', 'admin', 'tenant')
-        self.create_role.assert_called_with('role1', 'keystone', 'tenant')
+        self.create_user.assert_called_with('keystone', 'password', 'tenant',
+                                            None)
+        self.grant_role.assert_called_with('keystone', 'Admin', 'tenant',
+                                           None)
+        self.create_role.assert_called_with('role1', 'keystone', 'tenant',
+                                            None)
 
         relation_data = {'auth_host': '10.0.0.3', 'service_host': '10.0.0.3',
                          'admin_token': 'token', 'service_port': 81,
@@ -266,7 +269,7 @@ class TestKeystoneUtils(CharmTestCase):
                          'ssl_cert': '__null__', 'ssl_key': '__null__',
                          'ca_cert': '__null__',
                          'auth_protocol': 'http', 'service_protocol': 'http',
-                         'service_tenant_id': 'tenant_id'}
+                         'service_tenant_id': 'tenant_id', 'api_version': 2}
 
         filtered = {}
         for k, v in relation_data.iteritems():
@@ -284,7 +287,7 @@ class TestKeystoneUtils(CharmTestCase):
     @patch('charmhelpers.contrib.openstack.ip.config')
     @patch.object(utils, 'ensure_valid_service')
     @patch.object(utils, 'add_endpoint')
-    @patch.object(manager, 'KeystoneManager')
+    @patch.object(utils, 'get_manager')
     def test_add_service_to_keystone_nosubset(
             self, KeystoneManager, add_endpoint, ensure_valid_service,
             ip_config):
@@ -317,8 +320,9 @@ class TestKeystoneUtils(CharmTestCase):
                                            mock_grant_role,
                                            mock_user_exists):
         mock_user_exists.return_value = False
-        utils.create_user_credentials('userA', 'tenantA', 'passA')
-        mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA')])
+        utils.create_user_credentials('userA', 'passA', tenant='tenantA')
+        mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA',
+                                                None)])
         mock_create_role.assert_has_calls([])
         mock_grant_role.assert_has_calls([])
@@ -329,11 +333,14 @@ class TestKeystoneUtils(CharmTestCase):
     def test_create_user_credentials(self, mock_create_user, mock_create_role,
                                      mock_grant_role, mock_user_exists):
         mock_user_exists.return_value = False
-        utils.create_user_credentials('userA', 'tenantA', 'passA',
+        utils.create_user_credentials('userA', 'passA', tenant='tenantA',
                                       grants=['roleA'], new_roles=['roleB'])
-        mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA')])
-        mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA')])
-        mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA')])
+        mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA',
+                                                None)])
+        mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA',
+                                                None)])
+        mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA',
+                                               None)])
 
     @patch.object(utils, 'update_user_password')
     @patch.object(utils, 'user_exists')
@@ -346,11 +353,13 @@ class TestKeystoneUtils(CharmTestCase):
                                                mock_user_exists,
                                                mock_update_user_password):
         mock_user_exists.return_value = True
-        utils.create_user_credentials('userA', 'tenantA', 'passA',
+        utils.create_user_credentials('userA', 'passA', tenant='tenantA',
                                       grants=['roleA'], new_roles=['roleB'])
         mock_create_user.assert_has_calls([])
-        mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA')])
-        mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA')])
+        mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA',
+                                                None)])
+        mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA',
+                                               None)])
         mock_update_user_password.assert_has_calls([call('userA', 'passA')])
 
     @patch.object(utils, 'get_service_password')
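Taken together, the assertions above pin down the reworked create_user_credentials() contract: the password now follows the user positionally, tenant moves to a keyword argument, and a trailing domain argument (left as None on the v2 path) is threaded through to create_user, create_role and grant_role. Inferred purely from these tests, the shape is roughly the following sketch, not the exact hooks/keystone_utils.py implementation:

    def create_user_credentials(user, passwd, tenant=None, grants=None,
                                new_roles=None, domain=None):
        # Argument order mirrors the call() assertions in the tests above;
        # domain stays None unless the v3 API is in use.
        if user_exists(user):
            update_user_password(user, passwd)
        else:
            create_user(user, passwd, tenant, domain)
        for role in (new_roles or []):
            create_role(role, user, tenant, domain)
        for grant in (grants or []):
            grant_role(user, grant, tenant, domain)
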
@@ -358,10 +367,12 @@ class TestKeystoneUtils(CharmTestCase):
     def test_create_service_credentials(self, mock_create_user_credentials,
                                         mock_get_service_password):
         mock_get_service_password.return_value = 'passA'
-        cfg = {'service-tenant': 'tenantA', 'admin-role': 'Admin'}
+        cfg = {'service-tenant': 'tenantA', 'admin-role': 'Admin',
+               'preferred-api-version': 2}
         self.config.side_effect = lambda key: cfg.get(key, None)
-        calls = [call('serviceA', 'tenantA', 'passA', grants=['Admin'],
-                      new_roles=None)]
+        calls = [call('serviceA', 'passA', domain=None, grants=['Admin'],
+                      new_roles=None, tenant='tenantA')]
+
         utils.create_service_credentials('serviceA')
         mock_create_user_credentials.assert_has_calls(calls)
@@ -594,7 +605,7 @@ class TestKeystoneUtils(CharmTestCase):
             internal_ip='10.0.0.1',
             admin_ip='10.0.0.1',
             auth_port=35357,
-            region='RegionOne'
+            region='RegionOne',
         )
 
     @patch.object(utils, 'peer_units')
@@ -704,21 +715,21 @@ class TestKeystoneUtils(CharmTestCase):
         self.assertEquals(render.call_args_list, expected)
         service_restart.assert_called_with('keystone')
 
-    @patch.object(manager, 'KeystoneManager')
+    @patch.object(utils, 'get_manager')
     def test_is_service_present(self, KeystoneManager):
         mock_keystone = MagicMock()
         mock_keystone.resolve_service_id.return_value = 'sid1'
         KeystoneManager.return_value = mock_keystone
         self.assertTrue(utils.is_service_present('bob', 'bill'))
 
-    @patch.object(manager, 'KeystoneManager')
+    @patch.object(utils, 'get_manager')
     def test_is_service_present_false(self, KeystoneManager):
         mock_keystone = MagicMock()
         mock_keystone.resolve_service_id.return_value = None
         KeystoneManager.return_value = mock_keystone
         self.assertFalse(utils.is_service_present('bob', 'bill'))
 
-    @patch.object(manager, 'KeystoneManager')
+    @patch.object(utils, 'get_manager')
     def test_delete_service_entry(self, KeystoneManager):
         mock_keystone = MagicMock()
         mock_keystone.resolve_service_id.return_value = 'sid1'