diff --git a/Makefile b/Makefile index 9b1f7ad3..1e9e06da 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,6 @@ sync: bin/charm_helpers_sync.py @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint test +publish: lint unit_test bzr push lp:charms/keystone bzr push lp:charms/trusty/keystone diff --git a/hooks/keystone_context.py b/hooks/keystone_context.py index 2d40ae99..fd3c8725 100644 --- a/hooks/keystone_context.py +++ b/hooks/keystone_context.py @@ -18,6 +18,7 @@ from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.core.hookenv import ( log, + DEBUG, INFO, ) @@ -173,9 +174,8 @@ class KeystoneContext(context.OSContextGenerator): def __call__(self): from keystone_utils import ( - api_port, set_admin_token, - endpoint_url, resolve_address, - PUBLIC, ADMIN + api_port, set_admin_token, endpoint_url, resolve_address, + PUBLIC, ADMIN, PKI_CERTS_DIR, SSH_USER, ensure_permissions, ) ctxt = {} ctxt['token'] = set_admin_token(config('admin-token')) @@ -205,6 +205,31 @@ class KeystoneContext(context.OSContextGenerator): enable_pki = config('enable-pki') if enable_pki and bool_from_string(enable_pki): ctxt['signing'] = True + ctxt['token_provider'] = 'pki' + + if 'token_provider' in ctxt: + log("Configuring PKI token cert paths", level=DEBUG) + certs = os.path.join(PKI_CERTS_DIR, 'certs') + privates = os.path.join(PKI_CERTS_DIR, 'privates') + for path in [PKI_CERTS_DIR, certs, privates]: + perms = 0o755 + if not os.path.isdir(path): + mkdir(path=path, owner=SSH_USER, group='keystone', + perms=perms) + else: + # Ensure accessible by ssh user and group (for sync). + ensure_permissions(path, user=SSH_USER, + group='keystone', perms=perms) + + signing_paths = {'certfile': os.path.join(certs, + 'signing_cert.pem'), + 'keyfile': os.path.join(privates, + 'signing_key.pem'), + 'ca_certs': os.path.join(certs, 'ca.pem'), + 'ca_key': os.path.join(certs, 'ca_key.pem')} + + for key, val in signing_paths.iteritems(): + ctxt[key] = val # Base endpoint URL's which are used in keystone responses # to unauthenticated requests to redirect clients to the diff --git a/hooks/keystone_hooks.py b/hooks/keystone_hooks.py index 79582277..f8eaaa83 100755 --- a/hooks/keystone_hooks.py +++ b/hooks/keystone_hooks.py @@ -70,6 +70,10 @@ from keystone_utils import ( clear_ssl_synced_units, is_db_initialised, update_certs_if_available, + is_pki_enabled, + ensure_ssl_dir, + ensure_pki_dir_permissions, + force_ssl_sync, filter_null, ensure_ssl_dirs, ) @@ -114,7 +118,7 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map()) -@synchronize_ca_if_changed() +@synchronize_ca_if_changed(fatal=True) def config_changed(): if config('prefer-ipv6'): setup_ipv6() @@ -130,18 +134,25 @@ def config_changed(): if openstack_upgrade_available('keystone'): do_openstack_upgrade(configs=CONFIGS) + # Ensure ssl dir exists and is unison-accessible + ensure_ssl_dir() + check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/']) ensure_ssl_dirs() save_script_rc() configure_https() + update_nrpe_config() CONFIGS.write_all() + if is_pki_enabled(): + initialise_pki() + # Update relations since SSL may have been configured. If we have peer # units we can rely on the sync to do this in cluster relation. 
- if is_elected_leader(CLUSTER_RES) and not peer_units(): + if not peer_units(): update_all_identity_relation_units() for rid in relation_ids('identity-admin'): @@ -154,6 +165,22 @@ def config_changed(): ha_joined(relation_id=r_id) +@synchronize_ca_if_changed(fatal=True) +def initialise_pki(): + """Create certs and keys required for PKI token signing. + + NOTE: keystone.conf [signing] section must be up-to-date prior to + executing this. + """ + if is_ssl_cert_master(): + log("Ensuring PKI token certs created", level=DEBUG) + cmd = ['keystone-manage', 'pki_setup', '--keystone-user', 'keystone', + '--keystone-group', 'keystone'] + check_call(cmd) + + ensure_pki_dir_permissions() + + @hooks.hook('shared-db-relation-joined') def db_joined(): if is_relation_made('pgsql-db'): @@ -294,6 +321,7 @@ def identity_changed(relation_id=None, remote_unit=None): peerdb_settings = filter_null(peerdb_settings) if 'service_password' in peerdb_settings: relation_set(relation_id=rel_id, **peerdb_settings) + log('Deferring identity_changed() to service leader.') if notifications: @@ -312,12 +340,20 @@ def send_ssl_sync_request(): """ unit = local_unit().replace('/', '-') count = 0 - if bool_from_string(config('use-https')): + + use_https = config('use-https') + if use_https and bool_from_string(use_https): count += 1 - if bool_from_string(config('https-service-endpoints')): + https_service_endpoints = config('https-service-endpoints') + if (https_service_endpoints and + bool_from_string(https_service_endpoints)): count += 2 + enable_pki = config('enable-pki') + if enable_pki and bool_from_string(enable_pki): + count += 3 + key = 'ssl-sync-required-%s' % (unit) settings = {key: count} @@ -385,23 +421,32 @@ def cluster_changed(): check_peer_actions() - if is_elected_leader(CLUSTER_RES) or is_ssl_cert_master(): - units = get_ssl_sync_request_units() - synced_units = relation_get(attribute='ssl-synced-units', - unit=local_unit()) - if synced_units: - synced_units = json.loads(synced_units) - diff = set(units).symmetric_difference(set(synced_units)) + if is_pki_enabled(): + initialise_pki() - if units and (not synced_units or diff): - log("New peers joined and need syncing - %s" % - (', '.join(units)), level=DEBUG) - update_all_identity_relation_units_force_sync() - else: - update_all_identity_relation_units() + # Figure out if we need to mandate a sync + units = get_ssl_sync_request_units() + synced_units = relation_get(attribute='ssl-synced-units', + unit=local_unit()) + diff = None + if synced_units: + synced_units = json.loads(synced_units) + diff = set(units).symmetric_difference(set(synced_units)) - for rid in relation_ids('identity-admin'): - admin_relation_changed(rid) + if units and (not synced_units or diff): + log("New peers joined and need syncing - %s" % + (', '.join(units)), level=DEBUG) + update_all_identity_relation_units_force_sync() + else: + update_all_identity_relation_units() + + for rid in relation_ids('identity-admin'): + admin_relation_changed(rid) + + if not is_elected_leader(CLUSTER_RES) and is_ssl_cert_master(): + # Force a sync and trigger a sync master re-election since we are not + # leader anymore. + force_ssl_sync() else: CONFIGS.write_all() diff --git a/hooks/keystone_ssl.py b/hooks/keystone_ssl.py index 0c6adcf8..fc4513c8 100644 --- a/hooks/keystone_ssl.py +++ b/hooks/keystone_ssl.py @@ -111,15 +111,16 @@ CA_SINGLETON = [] def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT): - print 'Ensuring certificate authority exists at %s.'
% ca_dir + log('Ensuring certificate authority exists at %s.' % ca_dir, level=DEBUG) if not os.path.exists(ca_dir): - print 'Initializing new certificate authority at %s' % ca_dir + log('Initializing new certificate authority at %s' % ca_dir, + level=DEBUG) os.mkdir(ca_dir) for i in ['certs', 'crl', 'newcerts', 'private']: d = os.path.join(ca_dir, i) if not os.path.exists(d): - print 'Creating %s.' % d + log('Creating %s.' % d, level=DEBUG) os.mkdir(d) os.chmod(os.path.join(ca_dir, 'private'), 0o710) @@ -130,9 +131,11 @@ def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT): if not os.path.isfile(os.path.join(ca_dir, 'index.txt')): with open(os.path.join(ca_dir, 'index.txt'), 'wb') as out: out.write('') - if not os.path.isfile(os.path.join(ca_dir, 'ca.cnf')): - print 'Creating new CA config in %s' % ca_dir - with open(os.path.join(ca_dir, 'ca.cnf'), 'wb') as out: + + conf = os.path.join(ca_dir, 'ca.cnf') + if not os.path.isfile(conf): + log('Creating new CA config in %s' % ca_dir, level=DEBUG) + with open(conf, 'wb') as out: out.write(CA_CONFIG % locals()) @@ -142,40 +145,42 @@ def root_ca_crt_key(ca_dir): key = os.path.join(ca_dir, 'private', 'cacert.key') for f in [crt, key]: if not os.path.isfile(f): - print 'Missing %s, will re-initialize cert+key.' % f + log('Missing %s, will re-initialize cert+key.' % f, level=DEBUG) init = True else: - print 'Found %s.' % f + log('Found %s.' % f, level=DEBUG) + if init: - cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), + conf = os.path.join(ca_dir, 'ca.cnf') + cmd = ['openssl', 'req', '-config', conf, '-x509', '-nodes', '-newkey', 'rsa', '-days', '21360', '-keyout', key, '-out', crt, '-outform', 'PEM'] subprocess.check_call(cmd) + return crt, key def intermediate_ca_csr_key(ca_dir): - print 'Creating new intermediate CSR.' + log('Creating new intermediate CSR.', level=DEBUG) key = os.path.join(ca_dir, 'private', 'cacert.key') csr = os.path.join(ca_dir, 'cacert.csr') - cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), - '-sha1', '-newkey', 'rsa', '-nodes', '-keyout', key, '-out', - csr, '-outform', - 'PEM'] + conf = os.path.join(ca_dir, 'ca.cnf') + cmd = ['openssl', 'req', '-config', conf, '-sha1', '-newkey', 'rsa', + '-nodes', '-keyout', key, '-out', csr, '-outform', 'PEM'] subprocess.check_call(cmd) return csr, key def sign_int_csr(ca_dir, csr, common_name): - print 'Signing certificate request %s.' % csr - crt = os.path.join(ca_dir, 'certs', - '%s.crt' % os.path.basename(csr).split('.')[0]) + log('Signing certificate request %s.' 
% csr, level=DEBUG) + crt_name = os.path.basename(csr).split('.')[0] + crt = os.path.join(ca_dir, 'certs', '%s.crt' % crt_name) subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name) - cmd = ['openssl', 'ca', '-batch', '-config', - os.path.join(ca_dir, 'ca.cnf'), - '-extensions', 'ca_extensions', '-days', CA_EXPIRY, '-notext', - '-in', csr, '-out', crt, '-subj', subj, '-batch'] - print ' '.join(cmd) + conf = os.path.join(ca_dir, 'ca.cnf') + cmd = ['openssl', 'ca', '-batch', '-config', conf, '-extensions', + 'ca_extensions', '-days', CA_EXPIRY, '-notext', '-in', csr, '-out', + crt, '-subj', subj, '-batch'] + log("Executing: %s" % ' '.join(cmd), level=DEBUG) subprocess.check_call(cmd) return crt @@ -185,19 +190,20 @@ def init_root_ca(ca_dir, common_name): return root_ca_crt_key(ca_dir) -def init_intermediate_ca(ca_dir, common_name, root_ca_dir, - org_name=ORG_NAME, org_unit_name=ORG_UNIT): +def init_intermediate_ca(ca_dir, common_name, root_ca_dir, org_name=ORG_NAME, + org_unit_name=ORG_UNIT): init_ca(ca_dir, common_name) if not os.path.isfile(os.path.join(ca_dir, 'cacert.pem')): csr, key = intermediate_ca_csr_key(ca_dir) crt = sign_int_csr(root_ca_dir, csr, common_name) shutil.copy(crt, os.path.join(ca_dir, 'cacert.pem')) else: - print 'Intermediate CA certificate already exists.' + log('Intermediate CA certificate already exists.', level=DEBUG) - if not os.path.isfile(os.path.join(ca_dir, 'signing.cnf')): - print 'Creating new signing config in %s' % ca_dir - with open(os.path.join(ca_dir, 'signing.cnf'), 'wb') as out: + conf = os.path.join(ca_dir, 'signing.cnf') + if not os.path.isfile(conf): + log('Creating new signing config in %s' % ca_dir, level=DEBUG) + with open(conf, 'wb') as out: out.write(SIGNING_CONFIG % locals()) @@ -210,7 +216,7 @@ def create_certificate(ca_dir, service): key, '-out', csr, '-subj', subj] subprocess.check_call(cmd) crt = sign_int_csr(ca_dir, csr, common_name) - print 'Signed new CSR, crt @ %s' % crt + log('Signed new CSR, crt @ %s' % crt, level=DEBUG) return @@ -219,13 +225,14 @@ def update_bundle(bundle_file, new_bundle): if os.path.isfile(bundle_file): current = open(bundle_file, 'r').read().strip() if new_bundle == current: - print 'CA Bundle @ %s is up to date.' % bundle_file + log('CA Bundle @ %s is up to date.' % bundle_file, level=DEBUG) return - else: - print 'Updating CA bundle @ %s.' % bundle_file + + log('Updating CA bundle @ %s.' 
% bundle_file, level=DEBUG) with open(bundle_file, 'wb') as out: out.write(new_bundle) + subprocess.check_call(['update-ca-certificates']) @@ -248,15 +255,19 @@ def tar_directory(path): class JujuCA(object): def __init__(self, name, ca_dir, root_ca_dir, user, group): - root_crt, root_key = init_root_ca(root_ca_dir, - '%s Certificate Authority' % name) - init_intermediate_ca(ca_dir, - '%s Intermediate Certificate Authority' % name, - root_ca_dir) + # Root CA + cn = '%s Certificate Authority' % name + root_crt, root_key = init_root_ca(root_ca_dir, cn) + # Intermediate CA + cn = '%s Intermediate Certificate Authority' % name + init_intermediate_ca(ca_dir, cn, root_ca_dir) + + # Create dirs cmd = ['chown', '-R', '%s.%s' % (user, group), ca_dir] subprocess.check_call(cmd) cmd = ['chown', '-R', '%s.%s' % (user, group), root_ca_dir] subprocess.check_call(cmd) + self.ca_dir = ca_dir self.root_ca_dir = root_ca_dir self.user = user @@ -266,8 +277,8 @@ class JujuCA(object): def _sign_csr(self, csr, service, common_name): subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name) crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name) - cmd = ['openssl', 'ca', '-config', - os.path.join(self.ca_dir, 'signing.cnf'), '-extensions', + conf = os.path.join(self.ca_dir, 'signing.cnf') + cmd = ['openssl', 'ca', '-config', conf, '-extensions', 'req_extensions', '-days', '365', '-notext', '-in', csr, '-out', crt, '-batch', '-subj', subj] subprocess.check_call(cmd) @@ -286,10 +297,16 @@ class JujuCA(object): log('Signed new CSR, crt @ %s' % crt, level=DEBUG) return crt, key + def get_key_path(self, cn): + return os.path.join(self.ca_dir, 'certs', '%s.key' % cn) + + def get_cert_path(self, cn): + return os.path.join(self.ca_dir, 'certs', '%s.crt' % cn) + def get_cert_and_key(self, common_name): log('Getting certificate and key for %s.' % common_name, level=DEBUG) - keypath = os.path.join(self.ca_dir, 'certs', '%s.key' % common_name) - crtpath = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name) + keypath = self.get_key_path(common_name) + crtpath = self.get_cert_path(common_name) if os.path.isfile(crtpath): log('Found existing certificate for %s.' % common_name, level=DEBUG) @@ -300,8 +317,24 @@ class JujuCA(object): crt, key = self._create_certificate(common_name, common_name) return open(crt, 'r').read(), open(key, 'r').read() + @property + def ca_cert_path(self): + return os.path.join(self.ca_dir, 'cacert.pem') + + @property + def ca_key_path(self): + return os.path.join(self.ca_dir, 'private', 'cacert.key') + + @property + def root_ca_cert_path(self): + return os.path.join(self.root_ca_dir, 'cacert.pem') + + @property + def root_ca_key_path(self): + return os.path.join(self.root_ca_dir, 'private', 'cacert.key') + def get_ca_bundle(self): - int_cert = open(os.path.join(self.ca_dir, 'cacert.pem')).read() - root_cert = open(os.path.join(self.root_ca_dir, 'cacert.pem')).read() + int_cert = open(self.ca_cert_path).read() + root_cert = open(self.root_ca_cert_path).read() # NOTE: ordering of certs in bundle matters! 
return int_cert + root_cert diff --git a/hooks/keystone_utils.py b/hooks/keystone_utils.py index 4f7cd6b4..e72ea2da 100644 --- a/hooks/keystone_utils.py +++ b/hooks/keystone_utils.py @@ -140,10 +140,13 @@ SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/' SYNC_DIR = '/var/lib/keystone/juju_sync/' SSL_SYNC_ARCHIVE = os.path.join(SYNC_DIR, 'juju-ssl-sync.tar') SSL_DIR = '/var/lib/keystone/juju_ssl/' +PKI_CERTS_DIR = os.path.join(SSL_DIR, 'pki') SSL_CA_NAME = 'Ubuntu Cloud' CLUSTER_RES = 'grp_ks_vips' SSH_USER = 'juju_keystone' +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' SSL_SYNC_SEMAPHORE = threading.Semaphore() +SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH] BASE_RESOURCE_MAP = OrderedDict([ (KEYSTONE_CONF, { @@ -175,8 +178,6 @@ BASE_RESOURCE_MAP = OrderedDict([ }), ]) -CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' - valid_services = { "nova": { "type": "compute", @@ -784,6 +785,9 @@ def check_peer_actions(): elif action == 'update-ca-certificates': log("Running %s" % (action), level=DEBUG) subprocess.check_call(['update-ca-certificates']) + elif action == 'ensure-pki-permissions': + log("Running %s" % (action), level=DEBUG) + ensure_pki_dir_permissions() else: log("Unknown action flag=%s" % (flag), level=WARNING) @@ -881,8 +885,12 @@ def is_ssl_cert_master(votes=None): def is_ssl_enabled(): - if (bool_from_string(config('use-https')) or - bool_from_string(config('https-service-endpoints'))): + use_https = config('use-https') + https_service_endpoints = config('https-service-endpoints') + if ((use_https and bool_from_string(use_https)) or + (https_service_endpoints and + bool_from_string(https_service_endpoints)) or + is_pki_enabled()): log("SSL/HTTPS is enabled", level=DEBUG) return True @@ -960,6 +968,20 @@ def stage_paths_for_sync(paths): perms=0o755, recurse=True) +def is_pki_enabled(): + enable_pki = config('enable-pki') + if enable_pki and bool_from_string(enable_pki): + return True + + return False + + +def ensure_pki_dir_permissions(): + # Ensure accessible by unison user and group (for sync). + ensure_permissions(PKI_CERTS_DIR, user=SSH_USER, group='keystone', + perms=0o755, recurse=True) + + def update_certs_if_available(f): def _inner_update_certs_if_available(*args, **kwargs): path = None @@ -999,12 +1021,18 @@ def synchronize_ca(fatal=False): Returns a dictionary of settings to be set on the cluster relation. """ paths_to_sync = [] + peer_service_actions = [] + peer_actions = [] if bool_from_string(config('https-service-endpoints')): log("Syncing all endpoint certs since https-service-endpoints=True", level=DEBUG) paths_to_sync.append(SSL_DIR) paths_to_sync.append(CA_CERT_PATH) + # We need to restart peer apache services to ensure they have picked up + # new ssl keys. + peer_service_actions.append(('restart', ('apache2'))) + peer_actions.append('update-ca-certificates') if bool_from_string(config('use-https')): log("Syncing keystone-endpoint certs since use-https=True", @@ -1012,6 +1040,15 @@ def synchronize_ca(fatal=False): paths_to_sync.append(SSL_DIR) paths_to_sync.append(APACHE_SSL_DIR) paths_to_sync.append(CA_CERT_PATH) + # We need to restart peer apache services to ensure they have picked up + # new ssl keys. 
+ peer_service_actions.append(('restart', ('apache2'))) + peer_actions.append('update-ca-certificates') + + if is_pki_enabled(): + log("Syncing token certs", level=DEBUG) + paths_to_sync.append(PKI_CERTS_DIR) + peer_actions.append('ensure-pki-permissions') if not paths_to_sync: log("Nothing to sync - skipping", level=DEBUG) @@ -1020,10 +1057,10 @@ if not os.path.isdir(SYNC_FLAGS_DIR): mkdir(SYNC_FLAGS_DIR, SSH_USER, 'keystone', 0o775) - # We need to restart peer apache services to ensure they have picked up - # new ssl keys. - create_peer_service_actions('restart', ['apache2']) - create_peer_actions(['update-ca-certificates']) + for action, services in set(peer_service_actions): + create_peer_service_actions(action, services) + + create_peer_actions(peer_actions) paths_to_sync = list(set(paths_to_sync)) stage_paths_for_sync(paths_to_sync) @@ -1097,21 +1134,19 @@ def synchronize_ca_if_changed(force=False, fatal=False): return f(*args, **kwargs) if not ensure_ssl_cert_master(): - log("Not leader - ignoring sync", level=DEBUG) + log("Not ssl-cert-master - ignoring sync", level=DEBUG) return f(*args, **kwargs) peer_settings = {} if not force: - ssl_dirs = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH] - hash1 = hashlib.sha256() - for path in ssl_dirs: + for path in SSL_DIRS: update_hash_from_path(hash1, path) ret = f(*args, **kwargs) hash2 = hashlib.sha256() - for path in ssl_dirs: + for path in SSL_DIRS: update_hash_from_path(hash2, path) if hash1.hexdigest() != hash2.hexdigest(): @@ -1146,15 +1181,33 @@ return inner_synchronize_ca_if_changed1 +@synchronize_ca_if_changed(force=True, fatal=True) +def force_ssl_sync(): + """Force SSL sync to all peers. + + This is useful if we need to relinquish ssl-cert-master status while + making sure that the new master has up-to-date certs. + """ + return + + +def ensure_ssl_dir(): + """Ensure juju ssl dir exists and is unison read/writable.""" + perms = 0o755 + if not os.path.isdir(SSL_DIR): + mkdir(SSL_DIR, SSH_USER, 'keystone', perms) + else: + ensure_permissions(SSL_DIR, user=SSH_USER, group='keystone', + perms=perms) + + def get_ca(user='keystone', group='keystone'): """Initialize a new CA object if one hasn't already been loaded. This will create a new CA or load an existing one. """ if not ssl.CA_SINGLETON: - if not os.path.isdir(SSL_DIR): - os.mkdir(SSL_DIR) - + ensure_ssl_dir() d_name = '_'.join(SSL_CA_NAME.lower().split(' ')) ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group, ca_dir=os.path.join(SSL_DIR, @@ -1162,12 +1215,6 @@ root_ca_dir=os.path.join(SSL_DIR, '%s_root_ca' % d_name)) - # SSL_DIR is synchronized via all peers over unison+ssh, need - # to ensure permissions. - subprocess.check_output(['chown', '-R', '%s.%s' % (user, group), - '%s' % SSL_DIR]) - subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR]) - # Ensure a master is elected. This should cover the following cases: # * single unit == 'oldest' unit is elected as master # * multi unit + not clustered == 'oldest' unit is elcted as master @@ -1212,9 +1259,13 @@ def add_service_to_keystone(relation_id=None, remote_unit=None): # Some backend services advertise no endpoint but require a # hook execution to update auth strategy.
relation_data = {} + rel_only_data = {} # Check if clustered and use vip + haproxy ports if so - relation_data["auth_host"] = resolve_address(ADMIN) - relation_data["service_host"] = resolve_address(PUBLIC) + # NOTE(hopem): don't put these on peer relation because racey + # leader election causes cluster relation to spin + rel_only_data["auth_host"] = resolve_address(ADMIN) + rel_only_data["service_host"] = resolve_address(PUBLIC) + relation_data["auth_protocol"] = protocol relation_data["service_protocol"] = protocol relation_data["auth_port"] = config('admin-port') @@ -1237,8 +1288,8 @@ log("Creating requested role: %s" % role) create_role(role) - peer_store_and_set(relation_id=relation_id, - **relation_data) + relation_set(relation_id=relation_id, **rel_only_data) + peer_store_and_set(relation_id=relation_id, **relation_data) return else: ensure_valid_service(settings['service']) @@ -1342,13 +1393,16 @@ # service credentials service_tenant = config('service-tenant') + # NOTE(hopem): don't put these on peer relation because racey + # leader election causes cluster relation to spin + rel_only_data = {"auth_host": resolve_address(ADMIN), + "service_host": resolve_address(PUBLIC)} + # NOTE(dosaboy): we use __null__ to represent settings that are to be # routed to relations via the cluster relation and set to None. relation_data = { "admin_token": token, - "service_host": resolve_address(PUBLIC), "service_port": config("service-port"), - "auth_host": resolve_address(ADMIN), "auth_port": config("admin-port"), "service_username": service_username, "service_password": service_password, @@ -1381,6 +1435,7 @@ relation_data['ca_cert'] = b64encode(ca_bundle) relation_data['https_keystone'] = 'True' + relation_set(relation_id=relation_id, **rel_only_data) # NOTE(dosaboy): '__null__' settings are for peer relation only so that # settings can flushed so we filter them out for non-peer relation.
filtered = filter_null(relation_data) diff --git a/templates/icehouse/keystone.conf b/templates/icehouse/keystone.conf index 6c545ee3..5ef7fe35 100644 --- a/templates/icehouse/keystone.conf +++ b/templates/icehouse/keystone.conf @@ -43,7 +43,15 @@ driver = keystone.catalog.backends.sql.Catalog [token] driver = keystone.token.backends.sql.Token -provider = keystone.token.providers.uuid.Provider +{% if token_provider == 'pki' -%} +provider = keystone.token.providers.pki.Provider +{% elif token_provider == 'pkiz' -%} +provider = keystone.token.providers.pkiz.Provider +{% else -%} +provider = keystone.token.providers.uuid.Provider +{% endif %} + +{% include "parts/section-signing" %} [cache] @@ -58,8 +66,6 @@ driver = keystone.assignment.backends.{{ assignment_backend }}.Assignment [oauth1] -[signing] - [auth] methods = external,password,token,oauth1 password = keystone.auth.plugins.password.Password diff --git a/templates/parts/section-signing b/templates/parts/section-signing new file mode 100644 index 00000000..77a17014 --- /dev/null +++ b/templates/parts/section-signing @@ -0,0 +1,13 @@ +[signing] +{% if certfile -%} +certfile = {{ certfile }} +{% endif -%} +{% if keyfile -%} +keyfile = {{ keyfile }} +{% endif -%} +{% if ca_certs -%} +ca_certs = {{ ca_certs }} +{% endif -%} +{% if ca_key -%} +ca_key = {{ ca_key }} +{% endif -%} diff --git a/unit_tests/test_keystone_hooks.py b/unit_tests/test_keystone_hooks.py index 5cd5f028..bcfd7167 100644 --- a/unit_tests/test_keystone_hooks.py +++ b/unit_tests/test_keystone_hooks.py @@ -59,6 +59,8 @@ TO_PATCH = [ 'synchronize_ca_if_changed', 'update_nrpe_config', 'ensure_ssl_dirs', + 'is_db_initialised', + 'is_db_ready', # other 'check_call', 'execd_preinstall', @@ -203,18 +205,15 @@ class KeystoneRelationTests(CharmTestCase): configs.write = MagicMock() hooks.pgsql_db_changed() - @patch.object(hooks, 'is_db_initialised') - @patch.object(hooks, 'is_db_ready') @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_allowed(self, identity_changed, configs, mock_ensure_ssl_cert_master, - mock_log, mock_is_db_ready, - mock_is_db_initialised): - mock_is_db_initialised.return_value = True - mock_is_db_ready.return_value = True + mock_log): + self.is_db_initialised.return_value = True + self.is_db_ready.return_value = True mock_ensure_ssl_cert_master.return_value = False self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -228,15 +227,13 @@ class KeystoneRelationTests(CharmTestCase): relation_id='identity-service:0', remote_unit='unit/0') - @patch.object(hooks, 'is_db_ready') @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') def test_db_changed_not_allowed(self, identity_changed, configs, - mock_ensure_ssl_cert_master, mock_log, - mock_is_db_ready): - mock_is_db_ready.return_value = False + mock_ensure_ssl_cert_master, mock_log): + self.is_db_ready.return_value = False mock_ensure_ssl_cert_master.return_value = False self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -250,15 +247,12 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') - @patch.object(hooks, 'is_db_initialised') - @patch.object(hooks, 'is_db_ready') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 
'identity_changed') def test_postgresql_db_changed(self, identity_changed, configs, - mock_is_db_ready, mock_is_db_initialised, mock_ensure_ssl_cert_master, mock_log): - mock_is_db_initialised.return_value = True - mock_is_db_ready.return_value = True + self.is_db_initialised.return_value = True + self.is_db_ready.return_value = True mock_ensure_ssl_cert_master.return_value = False self.relation_ids.return_value = ['identity-service:0'] self.related_units.return_value = ['unit/0'] @@ -274,11 +268,13 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') - @patch.object(hooks, 'send_ssl_sync_request') - @patch.object(hooks, 'is_db_initialised') - @patch.object(hooks, 'is_db_ready') - @patch.object(hooks, 'peer_units') @patch('keystone_utils.ensure_ssl_dirs') + @patch.object(hooks, 'ensure_pki_dir_permissions') + @patch.object(hooks, 'ensure_ssl_dir') + @patch.object(hooks, 'is_pki_enabled') + @patch.object(hooks, 'is_ssl_cert_master') + @patch.object(hooks, 'send_ssl_sync_request') + @patch.object(hooks, 'peer_units') @patch.object(hooks, 'admin_relation_changed') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -286,15 +282,25 @@ class KeystoneRelationTests(CharmTestCase): @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'configure_https') - def test_config_changed_no_openstack_upgrade_leader( - self, configure_https, identity_changed, - configs, get_homedir, ensure_user, cluster_joined, - admin_relation_changed, ensure_ssl_dirs, mock_peer_units, - mock_is_db_ready, mock_is_db_initialised, - mock_send_ssl_sync_request, - mock_ensure_ssl_cert_master, mock_log): - mock_is_db_initialised.return_value = True - mock_is_db_ready.return_value = True + def test_config_changed_no_upgrade_leader(self, configure_https, + identity_changed, + configs, get_homedir, + ensure_user, + cluster_joined, + admin_relation_changed, + mock_peer_units, + mock_send_ssl_sync_request, + mock_is_ssl_cert_master, + mock_is_pki_enabled, + mock_ensure_ssl_dir, + mock_ensure_pki_dir_permissions, + mock_ensure_ssl_dirs, + mock_ensure_ssl_cert_master, + mock_log): + mock_is_pki_enabled.return_value = True + mock_is_ssl_cert_master.return_value = True + self.is_db_initialised.return_value = True + self.is_db_ready.return_value = True self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = True # avoid having to mock syncer @@ -322,17 +328,34 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_dirs') + @patch.object(hooks, 'update_all_identity_relation_units') + @patch.object(hooks, 'ensure_pki_dir_permissions') + @patch.object(hooks, 'ensure_ssl_dir') + @patch.object(hooks, 'is_pki_enabled') + @patch.object(hooks, 'peer_units') + @patch.object(hooks, 'is_ssl_cert_master') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @patch.object(unison, 'get_homedir') @patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'configure_https') - def test_config_changed_no_openstack_upgrade_not_leader( - self, configure_https, identity_changed, - configs, get_homedir, ensure_user, cluster_joined, - ensure_ssl_dirs, mock_ensure_ssl_cert_master, - mock_log): + def test_config_changed_no_upgrade_not_leader(self, configure_https, + identity_changed, + configs, get_homedir, + ensure_user, 
cluster_joined, + mock_is_ssl_cert_master, + mock_peer_units, + mock_is_pki_enabled, + mock_ensure_ssl_dir, + mock_ensure_pki_permissions, + mock_update_all_id_rel_units, + ensure_ssl_dirs, + mock_ensure_ssl_cert_master, + mock_log): + mock_is_pki_enabled.return_value = True + mock_is_ssl_cert_master.return_value = True + mock_peer_units.return_value = [] self.openstack_upgrade_available.return_value = False self.is_elected_leader.return_value = False mock_ensure_ssl_cert_master.return_value = False @@ -351,11 +374,13 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') - @patch.object(hooks, 'send_ssl_sync_request') - @patch.object(hooks, 'is_db_initialised') - @patch.object(hooks, 'is_db_ready') - @patch.object(hooks, 'peer_units') @patch('keystone_utils.ensure_ssl_dirs') + @patch.object(hooks, 'ensure_pki_dir_permissions') + @patch.object(hooks, 'ensure_ssl_dir') + @patch.object(hooks, 'is_pki_enabled') + @patch.object(hooks, 'is_ssl_cert_master') + @patch.object(hooks, 'send_ssl_sync_request') + @patch.object(hooks, 'peer_units') @patch.object(hooks, 'admin_relation_changed') @patch.object(hooks, 'cluster_joined') @patch.object(unison, 'ensure_user') @@ -368,15 +393,19 @@ class KeystoneRelationTests(CharmTestCase): configs, get_homedir, ensure_user, cluster_joined, admin_relation_changed, - ensure_ssl_dirs, mock_peer_units, - mock_is_db_ready, - mock_is_db_initialised, mock_send_ssl_sync_request, + mock_is_ssl_cert_master, + mock_is_pki_enabled, + mock_ensure_ssl_dir, + mock_ensure_pki_permissions, + mock_ensure_ssl_dirs, mock_ensure_ssl_cert_master, mock_log): - mock_is_db_ready.return_value = True - mock_is_db_initialised.return_value = True + mock_is_pki_enabled.return_value = True + mock_is_ssl_cert_master.return_value = True + self.is_db_ready.return_value = True + self.is_db_initialised.return_value = True self.openstack_upgrade_available.return_value = True self.is_elected_leader.return_value = True # avoid having to mock syncer @@ -403,18 +432,15 @@ class KeystoneRelationTests(CharmTestCase): remote_unit='unit/0') admin_relation_changed.assert_called_with('identity-service:0') - @patch.object(hooks, 'is_db_initialised') - @patch.object(hooks, 'is_db_ready') @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @patch.object(hooks, 'hashlib') @patch.object(hooks, 'send_notifications') def test_identity_changed_leader(self, mock_send_notifications, mock_hashlib, mock_ensure_ssl_cert_master, - mock_log, mock_is_db_ready, - mock_is_db_initialised): - mock_is_db_initialised.return_value = True - mock_is_db_ready.return_value = True + mock_log): + self.is_db_initialised.return_value = True + self.is_db_ready.return_value = True mock_ensure_ssl_cert_master.return_value = False hooks.identity_changed( relation_id='identity-service:0', @@ -450,9 +476,12 @@ class KeystoneRelationTests(CharmTestCase): user=self.ssh_user, group='juju_keystone', peer_interface='cluster', ensure_local_user=True) + @patch.object(hooks, 'update_all_identity_relation_units') + @patch.object(hooks, 'get_ssl_sync_request_units') @patch.object(hooks, 'is_ssl_cert_master') @patch.object(hooks, 'peer_units') @patch('keystone_utils.relation_ids') + @patch('keystone_utils.config') @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.synchronize_ca') @@ -462,13 +491,31 @@ class KeystoneRelationTests(CharmTestCase): def test_cluster_changed(self, configs, ssh_authorized_peers, 
check_peer_actions, mock_synchronize_ca, mock_ensure_ssl_cert_master, - mock_log, mock_relation_ids, mock_peer_units, - mock_is_ssl_cert_master): + mock_log, mock_config, mock_relation_ids, + mock_peer_units, + mock_is_ssl_cert_master, + mock_get_ssl_sync_request_units, + mock_update_all_identity_relation_units): + + relation_settings = {'foo_passwd': '123', + 'identity-service:16_foo': 'bar'} + mock_is_ssl_cert_master.return_value = False mock_peer_units.return_value = ['unit/0'] mock_ensure_ssl_cert_master.return_value = False mock_relation_ids.return_value = [] self.is_elected_leader.return_value = False + + def fake_rel_get(attribute=None, *args, **kwargs): + if not attribute: + return relation_settings + + return relation_settings.get(attribute) + + self.relation_get.side_effect = fake_rel_get + + mock_config.return_value = None + hooks.cluster_changed() whitelist = ['_passwd', 'identity-service:', 'ssl-cert-master', 'db-initialised', 'ssl-cert-available-updates'] @@ -572,18 +619,14 @@ class KeystoneRelationTests(CharmTestCase): @patch('keystone_utils.log') @patch('keystone_utils.ensure_ssl_cert_master') - @patch.object(hooks, 'is_db_ready') - @patch.object(hooks, 'is_db_initialised') @patch.object(hooks, 'identity_changed') @patch.object(hooks, 'CONFIGS') def test_ha_relation_changed_clustered_leader(self, configs, identity_changed, - mock_is_db_initialised, - mock_is_db_ready, mock_ensure_ssl_cert_master, mock_log): - mock_is_db_initialised.return_value = True - mock_is_db_ready.return_value = True + self.is_db_initialised.return_value = True + self.is_db_ready.return_value = True mock_ensure_ssl_cert_master.return_value = False self.relation_get.return_value = True self.is_elected_leader.return_value = True @@ -629,8 +672,6 @@ class KeystoneRelationTests(CharmTestCase): cmd = ['a2dissite', 'openstack_https_frontend'] self.check_call.assert_called_with(cmd) - @patch.object(hooks, 'is_db_ready') - @patch.object(hooks, 'is_db_initialised') @patch('keystone_utils.log') @patch('keystone_utils.relation_ids') @patch('keystone_utils.is_elected_leader') @@ -644,11 +685,9 @@ class KeystoneRelationTests(CharmTestCase): mock_ensure_ssl_cert_master, mock_is_elected_leader, mock_relation_ids, - mock_log, - mock_is_db_ready, - mock_is_db_initialised): - mock_is_db_initialised.return_value = True - mock_is_db_ready.return_value = True + mock_log): + self.is_db_initialised.return_value = True + self.is_db_ready.return_value = True mock_is_elected_leader.return_value = False mock_relation_ids.return_value = [] mock_ensure_ssl_cert_master.return_value = True diff --git a/unit_tests/test_keystone_utils.py b/unit_tests/test_keystone_utils.py index 4691d0fa..18ba81f9 100644 --- a/unit_tests/test_keystone_utils.py +++ b/unit_tests/test_keystone_utils.py @@ -179,18 +179,19 @@ class TestKeystoneUtils(CharmTestCase): self.assertTrue(self.https.called) self.assertTrue(self.create_role.called) - relation_data = {'auth_host': '10.10.10.10', - 'service_host': '10.10.10.10', - 'auth_protocol': 'https', + rel_only_data = {'auth_host': '10.10.10.10', + 'service_host': '10.10.10.10'} + relation_data = {'auth_protocol': 'https', 'service_protocol': 'https', 'auth_port': 80, 'service_port': 81, 'https_keystone': 'True', 'ca_cert': 'certificate', 'region': 'RegionOne'} - self.peer_store_and_set.assert_called_with( - relation_id=relation_id, - **relation_data) + self.relation_set.assert_called_with(relation_id=relation_id, + **rel_only_data) + self.peer_store_and_set.assert_called_with(relation_id=relation_id, + 
**relation_data) @patch.object(utils, 'ensure_valid_service') @patch.object(utils, 'add_endpoint') @@ -236,14 +237,15 @@ class TestKeystoneUtils(CharmTestCase): self.grant_role.assert_called_with('keystone', 'admin', 'tenant') self.create_role.assert_called_with('role1', 'keystone', 'tenant') + rel_only_data = {'auth_host': '10.0.0.3', + 'service_host': '10.0.0.3'} relation_data = {'admin_token': 'token', 'service_port': 81, 'auth_port': 80, 'service_username': 'keystone', 'service_password': 'password', 'service_tenant': 'tenant', 'https_keystone': '__null__', 'ssl_cert': '__null__', 'ssl_key': '__null__', - 'ca_cert': '__null__', 'auth_host': '10.0.0.3', - 'service_host': '10.0.0.3', + 'ca_cert': '__null__', 'auth_protocol': 'http', 'service_protocol': 'http', 'service_tenant_id': 'tenant_id'} @@ -254,9 +256,11 @@ class TestKeystoneUtils(CharmTestCase): else: filtered[k] = v - call1 = call(relation_id=relation_id, **filtered) - call2 = call(relation_id='cluster/0', **relation_data) - self.relation_set.assert_has_calls([call1, call2]) + call1 = call(relation_id=relation_id, **rel_only_data) + call2 = call(relation_id=relation_id, **filtered) + call3 = call(relation_id='cluster/0', **relation_data) + self.assertTrue(self.relation_set.called) + self.relation_set.assert_has_calls([call1, call2, call3]) @patch.object(utils, 'ensure_valid_service') @patch.object(utils, 'add_endpoint')