[hopem,r=]

Implement PKI token signing.

Closes-Bug: 1309667
Author: Edward Hope-Morley
Date:   2015-02-18 17:20:23 +00:00
Commit: d0993565ca
22 changed files with 1385 additions and 268 deletions


@ -48,6 +48,9 @@ from charmhelpers.core.hookenv import (
from charmhelpers.core.decorators import ( from charmhelpers.core.decorators import (
retry_on_exception, retry_on_exception,
) )
from charmhelpers.core.strutils import (
bool_from_string,
)
class HAIncompleteConfig(Exception): class HAIncompleteConfig(Exception):
@ -164,7 +167,8 @@ def https():
. .
returns: boolean returns: boolean
''' '''
if config_get('use-https') == "yes": use_https = config_get('use-https')
if use_https and bool_from_string(use_https):
return True return True
if config_get('ssl_cert') and config_get('ssl_key'): if config_get('ssl_cert') and config_get('ssl_key'):
return True return True


@ -71,16 +71,19 @@ class OpenStackAmuletDeployment(AmuletDeployment):
services.append(this_service) services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw'] 'ceph-osd', 'ceph-radosgw']
# OpenStack subordinate charms do not expose an origin option as that
# is controlled by the principal charm
ignore = ['neutron-openvswitch']
if self.openstack: if self.openstack:
for svc in services: for svc in services:
if svc['name'] not in use_source: if svc['name'] not in use_source + ignore:
config = {'openstack-origin': self.openstack} config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config) self.d.configure(svc['name'], config)
if self.source: if self.source:
for svc in services: for svc in services:
if svc['name'] in use_source: if svc['name'] in use_source and svc['name'] not in ignore:
config = {'source': self.source} config = {'source': self.source}
self.d.configure(svc['name'], config) self.d.configure(svc['name'], config)


@ -17,8 +17,6 @@
# You should have received a copy of the GNU Lesser General Public License # You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
from charmhelpers.fetch import apt_install, apt_update from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log from charmhelpers.core.hookenv import log
@ -29,6 +27,8 @@ except ImportError:
apt_install('python-pip') apt_install('python-pip')
from pip import main as pip_execute from pip import main as pip_execute
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def parse_options(given, available): def parse_options(given, available):
"""Given a set of options, check if available""" """Given a set of options, check if available"""


@ -17,11 +17,11 @@
# You should have received a copy of the GNU Lesser General Public License # You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import io import io
import os import os
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
class Fstab(io.FileIO): class Fstab(io.FileIO):
"""This class extends file in order to implement a file reader/writer """This class extends file in order to implement a file reader/writer


@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
def write_file(path, content, owner='root', group='root', perms=0o444): def write_file(path, content, owner='root', group='root', perms=0o444):
"""Create or overwrite a file with the contents of a string""" """Create or overwrite a file with the contents of a byte string."""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid gid = grp.getgrnam(group).gr_gid
with open(path, 'w') as target: with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid) os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms) os.fchmod(target.fileno(), perms)
target.write(content) target.write(content)
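
Since write_file() now opens its target in binary mode, callers are expected to
pass bytes rather than text. A minimal usage sketch (path and content are
hypothetical; assumes charmhelpers is importable):

    from charmhelpers.core.host import write_file

    # Text content must be encoded by the caller before it is written.
    write_file('/etc/example.conf', 'key = value\n'.encode('UTF-8'),
               owner='root', group='root', perms=0o644)
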
@ -305,11 +305,11 @@ def restart_on_change(restart_map, stopstart=False):
ceph_client_changed function. ceph_client_changed function.
""" """
def wrap(f): def wrap(f):
def wrapped_f(*args): def wrapped_f(*args, **kwargs):
checksums = {} checksums = {}
for path in restart_map: for path in restart_map:
checksums[path] = file_hash(path) checksums[path] = file_hash(path)
f(*args) f(*args, **kwargs)
restarts = [] restarts = []
for path in restart_map: for path in restart_map:
if checksums[path] != file_hash(path): if checksums[path] != file_hash(path):
@ -361,7 +361,7 @@ def list_nics(nic_type):
ip_output = (line for line in ip_output if line) ip_output = (line for line in ip_output if line)
for line in ip_output: for line in ip_output:
if line.split()[1].startswith(int_type): if line.split()[1].startswith(int_type):
matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
if matched: if matched:
interface = matched.groups()[0] interface = matched.groups()[0]
else: else:
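
A sketch of what the corrected pattern matches, using a hypothetical "ip" output
line for a VLAN sub-interface (the old pattern hard-coded 'bond' regardless of
the requested interface type):

    import re

    int_type = 'eth'
    line = '3: eth0.100@eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500'
    matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
    print(matched.groups()[0])  # -> 'eth0.100'
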


@ -0,0 +1,42 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
def bool_from_string(value):
"""Interpret string value as boolean.
Returns True if the value translates to True, False if it translates to
False; raises ValueError if the value cannot be interpreted.
"""
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
raise ValueError(msg)
value = value.strip().lower()
if value in ['y', 'yes', 'true', 't']:
return True
elif value in ['n', 'no', 'false', 'f']:
return False
msg = "Unable to interpret string value '%s' as boolean" % (value)
raise ValueError(msg)
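
A short usage sketch of the new helper; behaviour follows the implementation
above (assumes charmhelpers is importable):

    from charmhelpers.core.strutils import bool_from_string

    assert bool_from_string('Yes') is True
    assert bool_from_string(' false ') is False   # whitespace stripped, case-insensitive
    try:
        bool_from_string('maybe')                 # unrecognised strings raise
    except ValueError:
        pass
    try:
        bool_from_string(None)                    # non-string values are rejected
    except ValueError:
        pass
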


@ -17,8 +17,6 @@
# You should have received a copy of the GNU Lesser General Public License # You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import yaml import yaml
from subprocess import check_call from subprocess import check_call
@ -29,6 +27,8 @@ from charmhelpers.core.hookenv import (
ERROR, ERROR,
) )
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file): def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array """Creates a sysctl.conf file from a YAML associative array


@ -21,7 +21,7 @@ from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root', def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None): perms=0o444, templates_dir=None, encoding='UTF-8'):
""" """
Render a template. Render a template.
@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root',
level=hookenv.ERROR) level=hookenv.ERROR)
raise e raise e
content = template.render(context) content = template.render(context)
host.mkdir(os.path.dirname(target), owner, group) host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
host.write_file(target, content, owner, group, perms) host.write_file(target, content.encode(encoding), owner, group, perms)
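
A hedged sketch of a call site using the new signature (template name, context
and ownership are hypothetical): the rendered text is encoded with the given
encoding before being passed to host.write_file(), and the target's parent
directory is created with 0o755:

    from charmhelpers.core.templating import render

    render(source='keystone.conf',              # template under the charm's templates/
           target='/etc/keystone/keystone.conf',
           context={'debug': True},
           owner='keystone', group='keystone',
           perms=0o640)                         # encoding defaults to 'UTF-8'
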


@ -0,0 +1,477 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
#
#
# Authors:
# Kapil Thangavelu <kapil.foss@gmail.com>
#
"""
Intro
-----
A simple way to store state in units. This provides a key value
storage with support for versioned, transactional operation,
and can calculate deltas from previous values to simplify unit logic
when processing changes.
Hook Integration
----------------
There are several extant frameworks for hook execution, including
- charmhelpers.core.hookenv.Hooks
- charmhelpers.core.services.ServiceManager
The storage classes are framework agnostic; one simple integration is
via the HookData contextmanager. It will record the current hook
execution environment (including relation data, config data, etc.),
set up a transaction and allow easy access to the changes from
previously seen values. One consequence of the integration is the
reservation of particular keys ('rels', 'unit', 'env', 'config',
'charm_revisions') for their respective values.
Here's a fully worked integration example using hookenv.Hooks::
from charmhelpers.core import hookenv, unitdata
hook_data = unitdata.HookData()
db = unitdata.kv()
hooks = hookenv.Hooks()
@hooks.hook
def config_changed():
# Print all changes to configuration from previously seen
# values.
for changed, (prev, cur) in hook_data.conf.items():
print('config changed', changed,
'previous value', prev,
'current value', cur)
# Get some unit-specific bookkeeping
if not db.get('pkg_key'):
key = urllib.urlopen('https://example.com/pkg_key').read()
db.set('pkg_key', key)
# Directly access all charm config as a mapping.
conf = db.getrange('config', True)
# Directly access all relation data as a mapping
rels = db.getrange('rels', True)
if __name__ == '__main__':
with hook_data():
hooks.execute(sys.argv)
A more basic integration is via the hook_scope context manager which simply
manages transaction scope (and records hook name, and timestamp)::
>>> from unitdata import kv
>>> db = kv()
>>> with db.hook_scope('install'):
... # do work, in transactional scope.
... db.set('x', 1)
>>> db.get('x')
1
Usage
-----
Values are automatically json de/serialized to preserve basic typing
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
Individual values can be manipulated via get/set::
>>> kv.set('y', True)
>>> kv.get('y')
True
# We can set complex values (dicts, lists) as a single key.
>>> kv.set('config', {'a': 1, 'b': True})
# Also supports returning dictionaries as a record which
# provides attribute access.
>>> config = kv.get('config', record=True)
>>> config.b
True
Groups of keys can be manipulated with update/getrange::
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
>>> kv.getrange('gui.', strip=True)
{'z': 1, 'y': 2}
When updating values, it's very helpful to understand which values
have actually changed and how they have changed. The storage
provides a delta method for this::
>>> data = {'debug': True, 'option': 2}
>>> delta = kv.delta(data, 'config.')
>>> delta.debug.previous
None
>>> delta.debug.current
True
>>> delta
{'debug': (None, True), 'option': (None, 2)}
Note that the delta method does not persist the actual change; it needs
to be explicitly saved via the 'update' method::
>>> kv.update(data, 'config.')
Values modified in the context of a hook scope retain historical values
associated with the hook name.
>>> with db.hook_scope('config-changed'):
... db.set('x', 42)
>>> db.gethistory('x')
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
"""
import collections
import contextlib
import datetime
import json
import os
import pprint
import sqlite3
import sys
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
class Storage(object):
"""Simple key value database for local unit state within charms.
Modifications are automatically committed at hook exit. That's
currently regardless of exit code.
To support dicts, lists, integers, floats, and booleans, values
are automatically json encoded/decoded.
"""
def __init__(self, path=None):
self.db_path = path
if path is None:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
self._closed = False
self._init()
def close(self):
if self._closed:
return
self.flush(False)
self.cursor.close()
self.conn.close()
self._closed = True
def _scoped_query(self, stmt, params=None):
if params is None:
params = []
return stmt, params
def get(self, key, default=None, record=False):
self.cursor.execute(
*self._scoped_query(
'select data from kv where key=?', [key]))
result = self.cursor.fetchone()
if not result:
return default
if record:
return Record(json.loads(result[0]))
return json.loads(result[0])
def getrange(self, key_prefix, strip=False):
stmt = "select key, data from kv where key like '%s%%'" % key_prefix
self.cursor.execute(*self._scoped_query(stmt))
result = self.cursor.fetchall()
if not result:
return None
if not strip:
key_prefix = ''
return dict([
(k[len(key_prefix):], json.loads(v)) for k, v in result])
def update(self, mapping, prefix=""):
for k, v in mapping.items():
self.set("%s%s" % (prefix, k), v)
def unset(self, key):
self.cursor.execute('delete from kv where key=?', [key])
if self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values (?, ?, ?)',
[key, self.revision, json.dumps('DELETED')])
def set(self, key, value):
serialized = json.dumps(value)
self.cursor.execute(
'select data from kv where key=?', [key])
exists = self.cursor.fetchone()
# Skip mutations to the same value
if exists:
if exists[0] == serialized:
return value
if not exists:
self.cursor.execute(
'insert into kv (key, data) values (?, ?)',
(key, serialized))
else:
self.cursor.execute('''
update kv
set data = ?
where key = ?''', [serialized, key])
# Save
if not self.revision:
return value
self.cursor.execute(
'select 1 from kv_revisions where key=? and revision=?',
[key, self.revision])
exists = self.cursor.fetchone()
if not exists:
self.cursor.execute(
'''insert into kv_revisions (
revision, key, data) values (?, ?, ?)''',
(self.revision, key, serialized))
else:
self.cursor.execute(
'''
update kv_revisions
set data = ?
where key = ?
and revision = ?''',
[serialized, key, self.revision])
return value
def delta(self, mapping, prefix):
"""
return a delta containing values that have changed.
"""
previous = self.getrange(prefix, strip=True)
if not previous:
pk = set()
else:
pk = set(previous.keys())
ck = set(mapping.keys())
delta = DeltaSet()
# added
for k in ck.difference(pk):
delta[k] = Delta(None, mapping[k])
# removed
for k in pk.difference(ck):
delta[k] = Delta(previous[k], None)
# changed
for k in pk.intersection(ck):
c = mapping[k]
p = previous[k]
if c != p:
delta[k] = Delta(p, c)
return delta
@contextlib.contextmanager
def hook_scope(self, name=""):
"""Scope all future interactions to the current hook execution
revision."""
assert not self.revision
self.cursor.execute(
'insert into hooks (hook, date) values (?, ?)',
(name or sys.argv[0],
datetime.datetime.utcnow().isoformat()))
self.revision = self.cursor.lastrowid
try:
yield self.revision
self.revision = None
except:
self.flush(False)
self.revision = None
raise
else:
self.flush()
def flush(self, save=True):
if save:
self.conn.commit()
elif self._closed:
return
else:
self.conn.rollback()
def _init(self):
self.cursor.execute('''
create table if not exists kv (
key text,
data text,
primary key (key)
)''')
self.cursor.execute('''
create table if not exists kv_revisions (
key text,
revision integer,
data text,
primary key (key, revision)
)''')
self.cursor.execute('''
create table if not exists hooks (
version integer primary key autoincrement,
hook text,
date text
)''')
self.conn.commit()
def gethistory(self, key, deserialize=False):
self.cursor.execute(
'''
select kv.revision, kv.key, kv.data, h.hook, h.date
from kv_revisions kv,
hooks h
where kv.key=?
and kv.revision = h.version
''', [key])
if deserialize is False:
return self.cursor.fetchall()
return map(_parse_history, self.cursor.fetchall())
def debug(self, fh=sys.stderr):
self.cursor.execute('select * from kv')
pprint.pprint(self.cursor.fetchall(), stream=fh)
self.cursor.execute('select * from kv_revisions')
pprint.pprint(self.cursor.fetchall(), stream=fh)
def _parse_history(d):
return (d[0], d[1], json.loads(d[2]), d[3],
datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
class HookData(object):
"""Simple integration for existing hook exec frameworks.
Records all unit information, and stores deltas for processing
by the hook.
Sample::
from charmhelpers.core import hookenv, unitdata
changes = unitdata.HookData()
db = unitdata.kv()
hooks = hookenv.Hooks()
@hooks.hook
def config_changed():
# View all changes to configuration
for changed, (prev, cur) in changes.conf.items():
print('config changed', changed,
'previous value', prev,
'current value', cur)
# Get some unit-specific bookkeeping
if not db.get('pkg_key'):
key = urllib.urlopen('https://example.com/pkg_key').read()
db.set('pkg_key', key)
if __name__ == '__main__':
with changes():
hooks.execute(sys.argv)
"""
def __init__(self):
self.kv = kv()
self.conf = None
self.rels = None
@contextlib.contextmanager
def __call__(self):
from charmhelpers.core import hookenv
hook_name = hookenv.hook_name()
with self.kv.hook_scope(hook_name):
self._record_charm_version(hookenv.charm_dir())
delta_config, delta_relation = self._record_hook(hookenv)
yield self.kv, delta_config, delta_relation
def _record_charm_version(self, charm_dir):
# Record revisions. Charm revisions are meaningless
# to charm authors as they don't control the revision,
# so logic dependent on revision is not particularly
# useful; however, it is useful for debugging analysis.
charm_rev = open(
os.path.join(charm_dir, 'revision')).read().strip()
charm_rev = charm_rev or '0'
revs = self.kv.get('charm_revisions', [])
if charm_rev not in revs:
revs.append(charm_rev.strip() or '0')
self.kv.set('charm_revisions', revs)
def _record_hook(self, hookenv):
data = hookenv.execution_environment()
self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
self.kv.set('env', data['env'])
self.kv.set('unit', data['unit'])
self.kv.set('relid', data.get('relid'))
return conf_delta, rels_delta
class Record(dict):
__slots__ = ()
def __getattr__(self, k):
if k in self:
return self[k]
raise AttributeError(k)
class DeltaSet(Record):
__slots__ = ()
Delta = collections.namedtuple('Delta', ['previous', 'current'])
_KV = None
def kv():
global _KV
if _KV is None:
_KV = Storage()
return _KV
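
A compact sketch tying the pieces above together, run outside a hook with an
explicit database path instead of the CHARM_DIR default (path and keys are
hypothetical):

    from charmhelpers.core import unitdata

    db = unitdata.Storage('/tmp/demo-unit-state.db')
    with db.hook_scope('config-changed'):
        # Compute what changed relative to the previously stored values.
        delta = db.delta({'debug': True, 'workers': 4}, 'config.')
        if 'debug' in delta and delta.debug.current:
            print('debug turned on')
        # delta() does not persist anything; update() does.
        db.update({'debug': True, 'workers': 4}, 'config.')
    print(db.getrange('config.', strip=True))   # {'debug': True, 'workers': 4}
    db.close()
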


@ -18,6 +18,16 @@ import os
import hashlib import hashlib
import re import re
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir, check_hash
import six import six
if six.PY3: if six.PY3:
from urllib.request import ( from urllib.request import (
@ -35,16 +45,6 @@ else:
) )
from urlparse import urlparse, urlunparse, parse_qs from urlparse import urlparse, urlunparse, parse_qs
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir, check_hash
def splituser(host): def splituser(host):
'''urllib.splituser(), but six's support of this seems broken''' '''urllib.splituser(), but six's support of this seems broken'''


@ -32,7 +32,7 @@ except ImportError:
apt_install("python-git") apt_install("python-git")
from git import Repo from git import Repo
from git.exc import GitCommandError from git.exc import GitCommandError # noqa E402
class GitUrlFetchHandler(BaseFetchHandler): class GitUrlFetchHandler(BaseFetchHandler):


@ -18,9 +18,14 @@ from charmhelpers.contrib.hahelpers.cluster import (
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
log, log,
DEBUG,
INFO, INFO,
) )
from charmhelpers.core.strutils import (
bool_from_string,
)
from charmhelpers.contrib.hahelpers.apache import install_ca_cert from charmhelpers.contrib.hahelpers.apache import install_ca_cert
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@ -169,9 +174,8 @@ class KeystoneContext(context.OSContextGenerator):
def __call__(self): def __call__(self):
from keystone_utils import ( from keystone_utils import (
api_port, set_admin_token, api_port, set_admin_token, endpoint_url, resolve_address,
endpoint_url, resolve_address, PUBLIC, ADMIN, PKI_CERTS_DIR, SSH_USER, ensure_permissions,
PUBLIC, ADMIN
) )
ctxt = {} ctxt = {}
ctxt['token'] = set_admin_token(config('admin-token')) ctxt['token'] = set_admin_token(config('admin-token'))
@ -179,8 +183,12 @@ class KeystoneContext(context.OSContextGenerator):
singlenode_mode=True) singlenode_mode=True)
ctxt['public_port'] = determine_api_port(api_port('keystone-public'), ctxt['public_port'] = determine_api_port(api_port('keystone-public'),
singlenode_mode=True) singlenode_mode=True)
ctxt['debug'] = config('debug') in ['yes', 'true', 'True']
ctxt['verbose'] = config('verbose') in ['yes', 'true', 'True'] debug = config('debug')
ctxt['debug'] = debug and bool_from_string(debug)
verbose = config('verbose')
ctxt['verbose'] = verbose and bool_from_string(verbose)
ctxt['identity_backend'] = config('identity-backend') ctxt['identity_backend'] = config('identity-backend')
ctxt['assignment_backend'] = config('assignment-backend') ctxt['assignment_backend'] = config('assignment-backend')
if config('identity-backend') == 'ldap': if config('identity-backend') == 'ldap':
@ -194,8 +202,37 @@ class KeystoneContext(context.OSContextGenerator):
flags = context.config_flags_parser(ldap_flags) flags = context.config_flags_parser(ldap_flags)
ctxt['ldap_config_flags'] = flags ctxt['ldap_config_flags'] = flags
if config('enable-pki') not in ['false', 'False', 'no', 'No']: enable_pki = config('enable-pki')
enable_pkiz = config('enable-pkiz')
if enable_pki and bool_from_string(enable_pki):
ctxt['signing'] = True ctxt['signing'] = True
ctxt['token_provider'] = 'pki'
elif enable_pkiz and bool_from_string(enable_pkiz):
ctxt['token_provider'] = 'pkiz'
if 'token_provider' in ctxt:
log("Configuring PKI token cert paths", level=DEBUG)
certs = os.path.join(PKI_CERTS_DIR, 'certs')
privates = os.path.join(PKI_CERTS_DIR, 'privates')
for path in [PKI_CERTS_DIR, certs, privates]:
perms = 0o755
if not os.path.isdir(path):
mkdir(path=path, owner=SSH_USER, group='keystone',
perms=perms)
else:
# Ensure accessible by ssh user and group (for sync).
ensure_permissions(path, user=SSH_USER,
group='keystone', perms=perms)
signing_paths = {'certfile': os.path.join(certs,
'signing_cert.pem'),
'keyfile': os.path.join(privates,
'signing_key.pem'),
'ca_certs': os.path.join(certs, 'ca.pem'),
'ca_key': os.path.join(certs, 'ca_key.pem')}
for key, val in signing_paths.iteritems():
ctxt[key] = val
# Base endpoint URL's which are used in keystone responses # Base endpoint URL's which are used in keystone responses
# to unauthenticated requests to redirect clients to the # to unauthenticated requests to redirect clients to the
@ -214,7 +251,7 @@ class KeystoneLoggingContext(context.OSContextGenerator):
def __call__(self): def __call__(self):
ctxt = {} ctxt = {}
debug = config('debug') debug = config('debug')
if debug and debug.lower() in ['yes', 'true']: if debug and bool_from_string(debug):
ctxt['root_level'] = 'DEBUG' ctxt['root_level'] = 'DEBUG'
return ctxt return ctxt
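
For illustration, with enable-pki set to a true value the branch above produces
roughly the following context keys (paths derived from PKI_CERTS_DIR as defined
in keystone_utils.py later in this commit):

    ctxt = {
        'signing': True,
        'token_provider': 'pki',
        'certfile': '/var/lib/keystone/juju_ssl/pki/certs/signing_cert.pem',
        'keyfile': '/var/lib/keystone/juju_ssl/pki/privates/signing_key.pem',
        'ca_certs': '/var/lib/keystone/juju_ssl/pki/certs/ca.pem',
        'ca_key': '/var/lib/keystone/juju_ssl/pki/certs/ca_key.pem',
    }
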


@ -4,7 +4,6 @@ import json
import os import os
import stat import stat
import sys import sys
import time
from subprocess import check_call from subprocess import check_call
@ -18,6 +17,7 @@ from charmhelpers.core.hookenv import (
log, log,
local_unit, local_unit,
DEBUG, DEBUG,
INFO,
WARNING, WARNING,
ERROR, ERROR,
relation_get, relation_get,
@ -32,6 +32,10 @@ from charmhelpers.core.host import (
restart_on_change, restart_on_change,
) )
from charmhelpers.core.strutils import (
bool_from_string,
)
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_install, apt_update, apt_install, apt_update,
filter_installed_packages filter_installed_packages
@ -64,9 +68,12 @@ from keystone_utils import (
CA_CERT_PATH, CA_CERT_PATH,
ensure_permissions, ensure_permissions,
get_ssl_sync_request_units, get_ssl_sync_request_units,
is_str_true,
is_ssl_cert_master, is_ssl_cert_master,
is_db_ready, is_db_ready,
clear_ssl_synced_units,
is_db_initialised,
is_pki_enabled,
ensure_pki_cert_permissions,
) )
from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.cluster import (
@ -110,7 +117,7 @@ def install():
@hooks.hook('config-changed') @hooks.hook('config-changed')
@restart_on_change(restart_map()) @restart_on_change(restart_map())
@synchronize_ca_if_changed() @synchronize_ca_if_changed(fatal=True)
def config_changed(): def config_changed():
if config('prefer-ipv6'): if config('prefer-ipv6'):
setup_ipv6() setup_ipv6()
@ -138,9 +145,13 @@ def config_changed():
save_script_rc() save_script_rc()
configure_https() configure_https()
update_nrpe_config() update_nrpe_config()
CONFIGS.write_all() CONFIGS.write_all()
if is_pki_enabled():
initialise_pki()
# Update relations since SSL may have been configured. If we have peer # Update relations since SSL may have been configured. If we have peer
# units we can rely on the sync to do this in cluster relation. # units we can rely on the sync to do this in cluster relation.
if is_elected_leader(CLUSTER_RES) and not peer_units(): if is_elected_leader(CLUSTER_RES) and not peer_units():
@ -150,15 +161,28 @@ def config_changed():
admin_relation_changed(rid) admin_relation_changed(rid)
# Ensure sync request is sent out (needed for upgrade to ssl from non-ssl) # Ensure sync request is sent out (needed for upgrade to ssl from non-ssl)
settings = {} send_ssl_sync_request()
append_ssl_sync_request(settings)
if settings:
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, relation_settings=settings)
for r_id in relation_ids('ha'): for r_id in relation_ids('ha'):
ha_joined(relation_id=r_id) ha_joined(relation_id=r_id)
@synchronize_ca_if_changed(fatal=True)
def initialise_pki():
"""Create certs and keys required for PKI token signing.
NOTE: keystone.conf [signing] section must be up-to-date prior to
executing this.
"""
if is_ssl_cert_master():
log("Ensuring PKI token certs created", level=DEBUG)
cmd = ['keystone-manage', 'pki_setup', '--keystone-user', 'keystone',
'--keystone-group', 'keystone']
check_call(cmd)
ensure_pki_cert_permissions()
@hooks.hook('shared-db-relation-joined') @hooks.hook('shared-db-relation-joined')
def db_joined(): def db_joined():
if is_relation_made('pgsql-db'): if is_relation_made('pgsql-db'):
@ -189,19 +213,25 @@ def pgsql_db_joined():
relation_set(database=config('database')) relation_set(database=config('database'))
def update_all_identity_relation_units(): def update_all_identity_relation_units(check_db_ready=True):
CONFIGS.write_all() CONFIGS.write_all()
try: if check_db_ready and not is_db_ready():
migrate_database() log('Allowed_units list provided and this unit not present',
except Exception as exc: level=INFO)
log("Database initialisation failed (%s) - db not ready?" % (exc), return
level=WARNING)
else: if not is_db_initialised():
log("Database not yet initialised - deferring identity-relation "
"updates", level=INFO)
return
if is_elected_leader(CLUSTER_RES):
ensure_initial_admin(config) ensure_initial_admin(config)
log('Firing identity_changed hook for all related services.')
for rid in relation_ids('identity-service'): log('Firing identity_changed hook for all related services.')
for unit in related_units(rid): for rid in relation_ids('identity-service'):
identity_changed(relation_id=rid, remote_unit=unit) for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit)
@synchronize_ca_if_changed(force=True) @synchronize_ca_if_changed(force=True)
@ -222,11 +252,14 @@ def db_changed():
# units acl entry has been added. So, if the db supports passing # units acl entry has been added. So, if the db supports passing
# a list of permitted units then check if we're in the list. # a list of permitted units then check if we're in the list.
if not is_db_ready(use_current_context=True): if not is_db_ready(use_current_context=True):
log('Allowed_units list provided and this unit not present') log('Allowed_units list provided and this unit not present',
level=INFO)
return return
migrate_database()
# Ensure any existing service entries are updated in the # Ensure any existing service entries are updated in the
# new database backend # new database backend. Also avoid duplicate db ready check.
update_all_identity_relation_units() update_all_identity_relation_units(check_db_ready=False)
@hooks.hook('pgsql-db-relation-changed') @hooks.hook('pgsql-db-relation-changed')
@ -238,24 +271,35 @@ def pgsql_db_changed():
else: else:
CONFIGS.write(KEYSTONE_CONF) CONFIGS.write(KEYSTONE_CONF)
if is_elected_leader(CLUSTER_RES): if is_elected_leader(CLUSTER_RES):
if not is_db_ready(use_current_context=True):
log('Allowed_units list provided and this unit not present',
level=INFO)
return
migrate_database()
# Ensure any existing service entries are updated in the # Ensure any existing service entries are updated in the
# new database backend # new database backend. Also avoid duplicate db ready check.
update_all_identity_relation_units() update_all_identity_relation_units(check_db_ready=False)
@hooks.hook('identity-service-relation-changed') @hooks.hook('identity-service-relation-changed')
@restart_on_change(restart_map())
@synchronize_ca_if_changed() @synchronize_ca_if_changed()
def identity_changed(relation_id=None, remote_unit=None): def identity_changed(relation_id=None, remote_unit=None):
CONFIGS.write_all() CONFIGS.write_all()
notifications = {} notifications = {}
if is_elected_leader(CLUSTER_RES): if is_elected_leader(CLUSTER_RES):
if not is_db_ready(): if not is_db_ready():
log("identity-service-relation-changed hook fired before db " log("identity-service-relation-changed hook fired before db "
"ready - deferring until db ready", level=WARNING) "ready - deferring until db ready", level=WARNING)
return return
if not is_db_initialised():
log("Database not yet initialised - deferring identity-relation "
"updates", level=INFO)
return
add_service_to_keystone(relation_id, remote_unit) add_service_to_keystone(relation_id, remote_unit)
settings = relation_get(rid=relation_id, unit=remote_unit) settings = relation_get(rid=relation_id, unit=remote_unit)
service = settings.get('service', None) service = settings.get('service', None)
@ -283,15 +327,51 @@ def identity_changed(relation_id=None, remote_unit=None):
send_notifications(notifications) send_notifications(notifications)
def append_ssl_sync_request(settings): def send_ssl_sync_request():
"""Add request to be synced to relation settings. """Set sync request on cluster relation.
This will be consumed by cluster-relation-changed ssl master. Value set equals number of ssl configs currently enabled so that if they
change, we ensure that certs are synced. This setting is consumed by
cluster-relation-changed ssl master. We also clear the 'synced' set to
guarantee that a sync will occur.
Note that we do nothing if the setting is already applied.
""" """
if (is_str_true(config('use-https')) or unit = local_unit().replace('/', '-')
is_str_true(config('https-service-endpoints'))): count = 0
unit = local_unit().replace('/', '-')
settings['ssl-sync-required-%s' % (unit)] = '1' use_https = config('use-https')
if use_https and bool_from_string(use_https):
count += 1
https_service_endpoints = config('https-service-endpoints')
if (https_service_endpoints and
bool_from_string(https_service_endpoints)):
count += 2
enable_pki = config('enable-pki')
if enable_pki and bool_from_string(enable_pki):
count += 3
enable_pkiz = config('enable-pkiz')
if enable_pkiz and bool_from_string(enable_pkiz):
count += 4
if count:
key = 'ssl-sync-required-%s' % (unit)
settings = {key: count}
prev = 0
rid = None
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_prev = relation_get(rid=rid, unit=unit, attribute=key) or 0
if _prev and _prev > prev:
prev = _prev
if rid and prev < count:
clear_ssl_synced_units()
log("Setting %s=%s" % (key, count), level=DEBUG)
relation_set(relation_id=rid, relation_settings=settings)
@hooks.hook('cluster-relation-joined') @hooks.hook('cluster-relation-joined')
@ -314,9 +394,8 @@ def cluster_joined():
private_addr = get_ipv6_addr(exc_list=[config('vip')])[0] private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
settings['private-address'] = private_addr settings['private-address'] = private_addr
append_ssl_sync_request(settings)
relation_set(relation_settings=settings) relation_set(relation_settings=settings)
send_ssl_sync_request()
def apply_echo_filters(settings, echo_whitelist): def apply_echo_filters(settings, echo_whitelist):
@ -362,7 +441,7 @@ def cluster_changed():
# NOTE(jamespage) re-echo passwords for peer storage # NOTE(jamespage) re-echo passwords for peer storage
echo_whitelist, overrides = \ echo_whitelist, overrides = \
apply_echo_filters(settings, ['_passwd', 'identity-service:', apply_echo_filters(settings, ['_passwd', 'identity-service:',
'ssl-cert-master']) 'ssl-cert-master', 'db-initialised'])
log("Peer echo overrides: %s" % (overrides), level=DEBUG) log("Peer echo overrides: %s" % (overrides), level=DEBUG)
relation_set(**overrides) relation_set(**overrides)
if echo_whitelist: if echo_whitelist:
@ -379,6 +458,9 @@ def cluster_changed():
synced_units = json.loads(synced_units) synced_units = json.loads(synced_units)
diff = set(units).symmetric_difference(set(synced_units)) diff = set(units).symmetric_difference(set(synced_units))
if is_pki_enabled():
initialise_pki()
if units and (not synced_units or diff): if units and (not synced_units or diff):
log("New peers joined and need syncing - %s" % log("New peers joined and need syncing - %s" %
(', '.join(units)), level=DEBUG) (', '.join(units)), level=DEBUG)
@ -455,10 +537,8 @@ def ha_changed():
clustered = relation_get('clustered') clustered = relation_get('clustered')
if clustered and is_elected_leader(CLUSTER_RES): if clustered and is_elected_leader(CLUSTER_RES):
ensure_initial_admin(config)
log('Cluster configured, notifying other services and updating ' log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration') 'keystone endpoint configuration')
update_all_identity_relation_units() update_all_identity_relation_units()
@ -509,7 +589,6 @@ def upgrade_charm():
if is_elected_leader(CLUSTER_RES): if is_elected_leader(CLUSTER_RES):
log('Cluster leader - ensuring endpoint configuration is up to ' log('Cluster leader - ensuring endpoint configuration is up to '
'date', level=DEBUG) 'date', level=DEBUG)
time.sleep(10)
update_all_identity_relation_units() update_all_identity_relation_units()


@ -113,15 +113,16 @@ CA_SINGLETON = []
def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT): def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
print 'Ensuring certificate authority exists at %s.' % ca_dir log('Ensuring certificate authority exists at %s.' % ca_dir, level=DEBUG)
if not os.path.exists(ca_dir): if not os.path.exists(ca_dir):
print 'Initializing new certificate authority at %s' % ca_dir log('Initializing new certificate authority at %s' % ca_dir,
level=DEBUG)
os.mkdir(ca_dir) os.mkdir(ca_dir)
for i in ['certs', 'crl', 'newcerts', 'private']: for i in ['certs', 'crl', 'newcerts', 'private']:
d = os.path.join(ca_dir, i) d = os.path.join(ca_dir, i)
if not os.path.exists(d): if not os.path.exists(d):
print 'Creating %s.' % d log('Creating %s.' % d, level=DEBUG)
os.mkdir(d) os.mkdir(d)
os.chmod(os.path.join(ca_dir, 'private'), 0o710) os.chmod(os.path.join(ca_dir, 'private'), 0o710)
@ -132,9 +133,11 @@ def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
if not os.path.isfile(os.path.join(ca_dir, 'index.txt')): if not os.path.isfile(os.path.join(ca_dir, 'index.txt')):
with open(os.path.join(ca_dir, 'index.txt'), 'wb') as out: with open(os.path.join(ca_dir, 'index.txt'), 'wb') as out:
out.write('') out.write('')
if not os.path.isfile(os.path.join(ca_dir, 'ca.cnf')):
print 'Creating new CA config in %s' % ca_dir conf = os.path.join(ca_dir, 'ca.cnf')
with open(os.path.join(ca_dir, 'ca.cnf'), 'wb') as out: if not os.path.isfile(conf):
log('Creating new CA config in %s' % ca_dir, level=DEBUG)
with open(conf, 'wb') as out:
out.write(CA_CONFIG % locals()) out.write(CA_CONFIG % locals())
@ -144,40 +147,42 @@ def root_ca_crt_key(ca_dir):
key = os.path.join(ca_dir, 'private', 'cacert.key') key = os.path.join(ca_dir, 'private', 'cacert.key')
for f in [crt, key]: for f in [crt, key]:
if not os.path.isfile(f): if not os.path.isfile(f):
print 'Missing %s, will re-initialize cert+key.' % f log('Missing %s, will re-initialize cert+key.' % f, level=DEBUG)
init = True init = True
else: else:
print 'Found %s.' % f log('Found %s.' % f, level=DEBUG)
if init: if init:
cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), conf = os.path.join(ca_dir, 'ca.cnf')
cmd = ['openssl', 'req', '-config', conf,
'-x509', '-nodes', '-newkey', 'rsa', '-days', '21360', '-x509', '-nodes', '-newkey', 'rsa', '-days', '21360',
'-keyout', key, '-out', crt, '-outform', 'PEM'] '-keyout', key, '-out', crt, '-outform', 'PEM']
subprocess.check_call(cmd) subprocess.check_call(cmd)
return crt, key return crt, key
def intermediate_ca_csr_key(ca_dir): def intermediate_ca_csr_key(ca_dir):
print 'Creating new intermediate CSR.' log('Creating new intermediate CSR.', level=DEBUG)
key = os.path.join(ca_dir, 'private', 'cacert.key') key = os.path.join(ca_dir, 'private', 'cacert.key')
csr = os.path.join(ca_dir, 'cacert.csr') csr = os.path.join(ca_dir, 'cacert.csr')
cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), conf = os.path.join(ca_dir, 'ca.cnf')
'-sha1', '-newkey', 'rsa', '-nodes', '-keyout', key, '-out', cmd = ['openssl', 'req', '-config', conf, '-sha1', '-newkey', 'rsa',
csr, '-outform', '-nodes', '-keyout', key, '-out', csr, '-outform', 'PEM']
'PEM']
subprocess.check_call(cmd) subprocess.check_call(cmd)
return csr, key return csr, key
def sign_int_csr(ca_dir, csr, common_name): def sign_int_csr(ca_dir, csr, common_name):
print 'Signing certificate request %s.' % csr log('Signing certificate request %s.' % csr, level=DEBUG)
crt = os.path.join(ca_dir, 'certs', crt_name = os.path.basename(csr).split('.')[0]
'%s.crt' % os.path.basename(csr).split('.')[0]) crt = os.path.join(ca_dir, 'certs', '%s.crt' % crt_name)
subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name) subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
cmd = ['openssl', 'ca', '-batch', '-config', conf = os.path.join(ca_dir, 'ca.cnf')
os.path.join(ca_dir, 'ca.cnf'), cmd = ['openssl', 'ca', '-batch', '-config', conf, '-extensions',
'-extensions', 'ca_extensions', '-days', CA_EXPIRY, '-notext', 'ca_extensions', '-days', CA_EXPIRY, '-notext', '-in', csr, '-out',
'-in', csr, '-out', crt, '-subj', subj, '-batch'] crt, '-subj', subj, '-batch']
print ' '.join(cmd) log("Executing: %s" % ' '.join(cmd), level=DEBUG)
subprocess.check_call(cmd) subprocess.check_call(cmd)
return crt return crt
@ -187,19 +192,20 @@ def init_root_ca(ca_dir, common_name):
return root_ca_crt_key(ca_dir) return root_ca_crt_key(ca_dir)
def init_intermediate_ca(ca_dir, common_name, root_ca_dir, def init_intermediate_ca(ca_dir, common_name, root_ca_dir, org_name=ORG_NAME,
org_name=ORG_NAME, org_unit_name=ORG_UNIT): org_unit_name=ORG_UNIT):
init_ca(ca_dir, common_name) init_ca(ca_dir, common_name)
if not os.path.isfile(os.path.join(ca_dir, 'cacert.pem')): if not os.path.isfile(os.path.join(ca_dir, 'cacert.pem')):
csr, key = intermediate_ca_csr_key(ca_dir) csr, key = intermediate_ca_csr_key(ca_dir)
crt = sign_int_csr(root_ca_dir, csr, common_name) crt = sign_int_csr(root_ca_dir, csr, common_name)
shutil.copy(crt, os.path.join(ca_dir, 'cacert.pem')) shutil.copy(crt, os.path.join(ca_dir, 'cacert.pem'))
else: else:
print 'Intermediate CA certificate already exists.' log('Intermediate CA certificate already exists.', level=DEBUG)
if not os.path.isfile(os.path.join(ca_dir, 'signing.cnf')): conf = os.path.join(ca_dir, 'signing.cnf')
print 'Creating new signing config in %s' % ca_dir if not os.path.isfile(conf):
with open(os.path.join(ca_dir, 'signing.cnf'), 'wb') as out: log('Creating new signing config in %s' % ca_dir, level=DEBUG)
with open(conf, 'wb') as out:
out.write(SIGNING_CONFIG % locals()) out.write(SIGNING_CONFIG % locals())
@ -212,7 +218,7 @@ def create_certificate(ca_dir, service):
key, '-out', csr, '-subj', subj] key, '-out', csr, '-subj', subj]
subprocess.check_call(cmd) subprocess.check_call(cmd)
crt = sign_int_csr(ca_dir, csr, common_name) crt = sign_int_csr(ca_dir, csr, common_name)
print 'Signed new CSR, crt @ %s' % crt log('Signed new CSR, crt @ %s' % crt, level=DEBUG)
return return
@ -221,13 +227,14 @@ def update_bundle(bundle_file, new_bundle):
if os.path.isfile(bundle_file): if os.path.isfile(bundle_file):
current = open(bundle_file, 'r').read().strip() current = open(bundle_file, 'r').read().strip()
if new_bundle == current: if new_bundle == current:
print 'CA Bundle @ %s is up to date.' % bundle_file log('CA Bundle @ %s is up to date.' % bundle_file, level=DEBUG)
return return
else:
print 'Updating CA bundle @ %s.' % bundle_file log('Updating CA bundle @ %s.' % bundle_file, level=DEBUG)
with open(bundle_file, 'wb') as out: with open(bundle_file, 'wb') as out:
out.write(new_bundle) out.write(new_bundle)
subprocess.check_call(['update-ca-certificates']) subprocess.check_call(['update-ca-certificates'])
@ -250,15 +257,19 @@ def tar_directory(path):
class JujuCA(object): class JujuCA(object):
def __init__(self, name, ca_dir, root_ca_dir, user, group): def __init__(self, name, ca_dir, root_ca_dir, user, group):
root_crt, root_key = init_root_ca(root_ca_dir, # Root CA
'%s Certificate Authority' % name) cn = '%s Certificate Authority' % name
init_intermediate_ca(ca_dir, root_crt, root_key = init_root_ca(root_ca_dir, cn)
'%s Intermediate Certificate Authority' % name, # Intermediate CA
root_ca_dir) cn = '%s Intermediate Certificate Authority' % name
init_intermediate_ca(ca_dir, cn, root_ca_dir)
# Create dirs
cmd = ['chown', '-R', '%s.%s' % (user, group), ca_dir] cmd = ['chown', '-R', '%s.%s' % (user, group), ca_dir]
subprocess.check_call(cmd) subprocess.check_call(cmd)
cmd = ['chown', '-R', '%s.%s' % (user, group), root_ca_dir] cmd = ['chown', '-R', '%s.%s' % (user, group), root_ca_dir]
subprocess.check_call(cmd) subprocess.check_call(cmd)
self.ca_dir = ca_dir self.ca_dir = ca_dir
self.root_ca_dir = root_ca_dir self.root_ca_dir = root_ca_dir
self.user = user self.user = user
@ -268,8 +279,8 @@ class JujuCA(object):
def _sign_csr(self, csr, service, common_name): def _sign_csr(self, csr, service, common_name):
subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name) subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name) crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name)
cmd = ['openssl', 'ca', '-config', conf = os.path.join(self.ca_dir, 'signing.cnf')
os.path.join(self.ca_dir, 'signing.cnf'), '-extensions', cmd = ['openssl', 'ca', '-config', conf, '-extensions',
'req_extensions', '-days', '365', '-notext', '-in', csr, 'req_extensions', '-days', '365', '-notext', '-in', csr,
'-out', crt, '-batch', '-subj', subj] '-out', crt, '-batch', '-subj', subj]
subprocess.check_call(cmd) subprocess.check_call(cmd)
@ -288,10 +299,16 @@ class JujuCA(object):
log('Signed new CSR, crt @ %s' % crt, level=DEBUG) log('Signed new CSR, crt @ %s' % crt, level=DEBUG)
return crt, key return crt, key
def get_key_path(self, cn):
return os.path.join(self.ca_dir, 'certs', '%s.key' % cn)
def get_cert_path(self, cn):
return os.path.join(self.ca_dir, 'certs', '%s.crt' % cn)
def get_cert_and_key(self, common_name): def get_cert_and_key(self, common_name):
log('Getting certificate and key for %s.' % common_name, level=DEBUG) log('Getting certificate and key for %s.' % common_name, level=DEBUG)
keypath = os.path.join(self.ca_dir, 'certs', '%s.key' % common_name) keypath = self.get_key_path(common_name)
crtpath = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name) crtpath = self.get_cert_path(common_name)
if os.path.isfile(crtpath): if os.path.isfile(crtpath):
log('Found existing certificate for %s.' % common_name, log('Found existing certificate for %s.' % common_name,
level=DEBUG) level=DEBUG)
@ -324,8 +341,24 @@ class JujuCA(object):
crt, key = self._create_certificate(common_name, common_name) crt, key = self._create_certificate(common_name, common_name)
return open(crt, 'r').read(), open(key, 'r').read() return open(crt, 'r').read(), open(key, 'r').read()
@property
def ca_cert_path(self):
return os.path.join(self.ca_dir, 'cacert.pem')
@property
def ca_key_path(self):
return os.path.join(self.ca_dir, 'private', 'cacert.key')
@property
def root_ca_cert_path(self):
return os.path.join(self.root_ca_dir, 'cacert.pem')
@property
def root_ca_key_path(self):
return os.path.join(self.root_ca_dir, 'private', 'cacert.key')
def get_ca_bundle(self): def get_ca_bundle(self):
int_cert = open(os.path.join(self.ca_dir, 'cacert.pem')).read() int_cert = open(self.ca_cert_path).read()
root_cert = open(os.path.join(self.root_ca_dir, 'cacert.pem')).read() root_cert = open(self.root_ca_cert_path).read()
# NOTE: ordering of certs in bundle matters! # NOTE: ordering of certs in bundle matters!
return int_cert + root_cert return int_cert + root_cert


@ -21,7 +21,6 @@ from charmhelpers.contrib.hahelpers.cluster import(
determine_api_port, determine_api_port,
https, https,
peer_units, peer_units,
oldest_peer,
) )
from charmhelpers.contrib.openstack import context, templating from charmhelpers.contrib.openstack import context, templating
@ -49,6 +48,10 @@ from charmhelpers.core.host import (
write_file, write_file,
) )
from charmhelpers.core.strutils import (
bool_from_string,
)
import charmhelpers.contrib.unison as unison import charmhelpers.contrib.unison as unison
from charmhelpers.core.decorators import ( from charmhelpers.core.decorators import (
@ -134,10 +137,13 @@ APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
APACHE_SSL_DIR = '/etc/apache2/ssl/keystone' APACHE_SSL_DIR = '/etc/apache2/ssl/keystone'
SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/' SYNC_FLAGS_DIR = '/var/lib/keystone/juju_sync_flags/'
SSL_DIR = '/var/lib/keystone/juju_ssl/' SSL_DIR = '/var/lib/keystone/juju_ssl/'
PKI_CERTS_DIR = os.path.join(SSL_DIR, 'pki')
SSL_CA_NAME = 'Ubuntu Cloud' SSL_CA_NAME = 'Ubuntu Cloud'
CLUSTER_RES = 'grp_ks_vips' CLUSTER_RES = 'grp_ks_vips'
SSH_USER = 'juju_keystone' SSH_USER = 'juju_keystone'
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
SSL_SYNC_SEMAPHORE = threading.Semaphore() SSL_SYNC_SEMAPHORE = threading.Semaphore()
SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]
BASE_RESOURCE_MAP = OrderedDict([ BASE_RESOURCE_MAP = OrderedDict([
(KEYSTONE_CONF, { (KEYSTONE_CONF, {
@ -169,8 +175,6 @@ BASE_RESOURCE_MAP = OrderedDict([
}), }),
]) ])
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
valid_services = { valid_services = {
"nova": { "nova": {
"type": "compute", "type": "compute",
@ -227,18 +231,18 @@ valid_services = {
} }
def is_str_true(value): def ensure_pki_cert_permissions():
if value and value.lower() in ['true', 'yes']: perms = 0o755
return True # Ensure accessible by unison user and group (for sync).
for path in glob.glob("%s/*" % PKI_CERTS_DIR):
return False ensure_permissions(path, user=SSH_USER, group='keystone', perms=perms,
recurse=True)
def resource_map(): def resource_map():
''' """Dynamically generate a map of resources that will be managed for a
Dynamically generate a map of resources that will be managed for a single single hook execution.
hook execution. """
'''
resource_map = deepcopy(BASE_RESOURCE_MAP) resource_map = deepcopy(BASE_RESOURCE_MAP)
if os.path.exists('/etc/apache2/conf-available'): if os.path.exists('/etc/apache2/conf-available'):
@ -264,7 +268,7 @@ def restart_map():
def services(): def services():
''' Returns a list of services associated with this charm ''' """Returns a list of services associated with this charm"""
_services = [] _services = []
for v in restart_map().values(): for v in restart_map().values():
_services = _services + v _services = _services + v
@ -272,7 +276,7 @@ def services():
def determine_ports(): def determine_ports():
'''Assemble a list of API ports for services we are managing''' """Assemble a list of API ports for services we are managing"""
ports = [config('admin-port'), config('service-port')] ports = [config('admin-port'), config('service-port')]
return list(set(ports)) return list(set(ports))
@ -319,11 +323,36 @@ def do_openstack_upgrade(configs):
configs.write_all() configs.write_all()
if is_elected_leader(CLUSTER_RES): if is_elected_leader(CLUSTER_RES):
migrate_database() if is_db_ready():
migrate_database()
else:
log("Database not ready - deferring to shared-db relation",
level=INFO)
return
def set_db_initialised():
for rid in relation_ids('cluster'):
relation_set(relation_settings={'db-initialised': 'True'},
relation_id=rid)
def is_db_initialised():
for rid in relation_ids('cluster'):
units = related_units(rid) + [local_unit()]
for unit in units:
db_initialised = relation_get(attribute='db-initialised',
unit=unit, rid=rid)
if db_initialised:
log("Database is initialised", level=DEBUG)
return True
log("Database is NOT initialised", level=DEBUG)
return False
def migrate_database(): def migrate_database():
'''Runs keystone-manage to initialize a new database or migrate existing''' """Runs keystone-manage to initialize a new database or migrate existing"""
log('Migrating the keystone database.', level=INFO) log('Migrating the keystone database.', level=INFO)
service_stop('keystone') service_stop('keystone')
# NOTE(jamespage) > icehouse creates a log file as root so use # NOTE(jamespage) > icehouse creates a log file as root so use
@ -333,12 +362,13 @@ def migrate_database():
subprocess.check_output(cmd) subprocess.check_output(cmd)
service_start('keystone') service_start('keystone')
time.sleep(10) time.sleep(10)
set_db_initialised()
# OLD # OLD
def get_local_endpoint(): def get_local_endpoint():
""" Returns the URL for the local end-point bypassing haproxy/ssl """ """Returns the URL for the local end-point bypassing haproxy/ssl"""
if config('prefer-ipv6'): if config('prefer-ipv6'):
ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0] ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
endpoint_url = 'http://[%s]:{}/v2.0/' % ipv6_addr endpoint_url = 'http://[%s]:{}/v2.0/' % ipv6_addr
@ -439,7 +469,7 @@ def create_endpoint_template(region, service, publicurl, adminurl,
def create_tenant(name): def create_tenant(name):
""" creates a tenant if it does not already exist """ """Creates a tenant if it does not already exist"""
import manager import manager
manager = manager.KeystoneManager(endpoint=get_local_endpoint(), manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
token=get_admin_token()) token=get_admin_token())
@ -453,7 +483,7 @@ def create_tenant(name):
def create_user(name, password, tenant): def create_user(name, password, tenant):
""" creates a user if it doesn't already exist, as a member of tenant """ """Creates a user if it doesn't already exist, as a member of tenant"""
import manager import manager
manager = manager.KeystoneManager(endpoint=get_local_endpoint(), manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
token=get_admin_token()) token=get_admin_token())
@ -472,7 +502,7 @@ def create_user(name, password, tenant):
def create_role(name, user=None, tenant=None): def create_role(name, user=None, tenant=None):
""" creates a role if it doesn't already exist. grants role to user """ """Creates a role if it doesn't already exist. grants role to user"""
import manager import manager
manager = manager.KeystoneManager(endpoint=get_local_endpoint(), manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
token=get_admin_token()) token=get_admin_token())
@ -499,7 +529,7 @@ def create_role(name, user=None, tenant=None):
def grant_role(user, role, tenant): def grant_role(user, role, tenant):
"""grant user+tenant a specific role""" """Grant user and tenant a specific role"""
import manager import manager
manager = manager.KeystoneManager(endpoint=get_local_endpoint(), manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
token=get_admin_token()) token=get_admin_token())
@ -646,7 +676,7 @@ def load_stored_passwords(path=SERVICE_PASSWD_PATH):
def _migrate_service_passwords(): def _migrate_service_passwords():
''' Migrate on-disk service passwords to peer storage ''' """Migrate on-disk service passwords to peer storage"""
if os.path.exists(SERVICE_PASSWD_PATH): if os.path.exists(SERVICE_PASSWD_PATH):
log('Migrating on-disk stored passwords to peer storage') log('Migrating on-disk stored passwords to peer storage')
creds = load_stored_passwords() creds = load_stored_passwords()
@ -666,11 +696,25 @@ def get_service_password(service_username):
return passwd return passwd
def ensure_permissions(path, user=None, group=None, perms=None): def ensure_permissions(path, user=None, group=None, perms=None, recurse=False,
maxdepth=50):
"""Set chownand chmod for path """Set chownand chmod for path
Note that -1 for uid or gid result in no change. Note that -1 for uid or gid result in no change.
""" """
if recurse:
if not maxdepth:
log("Max recursion depth reached - skipping further recursion")
return
paths = glob.glob("%s/*" % (path))
if len(paths) > 1:
for path in paths:
ensure_permissions(path, user=user, group=group, perms=perms,
recurse=recurse, maxdepth=maxdepth - 1)
return
if user: if user:
uid = pwd.getpwnam(user).pw_uid uid = pwd.getpwnam(user).pw_uid
else: else:
@ -764,14 +808,27 @@ def create_peer_actions(actions):
def unison_sync(paths_to_sync): def unison_sync(paths_to_sync):
"""Do unison sync and retry a few times if it fails since peers may not be """Do unison sync and retry a few times if it fails since peers may not be
ready for sync. ready for sync.
Returns list of synced units or None if one or more peers was not synced.
""" """
log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)), log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),
level=INFO) level=INFO)
keystone_gid = grp.getgrnam('keystone').gr_gid keystone_gid = grp.getgrnam('keystone').gr_gid
# NOTE(dosaboy): This will sync to all peers who have already provided
# their ssh keys. If any existing peers have not provided their keys yet,
# they will be silently ignored.
unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync, unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,
user=SSH_USER, verbose=True, gid=keystone_gid, user=SSH_USER, verbose=True, gid=keystone_gid,
fatal=True) fatal=True)
synced_units = peer_units()
if len(unison.collect_authed_hosts('cluster')) != len(synced_units):
log("Not all peer units synced due to missing public keys", level=INFO)
return None
else:
return synced_units
def get_ssl_sync_request_units(): def get_ssl_sync_request_units():
"""Get list of units that have requested to be synced. """Get list of units that have requested to be synced.
@ -791,20 +848,31 @@ def get_ssl_sync_request_units():
return units return units
def is_ssl_cert_master(): def is_ssl_cert_master(votes=None):
"""Return True if this unit is ssl cert master.""" """Return True if this unit is ssl cert master."""
master = None master = None
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
master = relation_get(attribute='ssl-cert-master', rid=rid, master = relation_get(attribute='ssl-cert-master', rid=rid,
unit=local_unit()) unit=local_unit())
return master == local_unit() if master == local_unit():
votes = votes or get_ssl_cert_master_votes()
if not peer_units() or (len(votes) == 1 and master in votes):
return True
log("Did not get consensus from peers on who is ssl-cert-master "
"(%s)" % (votes), level=INFO)
return False
def is_ssl_enabled(): def is_ssl_enabled():
# Don't do anything if we are not in ssl/https mode use_https = config('use-https')
if (is_str_true(config('use-https')) or https_service_endpoints = config('https-service-endpoints')
is_str_true(config('https-service-endpoints'))): if ((use_https and bool_from_string(use_https)) or
(https_service_endpoints and
bool_from_string(https_service_endpoints)) or
is_pki_enabled()):
log("SSL/HTTPS is enabled", level=DEBUG) log("SSL/HTTPS is enabled", level=DEBUG)
return True return True
@ -812,7 +880,21 @@ def is_ssl_enabled():
return True return True
def ensure_ssl_cert_master(use_oldest_peer=False): def get_ssl_cert_master_votes():
"""Returns a list of unique votes."""
votes = []
# Gather election results from peers. These will need to be consistent.
for rid in relation_ids('cluster'):
for unit in related_units(rid):
m = relation_get(rid=rid, unit=unit,
attribute='ssl-cert-master')
if m is not None:
votes.append(m)
return list(set(votes))
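Taken together, is_ssl_cert_master() and get_ssl_cert_master_votes() implement a simple echo-based consensus: each peer echoes who it believes the cert master is, and a unit only treats itself as master when every echoed vote names it. A minimal sketch of that check, using a plain dict as a stand-in for the cluster relation data:

def collect_votes(peer_settings):
    # peer_settings: {unit_name: {'ssl-cert-master': value}} stand-in for
    # relation_get() across the cluster relation.
    votes = [attrs.get('ssl-cert-master') for attrs in peer_settings.values()]
    return list(set(v for v in votes if v is not None))


def has_consensus(local_unit, votes):
    # Master only when there is exactly one vote and it names this unit.
    return len(votes) == 1 and local_unit in votes


peers = {'keystone/1': {'ssl-cert-master': 'keystone/0'},
         'keystone/2': {'ssl-cert-master': 'keystone/0'}}
assert has_consensus('keystone/0', collect_votes(peers))
assert not has_consensus('keystone/1', collect_votes(peers))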
def ensure_ssl_cert_master():
"""Ensure that an ssl cert master has been elected. """Ensure that an ssl cert master has been elected.
Normally the cluster leader will take control but we allow for this to be Normally the cluster leader will take control but we allow for this to be
@ -822,31 +904,19 @@ def ensure_ssl_cert_master(use_oldest_peer=False):
if not is_ssl_enabled(): if not is_ssl_enabled():
return False return False
elect = False
peers = peer_units()
master_override = False master_override = False
if use_oldest_peer: elect = is_elected_leader(CLUSTER_RES)
elect = oldest_peer(peers)
else:
elect = is_elected_leader(CLUSTER_RES)
# If no peers, we allow this unit to elect itself as master and do # If no peers, we allow this unit to elect itself as master and do
# sync immediately. # sync immediately.
if not peers and not is_ssl_cert_master(): if not peer_units():
elect = True elect = True
master_override = True master_override = True
if elect: if elect:
masters = [] votes = get_ssl_cert_master_votes()
for rid in relation_ids('cluster'):
for unit in related_units(rid):
m = relation_get(rid=rid, unit=unit,
attribute='ssl-cert-master')
if m is not None:
masters.append(m)
# We expect all peers to echo this setting # We expect all peers to echo this setting
if not masters or 'unknown' in masters: if not votes or 'unknown' in votes:
log("Notifying peers this unit is ssl-cert-master", level=INFO) log("Notifying peers this unit is ssl-cert-master", level=INFO)
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
settings = {'ssl-cert-master': local_unit()} settings = {'ssl-cert-master': local_unit()}
@ -855,10 +925,11 @@ def ensure_ssl_cert_master(use_oldest_peer=False):
# Return now and wait for cluster-relation-changed (peer_echo) for # Return now and wait for cluster-relation-changed (peer_echo) for
# sync. # sync.
return master_override return master_override
elif len(set(masters)) != 1 and local_unit() not in masters: elif not is_ssl_cert_master(votes):
log("Did not get consensus from peers on who is ssl-cert-master " if not master_override:
"(%s) - waiting for current master to release before " log("Conscensus not reached - current master will need to "
"self-electing" % (masters), level=INFO) "release", level=INFO)
return master_override return master_override
if not is_ssl_cert_master(): if not is_ssl_cert_master():
@ -868,6 +939,16 @@ def ensure_ssl_cert_master(use_oldest_peer=False):
return True return True
def is_pki_enabled():
enable_pki = config('enable-pki')
enable_pkiz = config('enable-pkiz')
if (enable_pki and bool_from_string(enable_pki) or
enable_pkiz and bool_from_string(enable_pkiz)):
return True
return False
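is_pki_enabled() relies on bool_from_string() to interpret the enable-pki / enable-pkiz config strings. As a rough illustration only (this mirrors the intent of charmhelpers.core.strutils.bool_from_string rather than reproducing its exact implementation), the conversion and the combined check look like:

def to_bool(value):
    # Map common string spellings of booleans onto True/False.
    value = str(value).strip().lower()
    if value in ('y', 'yes', 'true', 't', '1'):
        return True
    if value in ('n', 'no', 'false', 'f', '0'):
        return False
    raise ValueError("Unable to interpret %r as a boolean" % (value,))


def pki_enabled(config):
    # config: dict-like stand-in for the charm's config() accessor.
    return any(to_bool(config.get(key) or 'false')
               for key in ('enable-pki', 'enable-pkiz'))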
def synchronize_ca(fatal=False): def synchronize_ca(fatal=False):
"""Broadcast service credentials to peers. """Broadcast service credentials to peers.
@ -883,19 +964,26 @@ def synchronize_ca(fatal=False):
""" """
paths_to_sync = [SYNC_FLAGS_DIR] paths_to_sync = [SYNC_FLAGS_DIR]
if is_str_true(config('https-service-endpoints')): if bool_from_string(config('https-service-endpoints')):
log("Syncing all endpoint certs since https-service-endpoints=True", log("Syncing all endpoint certs since https-service-endpoints=True",
level=DEBUG) level=DEBUG)
paths_to_sync.append(SSL_DIR) paths_to_sync.append(SSL_DIR)
paths_to_sync.append(APACHE_SSL_DIR)
paths_to_sync.append(CA_CERT_PATH) paths_to_sync.append(CA_CERT_PATH)
elif is_str_true(config('use-https')):
if bool_from_string(config('use-https')):
log("Syncing keystone-endpoint certs since use-https=True", log("Syncing keystone-endpoint certs since use-https=True",
level=DEBUG) level=DEBUG)
paths_to_sync.append(SSL_DIR) paths_to_sync.append(SSL_DIR)
paths_to_sync.append(APACHE_SSL_DIR) paths_to_sync.append(APACHE_SSL_DIR)
paths_to_sync.append(CA_CERT_PATH) paths_to_sync.append(CA_CERT_PATH)
if is_pki_enabled():
log("Syncing token certs", level=DEBUG)
paths_to_sync.append(PKI_CERTS_DIR)
# Ensure unique
paths_to_sync = list(set(paths_to_sync))
if not paths_to_sync: if not paths_to_sync:
log("Nothing to sync - skipping", level=DEBUG) log("Nothing to sync - skipping", level=DEBUG)
return {} return {}
@ -908,8 +996,7 @@ def synchronize_ca(fatal=False):
create_peer_service_actions('restart', ['apache2']) create_peer_service_actions('restart', ['apache2'])
create_peer_actions(['update-ca-certificates']) create_peer_actions(['update-ca-certificates'])
# Format here needs to match that used when peers request sync cluster_rel_settings = {}
synced_units = [unit.replace('/', '-') for unit in peer_units()]
retries = 3 retries = 3
while True: while True:
@ -918,12 +1005,17 @@ def synchronize_ca(fatal=False):
update_hash_from_path(hash1, path) update_hash_from_path(hash1, path)
try: try:
unison_sync(paths_to_sync) synced_units = unison_sync(paths_to_sync)
except: if synced_units:
# Format here needs to match that used when peers request sync
synced_units = [u.replace('/', '-') for u in synced_units]
cluster_rel_settings['ssl-synced-units'] = \
json.dumps(synced_units)
except Exception as exc:
if fatal: if fatal:
raise raise
else: else:
log("Sync failed but fatal=False", level=INFO) log("Sync failed but fatal=False - %s" % (exc), level=INFO)
return {} return {}
hash2 = hashlib.sha256() hash2 = hashlib.sha256()
@ -947,10 +1039,22 @@ def synchronize_ca(fatal=False):
hash = hash1.hexdigest() hash = hash1.hexdigest()
log("Sending restart-services-trigger=%s to all peers" % (hash), log("Sending restart-services-trigger=%s to all peers" % (hash),
level=DEBUG) level=DEBUG)
cluster_rel_settings['restart-services-trigger'] = hash
log("Sync complete", level=DEBUG) log("Sync complete", level=DEBUG)
return {'restart-services-trigger': hash, return cluster_rel_settings
'ssl-synced-units': json.dumps(synced_units)}
def clear_ssl_synced_units():
"""Clear the 'synced' units record on the cluster relation.
If new unit sync requests are set, this will ensure that a sync occurs when
the sync master receives the requests.
"""
log("Clearing ssl sync units", level=DEBUG)
for rid in relation_ids('cluster'):
relation_set(relation_id=rid,
relation_settings={'ssl-synced-units': None})
def update_hash_from_path(hash, path, recurse_depth=10): def update_hash_from_path(hash, path, recurse_depth=10):
@ -992,16 +1096,14 @@ def synchronize_ca_if_changed(force=False, fatal=False):
peer_settings = {} peer_settings = {}
if not force: if not force:
ssl_dirs = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH]
hash1 = hashlib.sha256() hash1 = hashlib.sha256()
for path in ssl_dirs: for path in SSL_DIRS:
update_hash_from_path(hash1, path) update_hash_from_path(hash1, path)
ret = f(*args, **kwargs) ret = f(*args, **kwargs)
hash2 = hashlib.sha256() hash2 = hashlib.sha256()
for path in ssl_dirs: for path in SSL_DIRS:
update_hash_from_path(hash2, path) update_hash_from_path(hash2, path)
if hash1.hexdigest() != hash2.hexdigest(): if hash1.hexdigest() != hash2.hexdigest():
@ -1037,13 +1139,18 @@ def synchronize_ca_if_changed(force=False, fatal=False):
def get_ca(user='keystone', group='keystone'): def get_ca(user='keystone', group='keystone'):
""" """Initialize a new CA object if one hasn't already been loaded.
Initialize a new CA object if one hasn't already been loaded.
This will create a new CA or load an existing one. This will create a new CA or load an existing one.
""" """
if not ssl.CA_SINGLETON: if not ssl.CA_SINGLETON:
# Ensure readable/writable by unison
perms = 0o755
if not os.path.isdir(SSL_DIR): if not os.path.isdir(SSL_DIR):
os.mkdir(SSL_DIR) mkdir(SSL_DIR, SSH_USER, 'keystone', perms)
else:
ensure_permissions(SSL_DIR, user=SSH_USER, group='keystone',
perms=perms)
d_name = '_'.join(SSL_CA_NAME.lower().split(' ')) d_name = '_'.join(SSL_CA_NAME.lower().split(' '))
ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group, ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,
@ -1058,11 +1165,11 @@ def get_ca(user='keystone', group='keystone'):
'%s' % SSL_DIR]) '%s' % SSL_DIR])
subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR]) subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR])
# Ensure a master has been elected and prefer this unit. Note that we # Ensure a master is elected. This should cover the following cases:
# prefer oldest peer as predicate since this action i normally only # * single unit == 'oldest' unit is elected as master
# performed once at deploy time when the oldest peer should be the # * multi unit + not clustered == 'oldest' unit is elected as master
# first to be ready. # * multi unit + clustered == cluster leader is elected as master
ensure_ssl_cert_master(use_oldest_peer=True) ensure_ssl_cert_master()
ssl.CA_SINGLETON.append(ca) ssl.CA_SINGLETON.append(ca)
@ -1090,6 +1197,12 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
single = set(['service', 'region', 'public_url', 'admin_url', single = set(['service', 'region', 'public_url', 'admin_url',
'internal_url']) 'internal_url'])
https_cns = [] https_cns = []
if https():
protocol = 'https'
else:
protocol = 'http'
if single.issubset(settings): if single.issubset(settings):
# other end of relation advertised only one endpoint # other end of relation advertised only one endpoint
if 'None' in settings.itervalues(): if 'None' in settings.itervalues():
@ -1099,22 +1212,22 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
# Check if clustered and use vip + haproxy ports if so # Check if clustered and use vip + haproxy ports if so
relation_data["auth_host"] = resolve_address(ADMIN) relation_data["auth_host"] = resolve_address(ADMIN)
relation_data["service_host"] = resolve_address(PUBLIC) relation_data["service_host"] = resolve_address(PUBLIC)
if https(): relation_data["auth_protocol"] = protocol
relation_data["auth_protocol"] = "https" relation_data["service_protocol"] = protocol
relation_data["service_protocol"] = "https"
else:
relation_data["auth_protocol"] = "http"
relation_data["service_protocol"] = "http"
relation_data["auth_port"] = config('admin-port') relation_data["auth_port"] = config('admin-port')
relation_data["service_port"] = config('service-port') relation_data["service_port"] = config('service-port')
relation_data["region"] = config('region') relation_data["region"] = config('region')
if is_str_true(config('https-service-endpoints')):
https_service_endpoints = config('https-service-endpoints')
if (https_service_endpoints and
bool_from_string(https_service_endpoints)):
# Pass CA cert as client will need it to # Pass CA cert as client will need it to
# verify https connections # verify https connections
ca = get_ca(user=SSH_USER) ca = get_ca(user=SSH_USER)
ca_bundle = ca.get_ca_bundle() ca_bundle = ca.get_ca_bundle()
relation_data['https_keystone'] = 'True' relation_data['https_keystone'] = 'True'
relation_data['ca_cert'] = b64encode(ca_bundle) relation_data['ca_cert'] = b64encode(ca_bundle)
# Allow the remote service to request creation of any additional # Allow the remote service to request creation of any additional
# roles. Currently used by Horizon # roles. Currently used by Horizon
for role in get_requested_roles(settings): for role in get_requested_roles(settings):
@ -1142,8 +1255,8 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
# NOTE(jamespage) internal IP for backwards compat for SSL certs # NOTE(jamespage) internal IP for backwards compat for SSL certs
internal_cn = urlparse.urlparse(settings['internal_url']).hostname internal_cn = urlparse.urlparse(settings['internal_url']).hostname
https_cns.append(internal_cn) https_cns.append(internal_cn)
https_cns.append( public_cn = urlparse.urlparse(settings['public_url']).hostname
urlparse.urlparse(settings['public_url']).hostname) https_cns.append(public_cn)
https_cns.append(urlparse.urlparse(settings['admin_url']).hostname) https_cns.append(urlparse.urlparse(settings['admin_url']).hostname)
else: else:
# assemble multiple endpoints from relation data. service name # assemble multiple endpoints from relation data. service name
@ -1169,6 +1282,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
if ep not in endpoints: if ep not in endpoints:
endpoints[ep] = {} endpoints[ep] = {}
endpoints[ep][x] = v endpoints[ep][x] = v
services = [] services = []
https_cn = None https_cn = None
for ep in endpoints: for ep in endpoints:
@ -1189,6 +1303,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
https_cns.append(internal_cn) https_cns.append(internal_cn)
https_cns.append(urlparse.urlparse(ep['public_url']).hostname) https_cns.append(urlparse.urlparse(ep['public_url']).hostname)
https_cns.append(urlparse.urlparse(ep['admin_url']).hostname) https_cns.append(urlparse.urlparse(ep['admin_url']).hostname)
service_username = '_'.join(services) service_username = '_'.join(services)
# If an admin username prefix is provided, ensure all services use it. # If an admin username prefix is provided, ensure all services use it.
@ -1214,8 +1329,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
# Currently used by Swift and Ceilometer. # Currently used by Swift and Ceilometer.
for role in get_requested_roles(settings): for role in get_requested_roles(settings):
log("Creating requested role: %s" % role) log("Creating requested role: %s" % role)
create_role(role, service_username, create_role(role, service_username, config('service-tenant'))
config('service-tenant'))
# As of https://review.openstack.org/#change,4675, all nodes hosting # As of https://review.openstack.org/#change,4675, all nodes hosting
# an endpoint(s) needs a service username and password assigned to # an endpoint(s) needs a service username and password assigned to
@ -1237,18 +1351,14 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
"https_keystone": "False", "https_keystone": "False",
"ssl_cert": "", "ssl_cert": "",
"ssl_key": "", "ssl_key": "",
"ca_cert": "" "ca_cert": "",
"auth_protocol": protocol,
"service_protocol": protocol,
} }
# Check if https is enabled
if https():
relation_data["auth_protocol"] = "https"
relation_data["service_protocol"] = "https"
else:
relation_data["auth_protocol"] = "http"
relation_data["service_protocol"] = "http"
# generate or get a new cert/key for service if set to manage certs. # generate or get a new cert/key for service if set to manage certs.
if is_str_true(config('https-service-endpoints')): https_service_endpoints = config('https-service-endpoints')
if https_service_endpoints and bool_from_string(https_service_endpoints):
ca = get_ca(user=SSH_USER) ca = get_ca(user=SSH_USER)
# NOTE(jamespage) may have multiple cns to deal with to iterate # NOTE(jamespage) may have multiple cns to deal with to iterate
https_cns = set(https_cns) https_cns = set(https_cns)
@ -1256,6 +1366,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
cert, key = ca.get_cert_and_key(common_name=https_cn) cert, key = ca.get_cert_and_key(common_name=https_cn)
relation_data['ssl_cert_{}'.format(https_cn)] = b64encode(cert) relation_data['ssl_cert_{}'.format(https_cn)] = b64encode(cert)
relation_data['ssl_key_{}'.format(https_cn)] = b64encode(key) relation_data['ssl_key_{}'.format(https_cn)] = b64encode(key)
# NOTE(jamespage) for backwards compatibility # NOTE(jamespage) for backwards compatibility
cert, key = ca.get_cert_and_key(common_name=internal_cn) cert, key = ca.get_cert_and_key(common_name=internal_cn)
relation_data['ssl_cert'] = b64encode(cert) relation_data['ssl_cert'] = b64encode(cert)
@ -1264,8 +1375,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
relation_data['ca_cert'] = b64encode(ca_bundle) relation_data['ca_cert'] = b64encode(ca_bundle)
relation_data['https_keystone'] = 'True' relation_data['https_keystone'] = 'True'
peer_store_and_set(relation_id=relation_id, peer_store_and_set(relation_id=relation_id, **relation_data)
**relation_data)
def ensure_valid_service(service): def ensure_valid_service(service):
@ -1286,7 +1396,7 @@ def add_endpoint(region, service, publicurl, adminurl, internalurl):
def get_requested_roles(settings): def get_requested_roles(settings):
''' Retrieve any valid requested_roles from dict settings ''' """Retrieve any valid requested_roles from dict settings"""
if ('requested_roles' in settings and if ('requested_roles' in settings and
settings['requested_roles'] not in ['None', None]): settings['requested_roles'] not in ['None', None]):
return settings['requested_roles'].split(',') return settings['requested_roles'].split(',')
@ -1295,6 +1405,7 @@ def get_requested_roles(settings):
def setup_ipv6(): def setup_ipv6():
"""Check ipv6-mode validity and setup dependencies"""
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower() ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
if ubuntu_rel < "trusty": if ubuntu_rel < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu " raise Exception("IPv6 is not supported in the charms for Ubuntu "
@ -1408,9 +1519,8 @@ def is_db_ready(use_current_context=False, db_rel=None):
if allowed_units and local_unit() in allowed_units.split(): if allowed_units and local_unit() in allowed_units.split():
return True return True
# If relation has units rel_has_units = True
return False
# If neither relation has units then we are probably in sqllite mode return # If neither relation has units then we are probably in sqlite mode so
# True. # return True.
return not rel_has_units return not rel_has_units
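The reworked tail of is_db_ready() above reduces to: ready if this unit appears in some relation's allowed_units, not ready if related units exist but never list us, and ready by default when no db relation has any units (the sqlite case). A compact sketch of that decision, with a plain data structure standing in for the relation queries:

def db_ready(local_unit, relations):
    # relations: list of {unit_name: {'allowed_units': 'unit/0 unit/1 ...'}}.
    rel_has_units = False
    for units in relations:
        for attrs in units.values():
            rel_has_units = True
            allowed = attrs.get('allowed_units') or ''
            if local_unit in allowed.split():
                return True
    # No related units implies sqlite mode, so report ready.
    return not rel_has_units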

View File

@ -43,7 +43,15 @@ driver = keystone.catalog.backends.sql.Catalog
[token] [token]
driver = keystone.token.backends.sql.Token driver = keystone.token.backends.sql.Token
provider = keystone.token.providers.uuid.Provider {% if token_provider == 'pki' -%}
provider = keystone.token.providers.pki.Provider
{% elif token_provider == 'pkiz' -%}
provider = keystone.token.providers.pkiz.Provider
{% else -%}
provider = keystone.token.providers.uuid.Provider
{% endif %}
{% include "parts/section-signing" %}
[cache] [cache]
@ -58,8 +66,6 @@ driver = keystone.assignment.backends.{{ assignment_backend }}.Assignment
[oauth1] [oauth1]
[signing]
[auth] [auth]
methods = external,password,token,oauth1 methods = external,password,token,oauth1
password = keystone.auth.plugins.password.Password password = keystone.auth.plugins.password.Password
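The token provider selection added to this template keys off a token_provider context value. As a quick way to see which provider line each value yields, the fragment can be rendered standalone with Jinja2; the context values here are assumptions about what the charm's context layer supplies, not values taken from this commit:

from jinja2 import Template

provider_block = """\
{% if token_provider == 'pki' -%}
provider = keystone.token.providers.pki.Provider
{% elif token_provider == 'pkiz' -%}
provider = keystone.token.providers.pkiz.Provider
{% else -%}
provider = keystone.token.providers.uuid.Provider
{% endif %}"""

for value in ('pki', 'pkiz', None):
    print(value, '->', Template(provider_block).render(token_provider=value).strip())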

View File

@ -0,0 +1,105 @@
# kilo
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
public_endpoint = {{ public_endpoint }}
admin_endpoint = {{ admin_endpoint }}
bind_host = {{ bind_host }}
public_workers = {{ workers }}
admin_workers = {{ workers }}
[database]
{% if database_host -%}
connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.{{ identity_backend }}.Identity
[credential]
driver = keystone.credential.backends.sql.Credential
[trust]
driver = keystone.trust.backends.sql.Trust
[os_inherit]
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[endpoint_filter]
[token]
driver = keystone.token.persistence.backends.sql.Token
provider = keystone.token.providers.uuid.Provider
[cache]
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[assignment]
driver = keystone.assignment.backends.{{ assignment_backend }}.Assignment
[oauth1]
[signing]
[auth]
methods = external,password,token,oauth1
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
oauth1 = keystone.auth.plugins.oauth1.OAuth
[paste_deploy]
config_file = keystone-paste.ini
[extra_headers]
Distribution = Ubuntu
[ldap]
{% if identity_backend == 'ldap' -%}
url = {{ ldap_server }}
user = {{ ldap_user }}
password = {{ ldap_password }}
suffix = {{ ldap_suffix }}
{% if ldap_config_flags -%}
{% for key, value in ldap_config_flags.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}
{% if ldap_readonly -%}
user_allow_create = False
user_allow_update = False
user_allow_delete = False
tenant_allow_create = False
tenant_allow_update = False
tenant_allow_delete = False
role_allow_create = False
role_allow_update = False
role_allow_delete = False
group_allow_create = False
group_allow_update = False
group_allow_delete = False
{% endif -%}
{% endif -%}

View File

@ -0,0 +1,44 @@
# kilo
[loggers]
keys=root
[formatters]
keys=normal,normal_with_name,debug
[handlers]
keys=production,file,devel
[logger_root]
{% if root_level -%}
level={{ root_level }}
{% else -%}
level=WARNING
{% endif -%}
handlers=file
[handler_production]
class=handlers.SysLogHandler
level=ERROR
formatter=normal_with_name
args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
[handler_file]
class=FileHandler
level=DEBUG
formatter=normal_with_name
args=('/var/log/keystone/keystone.log', 'a')
[handler_devel]
class=StreamHandler
level=NOTSET
formatter=debug
args=(sys.stdout,)
[formatter_normal]
format=%(asctime)s %(levelname)s %(message)s
[formatter_normal_with_name]
format=(%(name)s): %(asctime)s %(levelname)s %(message)s
[formatter_debug]
format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s

View File

@ -0,0 +1,13 @@
[signing]
{% if certfile -%}
certfile = {{ certfile }}
{% endif -%}
{% if keyfile -%}
keyfile = {{ keyfile }}
{% endif -%}
{% if ca_certs -%}
ca_certs = {{ ca_certs }}
{% endif -%}
{% if ca_key -%}
ca_key = {{ ca_key }}
{% endif -%}
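When PKI or PKIZ tokens are enabled, the signing fragment above would typically be fed paths to the token-signing material. The values below are illustrative only, based on keystone's conventional signing locations; they are not taken from this commit:

signing_context = {
    'certfile': '/etc/keystone/ssl/certs/signing_cert.pem',
    'keyfile': '/etc/keystone/ssl/private/signing_key.pem',
    'ca_certs': '/etc/keystone/ssl/certs/ca.pem',
    'ca_key': '/etc/keystone/ssl/private/cakey.pem',
}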

View File

@ -71,16 +71,19 @@ class OpenStackAmuletDeployment(AmuletDeployment):
services.append(this_service) services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw'] 'ceph-osd', 'ceph-radosgw']
# Openstack subordinate charms do not expose an origin option as that
# is controlled by the principle
ignore = ['neutron-openvswitch']
if self.openstack: if self.openstack:
for svc in services: for svc in services:
if svc['name'] not in use_source: if svc['name'] not in use_source + ignore:
config = {'openstack-origin': self.openstack} config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config) self.d.configure(svc['name'], config)
if self.source: if self.source:
for svc in services: for svc in services:
if svc['name'] in use_source: if svc['name'] in use_source and svc['name'] not in ignore:
config = {'source': self.source} config = {'source': self.source}
self.d.configure(svc['name'], config) self.d.configure(svc['name'], config)

View File

@ -63,7 +63,6 @@ TO_PATCH = [
'execd_preinstall', 'execd_preinstall',
'mkdir', 'mkdir',
'os', 'os',
'time',
# ip # ip
'get_iface_for_address', 'get_iface_for_address',
'get_netmask_for_address', 'get_netmask_for_address',
@ -203,6 +202,7 @@ class KeystoneRelationTests(CharmTestCase):
configs.write = MagicMock() configs.write = MagicMock()
hooks.pgsql_db_changed() hooks.pgsql_db_changed()
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready') @patch.object(hooks, 'is_db_ready')
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_cert_master')
@ -210,7 +210,9 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'identity_changed') @patch.object(hooks, 'identity_changed')
def test_db_changed_allowed(self, identity_changed, configs, def test_db_changed_allowed(self, identity_changed, configs,
mock_ensure_ssl_cert_master, mock_ensure_ssl_cert_master,
mock_log, mock_is_db_ready): mock_log, mock_is_db_ready,
mock_is_db_initialised):
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True mock_is_db_ready.return_value = True
mock_ensure_ssl_cert_master.return_value = False mock_ensure_ssl_cert_master.return_value = False
self.relation_ids.return_value = ['identity-service:0'] self.relation_ids.return_value = ['identity-service:0']
@ -247,10 +249,15 @@ class KeystoneRelationTests(CharmTestCase):
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed') @patch.object(hooks, 'identity_changed')
def test_postgresql_db_changed(self, identity_changed, configs, def test_postgresql_db_changed(self, identity_changed, configs,
mock_is_db_ready, mock_is_db_initialised,
mock_ensure_ssl_cert_master, mock_log): mock_ensure_ssl_cert_master, mock_log):
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True
mock_ensure_ssl_cert_master.return_value = False mock_ensure_ssl_cert_master.return_value = False
self.relation_ids.return_value = ['identity-service:0'] self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0'] self.related_units.return_value = ['unit/0']
@ -266,6 +273,10 @@ class KeystoneRelationTests(CharmTestCase):
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'is_pki_enabled')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'peer_units') @patch.object(hooks, 'peer_units')
@patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'admin_relation_changed') @patch.object(hooks, 'admin_relation_changed')
@ -275,11 +286,24 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed') @patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https') @patch.object(hooks, 'configure_https')
def test_config_changed_no_openstack_upgrade_leader( def test_config_changed_no_upgrade_leader(self, configure_https,
self, configure_https, identity_changed, identity_changed,
configs, get_homedir, ensure_user, cluster_joined, configs, get_homedir,
admin_relation_changed, ensure_permissions, mock_peer_units, ensure_user,
mock_ensure_ssl_cert_master, mock_log): cluster_joined,
admin_relation_changed,
ensure_permissions,
mock_peer_units,
mock_is_db_ready,
mock_is_db_initialised,
mock_is_ssl_cert_master,
mock_is_pki_enabled,
mock_ensure_ssl_cert_master,
mock_log):
mock_is_pki_enabled.return_value = True
mock_is_ssl_cert_master.return_value = True
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True
self.openstack_upgrade_available.return_value = False self.openstack_upgrade_available.return_value = False
self.is_elected_leader.return_value = True self.is_elected_leader.return_value = True
# avoid having to mock syncer # avoid having to mock syncer
@ -296,7 +320,6 @@ class KeystoneRelationTests(CharmTestCase):
configure_https.assert_called_with() configure_https.assert_called_with()
self.assertTrue(configs.write_all.called) self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called) self.assertTrue(self.ensure_initial_admin.called)
self.log.assert_called_with( self.log.assert_called_with(
'Firing identity_changed hook for all related services.') 'Firing identity_changed hook for all related services.')
@ -307,6 +330,8 @@ class KeystoneRelationTests(CharmTestCase):
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'is_pki_enabled')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'cluster_joined') @patch.object(hooks, 'cluster_joined')
@patch.object(unison, 'ensure_user') @patch.object(unison, 'ensure_user')
@ -317,8 +342,10 @@ class KeystoneRelationTests(CharmTestCase):
def test_config_changed_no_openstack_upgrade_not_leader( def test_config_changed_no_openstack_upgrade_not_leader(
self, configure_https, identity_changed, self, configure_https, identity_changed,
configs, get_homedir, ensure_user, cluster_joined, configs, get_homedir, ensure_user, cluster_joined,
ensure_permissions, mock_ensure_ssl_cert_master, ensure_permissions, mock_is_ssl_cert_master, mock_is_pki_enabled,
mock_log): mock_ensure_ssl_cert_master, mock_log):
mock_is_pki_enabled.return_value = True
mock_is_ssl_cert_master.return_value = True
self.openstack_upgrade_available.return_value = False self.openstack_upgrade_available.return_value = False
self.is_elected_leader.return_value = False self.is_elected_leader.return_value = False
mock_ensure_ssl_cert_master.return_value = False mock_ensure_ssl_cert_master.return_value = False
@ -337,6 +364,10 @@ class KeystoneRelationTests(CharmTestCase):
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'is_pki_enabled')
@patch.object(hooks, 'is_ssl_cert_master')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'peer_units') @patch.object(hooks, 'peer_units')
@patch.object(hooks, 'ensure_permissions') @patch.object(hooks, 'ensure_permissions')
@patch.object(hooks, 'admin_relation_changed') @patch.object(hooks, 'admin_relation_changed')
@ -346,12 +377,23 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed') @patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https') @patch.object(hooks, 'configure_https')
def test_config_changed_with_openstack_upgrade( def test_config_changed_with_openstack_upgrade(self, configure_https,
self, configure_https, identity_changed, identity_changed,
configs, get_homedir, ensure_user, cluster_joined, configs, get_homedir,
admin_relation_changed, ensure_user, cluster_joined,
ensure_permissions, mock_peer_units, mock_ensure_ssl_cert_master, admin_relation_changed,
mock_log): ensure_permissions,
mock_peer_units,
mock_is_db_ready,
mock_is_db_initialised,
mock_is_ssl_cert_master,
mock_is_pki_enabled,
mock_ensure_ssl_cert_master,
mock_log):
mock_is_pki_enabled.return_value = True
mock_is_ssl_cert_master.return_value = True
mock_is_db_ready.return_value = True
mock_is_db_initialised.return_value = True
self.openstack_upgrade_available.return_value = True self.openstack_upgrade_available.return_value = True
self.is_elected_leader.return_value = True self.is_elected_leader.return_value = True
# avoid having to mock syncer # avoid having to mock syncer
@ -370,7 +412,6 @@ class KeystoneRelationTests(CharmTestCase):
configure_https.assert_called_with() configure_https.assert_called_with()
self.assertTrue(configs.write_all.called) self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called) self.assertTrue(self.ensure_initial_admin.called)
self.log.assert_called_with( self.log.assert_called_with(
'Firing identity_changed hook for all related services.') 'Firing identity_changed hook for all related services.')
@ -379,6 +420,7 @@ class KeystoneRelationTests(CharmTestCase):
remote_unit='unit/0') remote_unit='unit/0')
admin_relation_changed.assert_called_with('identity-service:0') admin_relation_changed.assert_called_with('identity-service:0')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'is_db_ready') @patch.object(hooks, 'is_db_ready')
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_cert_master')
@ -386,7 +428,9 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'send_notifications') @patch.object(hooks, 'send_notifications')
def test_identity_changed_leader(self, mock_send_notifications, def test_identity_changed_leader(self, mock_send_notifications,
mock_hashlib, mock_ensure_ssl_cert_master, mock_hashlib, mock_ensure_ssl_cert_master,
mock_log, mock_is_db_ready): mock_log, mock_is_db_ready,
mock_is_db_initialised):
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True mock_is_db_ready.return_value = True
mock_ensure_ssl_cert_master.return_value = False mock_ensure_ssl_cert_master.return_value = False
hooks.identity_changed( hooks.identity_changed(
@ -544,12 +588,18 @@ class KeystoneRelationTests(CharmTestCase):
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.ensure_ssl_cert_master') @patch('keystone_utils.ensure_ssl_cert_master')
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'is_db_initialised')
@patch.object(hooks, 'identity_changed') @patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'CONFIGS') @patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_clustered_leader(self, configs, def test_ha_relation_changed_clustered_leader(self, configs,
identity_changed, identity_changed,
mock_is_db_initialised,
mock_is_db_ready,
mock_ensure_ssl_cert_master, mock_ensure_ssl_cert_master,
mock_log): mock_log):
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True
mock_ensure_ssl_cert_master.return_value = False mock_ensure_ssl_cert_master.return_value = False
self.relation_get.return_value = True self.relation_get.return_value = True
self.is_elected_leader.return_value = True self.is_elected_leader.return_value = True
@ -595,6 +645,8 @@ class KeystoneRelationTests(CharmTestCase):
cmd = ['a2dissite', 'openstack_https_frontend'] cmd = ['a2dissite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd) self.check_call.assert_called_with(cmd)
@patch.object(hooks, 'is_db_ready')
@patch.object(hooks, 'is_db_initialised')
@patch('keystone_utils.log') @patch('keystone_utils.log')
@patch('keystone_utils.relation_ids') @patch('keystone_utils.relation_ids')
@patch('keystone_utils.is_elected_leader') @patch('keystone_utils.is_elected_leader')
@ -608,7 +660,11 @@ class KeystoneRelationTests(CharmTestCase):
mock_ensure_ssl_cert_master, mock_ensure_ssl_cert_master,
mock_is_elected_leader, mock_is_elected_leader,
mock_relation_ids, mock_relation_ids,
mock_log): mock_log,
mock_is_db_ready,
mock_is_db_initialised):
mock_is_db_initialised.return_value = True
mock_is_db_ready.return_value = True
mock_is_elected_leader.return_value = False mock_is_elected_leader.return_value = False
mock_relation_ids.return_value = [] mock_relation_ids.return_value = []
mock_ensure_ssl_cert_master.return_value = True mock_ensure_ssl_cert_master.return_value = True

View File

@ -28,6 +28,7 @@ TO_PATCH = [
'grant_role', 'grant_role',
'configure_installation_source', 'configure_installation_source',
'is_elected_leader', 'is_elected_leader',
'is_ssl_cert_master',
'https', 'https',
'peer_store_and_set', 'peer_store_and_set',
'service_stop', 'service_stop',
@ -352,65 +353,169 @@ class TestKeystoneUtils(CharmTestCase):
self.assertEqual(utils.get_admin_passwd(), 'supersecretgen') self.assertEqual(utils.get_admin_passwd(), 'supersecretgen')
def test_is_db_ready(self): def test_is_db_ready(self):
allowed_units = None
def fake_rel_get(attribute=None, *args, **kwargs):
if attribute == 'allowed_units':
return allowed_units
self.relation_get.side_effect = fake_rel_get
self.relation_id.return_value = 'shared-db:0' self.relation_id.return_value = 'shared-db:0'
self.relation_ids.return_value = [self.relation_id.return_value] self.relation_ids.return_value = ['shared-db:0']
self.local_unit.return_value = 'unit/0' self.local_unit.return_value = 'unit/0'
self.relation_get.return_value = 'unit/0' allowed_units = 'unit/0'
self.assertTrue(utils.is_db_ready(use_current_context=True)) self.assertTrue(utils.is_db_ready(use_current_context=True))
self.relation_ids.return_value = ['acme:0'] self.relation_ids.return_value = ['acme:0']
self.assertRaises(Exception, utils.is_db_ready, use_current_context=True) self.assertRaises(Exception, utils.is_db_ready, use_current_context=True)
self.related_units.return_value = ['unit/0'] self.related_units.return_value = ['unit/0']
self.relation_ids.return_value = [self.relation_id.return_value] self.relation_ids.return_value = ['shared-db:0', 'shared-db:1']
self.assertTrue(utils.is_db_ready()) self.assertTrue(utils.is_db_ready())
self.relation_get.return_value = 'unit/1' allowed_units = 'unit/1'
self.assertFalse(utils.is_db_ready()) self.assertFalse(utils.is_db_ready())
self.related_units.return_value = [] self.related_units.return_value = []
self.assertTrue(utils.is_db_ready()) self.assertTrue(utils.is_db_ready())
@patch.object(utils, 'peer_units') @patch.object(utils, 'peer_units')
@patch.object(utils, 'is_elected_leader')
@patch.object(utils, 'oldest_peer')
@patch.object(utils, 'is_ssl_enabled') @patch.object(utils, 'is_ssl_enabled')
def test_ensure_ssl_cert_master(self, mock_is_str_true, mock_oldest_peer, def test_ensure_ssl_cert_master_no_ssl(self, mock_is_ssl_enabled,
mock_is_elected_leader, mock_peer_units): mock_peer_units):
mock_is_ssl_enabled.return_value = False
self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(self.relation_set.called)
@patch.object(utils, 'peer_units')
@patch.object(utils, 'is_ssl_enabled')
def test_ensure_ssl_cert_master_ssl_no_peers(self, mock_is_ssl_enabled,
mock_peer_units):
def mock_rel_get(unit=None, **kwargs):
return None
self.relation_get.side_effect = mock_rel_get
mock_is_ssl_enabled.return_value = True
self.relation_ids.return_value = ['cluster:0'] self.relation_ids.return_value = ['cluster:0']
self.local_unit.return_value = 'unit/0' self.local_unit.return_value = 'unit/0'
self.related_units.return_value = []
mock_is_str_true.return_value = False
self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(self.relation_set.called)
mock_is_elected_leader.return_value = False
self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(self.relation_set.called)
mock_is_str_true.return_value = True
mock_is_elected_leader.return_value = False
mock_peer_units.return_value = ['unit/0']
self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(self.relation_set.called)
mock_peer_units.return_value = [] mock_peer_units.return_value = []
# This should get ignored since we are overriding
self.is_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = False
self.assertTrue(utils.ensure_ssl_cert_master()) self.assertTrue(utils.ensure_ssl_cert_master())
settings = {'ssl-cert-master': 'unit/0'} settings = {'ssl-cert-master': 'unit/0'}
self.relation_set.assert_called_with(relation_id='cluster:0', self.relation_set.assert_called_with(relation_id='cluster:0',
relation_settings=settings) relation_settings=settings)
self.relation_set.reset_mock()
self.assertTrue(utils.ensure_ssl_cert_master(use_oldest_peer=True)) @patch.object(utils, 'peer_units')
@patch.object(utils, 'is_ssl_enabled')
def test_ensure_ssl_cert_master_ssl_master_no_peers(self,
mock_is_ssl_enabled,
mock_peer_units):
def mock_rel_get(unit=None, **kwargs):
if unit == 'unit/0':
return 'unit/0'
return None
self.relation_get.side_effect = mock_rel_get
mock_is_ssl_enabled.return_value = True
self.relation_ids.return_value = ['cluster:0']
self.local_unit.return_value = 'unit/0'
self.related_units.return_value = []
mock_peer_units.return_value = []
# This should get ignored since we are overriding
self.is_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = False
self.assertTrue(utils.ensure_ssl_cert_master())
settings = {'ssl-cert-master': 'unit/0'} settings = {'ssl-cert-master': 'unit/0'}
self.relation_set.assert_called_with(relation_id='cluster:0', self.relation_set.assert_called_with(relation_id='cluster:0',
relation_settings=settings) relation_settings=settings)
self.relation_set.reset_mock()
mock_peer_units.return_value = ['unit/0'] @patch.object(utils, 'peer_units')
@patch.object(utils, 'is_ssl_enabled')
def test_ensure_ssl_cert_master_ssl_not_leader(self, mock_is_ssl_enabled,
mock_peer_units):
mock_is_ssl_enabled.return_value = True
self.relation_ids.return_value = ['cluster:0']
self.local_unit.return_value = 'unit/0'
mock_peer_units.return_value = ['unit/1']
self.is_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = False
self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(self.relation_set.called)
@patch.object(utils, 'peer_units')
@patch.object(utils, 'is_ssl_enabled')
def test_ensure_ssl_cert_master_is_leader_new_peer(self,
mock_is_ssl_enabled,
mock_peer_units):
def mock_rel_get(unit=None, **kwargs):
if unit == 'unit/0':
return 'unit/0'
return 'unknown'
self.relation_get.side_effect = mock_rel_get
mock_is_ssl_enabled.return_value = True
self.relation_ids.return_value = ['cluster:0']
self.local_unit.return_value = 'unit/0'
mock_peer_units.return_value = ['unit/1']
self.related_units.return_value = ['unit/1']
self.is_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = True
self.assertFalse(utils.ensure_ssl_cert_master()) self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(utils.ensure_ssl_cert_master(use_oldest_peer=True))
settings = {'ssl-cert-master': 'unit/0'} settings = {'ssl-cert-master': 'unit/0'}
self.relation_set.assert_called_with(relation_id='cluster:0', self.relation_set.assert_called_with(relation_id='cluster:0',
relation_settings=settings) relation_settings=settings)
self.relation_set.reset_mock()
@patch.object(utils, 'peer_units')
@patch.object(utils, 'is_ssl_enabled')
def test_ensure_ssl_cert_master_is_leader_no_new_peer(self,
mock_is_ssl_enabled,
mock_peer_units):
def mock_rel_get(unit=None, **kwargs):
if unit == 'unit/0':
return 'unit/0'
return 'unit/0'
self.relation_get.side_effect = mock_rel_get
mock_is_ssl_enabled.return_value = True
self.relation_ids.return_value = ['cluster:0']
self.local_unit.return_value = 'unit/0'
mock_peer_units.return_value = ['unit/1']
self.related_units.return_value = ['unit/1']
self.is_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = True
self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(self.relation_set.called)
@patch.object(utils, 'peer_units')
@patch.object(utils, 'is_ssl_enabled')
def test_ensure_ssl_cert_master_is_leader_bad_votes(self,
mock_is_ssl_enabled,
mock_peer_units):
counter = {0: 0}
def mock_rel_get(unit=None, **kwargs):
"""Returns a mix of votes."""
if unit == 'unit/0':
return 'unit/0'
ret = 'unit/%d' % (counter[0])
counter[0] += 1
return ret
self.relation_get.side_effect = mock_rel_get
mock_is_ssl_enabled.return_value = True
self.relation_ids.return_value = ['cluster:0']
self.local_unit.return_value = 'unit/0'
mock_peer_units.return_value = ['unit/1']
self.related_units.return_value = ['unit/1']
self.is_ssl_cert_master.return_value = False
self.is_elected_leader.return_value = True
self.assertFalse(utils.ensure_ssl_cert_master())
self.assertFalse(self.relation_set.called)