Final tidy

James Page 2014-04-10 17:00:28 +01:00
parent 7cc221cd08
commit a3f42101cc
14 changed files with 151 additions and 1233 deletions

View File

@ -2,8 +2,7 @@
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks
@flake8 --exclude hooks/charmhelpers unit_tests
@flake8 --exclude hooks/charmhelpers hooks unit_tests
@charm proof
test:

View File

@ -120,8 +120,8 @@ def db_joined():
def pgsql_db_joined():
if is_relation_made('shared-db'):
# raise error
e = ('Attempting to associate a postgresql database when there is already '
'associated a mysql one')
e = ('Attempting to associate a postgresql database when there'
' is already associated a mysql one')
log(e, level=ERROR)
raise Exception(e)

View File

View File

@ -1,194 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from lib.utils import (
relation_ids,
relation_list,
relation_get,
render_template,
juju_log,
config_get,
install,
get_host_ip,
restart)
from lib.cluster_utils import https
import os
import subprocess
from base64 import b64decode
APACHE_SITE_DIR = "/etc/apache2/sites-available"
SITE_TEMPLATE = "apache2_site.tmpl"
RELOAD_CHECK = "To activate the new configuration"
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
juju_log('INFO',
"Inspecting identity-service relations for SSL certificate.")
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = None
juju_log('INFO',
"Inspecting identity-service relations for CA SSL certificate.")
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not ca_cert:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
def enable_https(port_maps, namespace, cert, key, ca_cert=None):
'''
For a given number of port mappings, configures apache2
HTTPS local reverse proxying using certificates and keys provided in
either configuration data (preferred) or relation data. Assumes ports
are not in use (calling charm should ensure that).
port_maps: dict: external to internal port mappings
namespace: str: name of charm
'''
def _write_if_changed(path, new_content):
content = None
if os.path.exists(path):
with open(path, 'r') as f:
content = f.read().strip()
if content != new_content:
with open(path, 'w') as f:
f.write(new_content)
return True
else:
return False
juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
http_restart = False
if cert:
cert = b64decode(cert)
if key:
key = b64decode(key)
if ca_cert:
ca_cert = b64decode(ca_cert)
if not cert and not key:
juju_log('ERROR',
"Expected but could not find SSL certificate data, not "
"configuring HTTPS!")
return False
install('apache2')
if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
'proxy', 'proxy_http']):
http_restart = True
ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
if not os.path.exists(ssl_dir):
os.makedirs(ssl_dir)
if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
http_restart = True
if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
http_restart = True
os.chmod(os.path.join(ssl_dir, 'key'), 0600)
install_ca_cert(ca_cert)
sites_dir = '/etc/apache2/sites-available'
for ext_port, int_port in port_maps.items():
juju_log('INFO',
'Creating apache2 reverse proxy vhost'
' for {}:{}'.format(ext_port,
int_port))
site = "{}_{}".format(namespace, ext_port)
site_path = os.path.join(sites_dir, site)
with open(site_path, 'w') as fsite:
context = {
"ext": ext_port,
"int": int_port,
"namespace": namespace,
"private_address": get_host_ip()}
fsite.write(render_template(SITE_TEMPLATE,
context))
if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
http_restart = True
if http_restart:
restart('apache2')
return True
def disable_https(port_maps, namespace):
'''
Ensure HTTPS reverse proxying is disabled for the given port mappings
port_maps: dict: of ext -> int port mappings
namespace: str: name of charm
'''
juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))
if (not os.path.exists('/etc/apache2') or
not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
return
http_restart = False
for ext_port in port_maps.keys():
if os.path.exists(os.path.join(APACHE_SITE_DIR,
"{}_{}".format(namespace,
ext_port))):
juju_log('INFO',
"Disabling HTTPS reverse proxy"
" for {} {}.".format(namespace,
ext_port))
if (RELOAD_CHECK in
subprocess.check_output(['a2dissite',
'{}_{}'.format(namespace,
ext_port)])):
http_restart = True
if http_restart:
restart(['apache2'])
def setup_https(port_maps, namespace, cert, key, ca_cert=None):
'''
Ensures HTTPS is either enabled or disabled for given port
mapping.
port_maps: dict: of ext -> int port mappings
namespace: str: name of charm
'''
if not https():
disable_https(port_maps, namespace)
else:
enable_https(port_maps, namespace, cert, key, ca_cert)
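For context, a charm hook would typically have driven this module along the following lines. This is an illustrative sketch only; the port mapping and the 'keystone' namespace are assumed values, not taken from this commit:

    # Illustrative sketch: wire the helpers above together from a charm hook.
    cert, key = get_cert()        # config-provided cert/key win over relation data
    ca_cert = get_ca_cert()
    port_maps = {443: 35337}      # hypothetical external -> internal port mapping
    setup_https(port_maps, namespace='keystone',
                cert=cert, key=key, ca_cert=ca_cert)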

View File

@ -1,128 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from lib.utils import (
juju_log,
relation_ids,
relation_list,
relation_get,
get_unit_hostname,
config_get)
import subprocess
import os
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
juju_log('INFO', 'Deferring action to CRM leader.')
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
juju_log('INFO', 'Deferring action to oldest service unit.')
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
or relation data to configure HTTPS.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if (relation_get('https_keystone', rid=r_id, unit=unit) and
relation_get('ssl_cert', rid=r_id, unit=unit) and
relation_get('ssl_key', rid=r_id, unit=unit) and
relation_get('ca_cert', rid=r_id, unit=unit)):
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_haproxy_port(public_port):
'''
Description: Determine correct proxy listening port based on public IP +
existence of HTTPS reverse proxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
i += 1
return public_port - (i * 10)
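To make the port arithmetic above concrete, here is a worked example assuming a hypothetical service whose standard public port is 5000 (the value is illustrative, not from this commit):

    # Worked example for public_port = 5000 (assumed value):
    # no peers, no HTTPS      -> determine_api_port(5000)     == 5000
    # peers or clustered      -> determine_api_port(5000)     == 4990
    # peers/cluster + HTTPS   -> determine_api_port(5000)     == 4980
    # HTTPS in front          -> determine_haproxy_port(5000) == 4990
    # Each layer sitting in front of the API (haproxy, apache2 TLS proxy)
    # shifts the listening port down by 10 so the public port stays free.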

View File

@ -1,53 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from lib.utils import (
relation_ids,
relation_list,
relation_get,
unit_get,
reload,
render_template)
import os
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
HAPROXY_DEFAULT = '/etc/default/haproxy'
def configure_haproxy(service_ports):
'''
Configure HAProxy based on the current peers in the service
cluster using the provided port map:
"swift": [ 8080, 8070 ]
HAproxy will also be reloaded/started if required
service_ports: dict: dict of lists of [ frontend, backend ]
'''
cluster_hosts = {}
cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
unit_get('private-address')
for r_id in relation_ids('cluster'):
for unit in relation_list(r_id):
cluster_hosts[unit.replace('/', '-')] = \
relation_get(attribute='private-address',
rid=r_id,
unit=unit)
context = {
'units': cluster_hosts,
'service_ports': service_ports}
with open(HAPROXY_CONF, 'w') as f:
f.write(render_template(os.path.basename(HAPROXY_CONF),
context))
with open(HAPROXY_DEFAULT, 'w') as f:
f.write('ENABLED=1')
reload('haproxy')
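A minimal call matching the shape described in the docstring, assuming a hypothetical 'swift' frontend on 8080 balancing to backends on 8070:

    # Illustrative sketch: haproxy listens on 8080 and proxies to port 8070
    # on this unit and on every peer in the 'cluster' relation.
    configure_haproxy({'swift': [8080, 8070]})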

View File

@ -1,239 +0,0 @@
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
import apt_pkg as apt
import subprocess
import os
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
ubuntu_openstack_release = {
'oneiric': 'diablo',
'precise': 'essex',
'quantal': 'folsom',
'raring': 'grizzly',
'saucy': 'havana',
'trusty': 'icehouse',
}
openstack_codenames = {
'2011.2': 'diablo',
'2012.1': 'essex',
'2012.2': 'folsom',
'2013.1': 'grizzly',
'2013.2': 'havana',
'2014.1': 'icehouse',
}
# The ugly duckling
swift_codenames = {
'1.4.3': 'diablo',
'1.4.8': 'essex',
'1.7.4': 'folsom',
'1.7.6': 'grizzly',
'1.7.7': 'grizzly',
'1.8.0': 'grizzly',
}
def juju_log(msg):
subprocess.check_call(['juju-log', msg])
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg)
exit(1)
def lsb_release():
'''Return /etc/lsb-release in a dict'''
lsb = open('/etc/lsb-release', 'r')
d = {}
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src == 'distro':
try:
rel = ubuntu_openstack_release[ubuntu_rel]
except KeyError:
e = 'Could not derive OpenStack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in openstack_codenames.iteritems():
if v in src:
return v
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in openstack_codenames.iteritems():
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_codename_package(pkg):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
cache = apt.Cache()
try:
pkg = cache[pkg]
except:
e = 'Could not determine version of installed package: %s' % pkg
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
vers = vers[:5]
return swift_codenames[vers]
else:
vers = vers[:6]
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg)
if 'swift' in pkg:
vers_map = swift_codenames
else:
vers_map = openstack_codenames
for version, cname in vers_map.iteritems():
if cname == codename:
return version
e = "Could not determine OpenStack version for package: %s" % pkg
error_out(e)
def configure_installation_source(rel):
'''Configure apt installation source.'''
def _import_key(keyid):
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
if rel == 'distro':
return
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
_import_key(key)
elif l == 1:
src = rel
else:
error_out("Invalid openstack-release: %s" % rel)
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
_import_key(CLOUD_ARCHIVE_KEY_ID)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
charm_dir = os.getenv('CHARM_DIR')
juju_rc_path = "%s/%s" % (charm_dir, script_path)
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"]
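For reference, the helpers above accept installation sources in the following shapes; the example strings are illustrative (the Cloud Archive case assumes the unit runs precise), and OPENSTACK_SERVICE_API is a hypothetical variable name:

    # Illustrative sketches only; source strings and variable names are assumed.
    get_os_codename_install_source('distro')                        # 'essex' on precise
    get_os_codename_install_source('cloud:precise-havana/updates')  # -> 'havana'
    configure_installation_source('cloud:precise-havana/updates')
    # Persist hook config for scripts run outside the juju hook environment.
    save_script_rc(script_path='scripts/scriptrc',
                   OPENSTACK_SERVICE_API='keystone')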

View File

@ -1,217 +0,0 @@
#!/usr/bin/python
#
# Easy file synchronization among peer units using ssh + unison.
#
# From *both* peer relation -joined and -changed, add a call to
# ssh_authorized_peers() describing the peer relation and the desired
# user + group. After all peer relations have settled, all hosts should
# be able to connect to one another via key auth'd ssh as the specified user.
#
# Other hooks are then free to synchronize files and directories using
# sync_to_peers().
#
# For a peer relation named 'cluster', for example:
#
# cluster-relation-joined:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# cluster-relation-changed:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# Hooks are now free to sync files as easily as:
#
# files = ['/etc/fstab', '/etc/apt.conf.d/']
# sync_to_peers(peer_interface='cluster',
# user='juju_ssh', paths=[files])
#
# It is assumed the charm itself has setup permissions on each unit
# such that 'juju_ssh' has read + write permissions. Also assumed
# that the calling charm takes care of leader delegation.
#
# TODO: Currently depends on the utils.py shipped with the keystone charm.
# Either copy required functionality to this library or depend on
# something more generic.
import os
import sys
import lib.utils as utils
import subprocess
import grp
import pwd
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
utils.juju_log('INFO',
'Could not get homedir for user %s: user exists?' % user)
sys.exit(1)
def get_keypair(user):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir)
priv_key = os.path.join(ssh_dir, 'id_rsa')
if not os.path.isfile(priv_key):
utils.juju_log('INFO', 'Generating new ssh key for user %s.' % user)
cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
'-f', priv_key]
subprocess.check_call(cmd)
pub_key = '%s.pub' % priv_key
if not os.path.isfile(pub_key):
utils.juju_log('INFO', 'Generating missing ssh public key @ %s.' %
pub_key)
cmd = ['ssh-keygen', '-y', '-f', priv_key]
p = subprocess.check_output(cmd).strip()
with open(pub_key, 'wb') as out:
out.write(p)
subprocess.check_call(['chown', '-R', user, ssh_dir])
return open(priv_key, 'r').read().strip(), open(pub_key, 'r').read().strip()
def write_authorized_keys(user, keys):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
utils.juju_log('INFO', 'Syncing authorized_keys @ %s.' % auth_keys)
with open(auth_keys, 'wb') as out:
for k in keys:
out.write('%s\n' % k)
def write_known_hosts(user, hosts):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
known_hosts = os.path.join(ssh_dir, 'known_hosts')
khosts = []
for host in hosts:
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
remote_key = subprocess.check_output(cmd).strip()
khosts.append(remote_key)
utils.juju_log('INFO', 'Syncing known_hosts @ %s.' % known_hosts)
with open(known_hosts, 'wb') as out:
for host in khosts:
out.write('%s\n' % host)
def ensure_user(user, group=None):
# need to ensure a bash shell'd user exists.
try:
pwd.getpwnam(user)
except KeyError:
utils.juju_log('INFO', 'Creating new user %s.%s.' % (user, group))
cmd = ['adduser', '--system', '--shell', '/bin/bash', user]
if group:
try:
grp.getgrnam(group)
except KeyError:
subprocess.check_call(['addgroup', group])
cmd += ['--ingroup', group]
subprocess.check_call(cmd)
def ssh_authorized_peers(peer_interface, user, group=None, ensure_local_user=False):
"""
Main setup function, should be called from both peer -changed and -joined
hooks with the same parameters.
"""
if ensure_local_user:
ensure_user(user, group)
priv_key, pub_key = get_keypair(user)
hook = os.path.basename(sys.argv[0])
if hook == '%s-relation-joined' % peer_interface:
utils.relation_set(ssh_pub_key=pub_key)
print 'joined'
elif hook == '%s-relation-changed' % peer_interface:
hosts = []
keys = []
for r_id in utils.relation_ids(peer_interface):
for unit in utils.relation_list(r_id):
settings = utils.relation_get_dict(relation_id=r_id,
remote_unit=unit)
if 'ssh_pub_key' in settings:
keys.append(settings['ssh_pub_key'])
hosts.append(settings['private-address'])
else:
utils.juju_log('INFO',
'ssh_authorized_peers(): ssh_pub_key '
'missing for unit %s, skipping.' % unit)
write_authorized_keys(user, keys)
write_known_hosts(user, hosts)
authed_hosts = ':'.join(hosts)
utils.relation_set(ssh_authorized_hosts=authed_hosts)
def _run_as_user(user):
try:
user = pwd.getpwnam(user)
except KeyError:
utils.juju_log('INFO', 'Invalid user: %s' % user)
sys.exit(1)
uid, gid = user.pw_uid, user.pw_gid
os.environ['HOME'] = user.pw_dir
def _inner():
os.setgid(gid)
os.setuid(uid)
return _inner
def run_as_user(user, cmd):
return subprocess.check_output(cmd, preexec_fn=_run_as_user(user), cwd='/')
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
base_cmd = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
'-fastcheck=true', '-group=false', '-owner=false',
'-prefer=newer', '-times=true']
if not verbose:
base_cmd.append('-silent')
hosts = []
for r_id in (utils.relation_ids(peer_interface) or []):
for unit in utils.relation_list(r_id):
settings = utils.relation_get_dict(relation_id=r_id,
remote_unit=unit)
try:
authed_hosts = settings['ssh_authorized_hosts'].split(':')
except KeyError:
print 'unison sync_to_peers: peer has not authorized *any* '\
'hosts yet.'
return
unit_hostname = utils.unit_get('private-address')
add_host = None
for authed_host in authed_hosts:
if unit_hostname == authed_host:
add_host = settings['private-address']
if add_host:
hosts.append(settings['private-address'])
else:
print 'unison sync_to_peers: peer (%s) has not authorized '\
'*this* host yet, skipping.' % settings['private-address']
for path in paths:
# removing trailing slash from directory paths, unison
# doesn't like these.
if path.endswith('/'):
path = path[:(len(path) - 1)]
for host in hosts:
cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
utils.juju_log('INFO', 'Syncing local path %s to %s@%s:%s' %
(path, user, host, path))
run_as_user(user, cmd)
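Completing the module-header example above, a peer hook could then push files to all authorized peers roughly as follows (the path is an assumed example):

    # Illustrative sketch: sync a config directory to every peer that has
    # authorized this host; '/etc/keystone' is an assumed path.
    sync_to_peers(peer_interface='cluster', user='juju_ssh',
                  paths=['/etc/keystone'], verbose=False)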

View File

@ -1,319 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Paul Collins <paul.collins@canonical.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import json
import os
import subprocess
import socket
import sys
def do_hooks(hooks):
hook = os.path.basename(sys.argv[0])
try:
hook_func = hooks[hook]
except KeyError:
juju_log('INFO',
"This charm doesn't know how to handle '{}'.".format(hook))
else:
hook_func()
def install(*pkgs):
cmd = [
'apt-get',
'-y',
'install']
for pkg in pkgs:
cmd.append(pkg)
subprocess.check_call(cmd)
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
install('python-jinja2')
import jinja2
try:
import dns.resolver
except ImportError:
install('python-dnspython')
import dns.resolver
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = templates.get_template(template_name)
return template.render(context)
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
CLOUD_ARCHIVE_POCKETS = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse'}
def configure_source():
source = str(config_get('openstack-origin'))
if not source:
return
if source.startswith('ppa:'):
cmd = [
'add-apt-repository',
source]
subprocess.check_call(cmd)
if source.startswith('cloud:'):
# CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
# cloud:precise-folsom/updates or cloud:precise-folsom/proposed
install('ubuntu-cloud-keyring')
pocket = source.split(':')[1]
pocket = pocket.split('-')[1]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
if source.startswith('deb'):
l = len(source.split('|'))
if l == 2:
(apt_line, key) = source.split('|')
cmd = [
'apt-key',
'adv', '--keyserver', 'keyserver.ubuntu.com',
'--recv-keys', key]
subprocess.check_call(cmd)
elif l == 1:
apt_line = source
with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
apt.write(apt_line + "\n")
cmd = [
'apt-get',
'update']
subprocess.check_call(cmd)
# Protocols
TCP = 'TCP'
UDP = 'UDP'
def expose(port, protocol='TCP'):
cmd = [
'open-port',
'{}/{}'.format(port, protocol)]
subprocess.check_call(cmd)
def juju_log(severity, message):
cmd = [
'juju-log',
'--log-level', severity,
message]
subprocess.check_call(cmd)
cache = {}
def cached(func):
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
@cached
def relation_ids(relation):
cmd = [
'relation-ids',
relation]
result = str(subprocess.check_output(cmd)).split()
if result == "":
return None
else:
return result
@cached
def relation_list(rid):
cmd = [
'relation-list',
'-r', rid]
result = str(subprocess.check_output(cmd)).split()
if result == "":
return None
else:
return result
@cached
def relation_get(attribute, unit=None, rid=None):
cmd = [
'relation-get']
if rid:
cmd.append('-r')
cmd.append(rid)
cmd.append(attribute)
if unit:
cmd.append(unit)
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
"""Obtain all relation data as dict by way of JSON"""
cmd = [
'relation-get', '--format=json']
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
if remote_unit:
cmd.append('-')
cmd.append(remote_unit)
j = subprocess.check_output(cmd)
d = json.loads(j)
settings = {}
# convert unicode to strings
for k, v in d.iteritems():
settings[str(k)] = str(v)
return settings
def relation_set(**kwargs):
cmd = [
'relation-set']
args = []
for k, v in kwargs.items():
if k == 'rid':
if v:
cmd.append('-r')
cmd.append(v)
else:
args.append('{}={}'.format(k, v))
cmd += args
subprocess.check_call(cmd)
@cached
def unit_get(attribute):
cmd = [
'unit-get',
attribute]
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def config_get(attribute):
cmd = [
'config-get',
'--format',
'json']
out = subprocess.check_output(cmd).strip() # IGNORE:E1103
cfg = json.loads(out)
try:
return cfg[attribute]
except KeyError:
return None
@cached
def get_unit_hostname():
return socket.gethostname()
@cached
def get_host_ip(hostname=unit_get('private-address')):
try:
# Test to see if already an IPv4 address
socket.inet_aton(hostname)
return hostname
except socket.error:
answers = dns.resolver.query(hostname, 'A')
if answers:
return answers[0].address
return None
def _svc_control(service, action):
subprocess.check_call(['service', service, action])
def restart(*services):
for service in services:
_svc_control(service, 'restart')
def stop(*services):
for service in services:
_svc_control(service, 'stop')
def start(*services):
for service in services:
_svc_control(service, 'start')
def reload(*services):
for service in services:
try:
_svc_control(service, 'reload')
except subprocess.CalledProcessError:
# Reload failed - either service does not support reload
# or it was not running - restart will fixup most things
_svc_control(service, 'restart')
def running(service):
try:
output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def is_relation_made(relation, key='private-address'):
for r_id in (relation_ids(relation) or []):
for unit in (relation_list(r_id) or []):
if relation_get(key, rid=r_id, unit=unit):
return True
return False
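As a rough sketch of how a charm entry point would have tied these helpers together (hook and service names below are assumptions, not taken from this commit):

    # Illustrative sketch: dispatch on the name of the invoking hook script/symlink.
    def config_changed():
        if is_relation_made('shared-db'):
            restart('keystone')          # hypothetical service name

    hooks = {
        'config-changed': config_changed,
        'start': lambda: start('keystone'),
        'stop': lambda: stop('keystone'),
    }
    do_hooks(hooks)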

1
hooks/start Symbolic link
View File

@ -0,0 +1 @@
keystone_hooks.py

1
hooks/stop Symbolic link
View File

@ -0,0 +1 @@
keystone_hooks.py

View File

@ -1,10 +1,6 @@
import keystone_context as context
from mock import patch
with patch('charmhelpers.core.hookenv.config') as config:
config.return_value = 'keystone'
import keystone_utils as utils
from test_utils import (
CharmTestCase
)
@ -37,8 +33,8 @@ class TestKeystoneContexts(CharmTestCase):
mock_is_clustered.return_value = False
ctxt = context.ApacheSSLContext()
with patch.object(ctxt, 'enable_modules') as mock_enable_modules:
with patch.object(ctxt, 'configure_cert') as mock_configure_cert:
with patch.object(ctxt, 'enable_modules'):
with patch.object(ctxt, 'configure_cert'):
self.assertEquals(ctxt(), {'endpoints': [(34, 12)],
'private_address': '1.2.3.4',
'namespace': 'keystone'})
@ -51,20 +47,27 @@ class TestKeystoneContexts(CharmTestCase):
@patch('charmhelpers.contrib.openstack.context.relation_get')
@patch('charmhelpers.contrib.openstack.context.log')
@patch('__builtin__.open')
def test_haproxy_context_service_enabled(self, mock_open, mock_log, mock_relation_get, mock_related_units,
mock_unit_get, mock_relation_ids):
mock_relation_ids.return_value = ['identity-service:0',]
def test_haproxy_context_service_enabled(
self, mock_open, mock_log, mock_relation_get, mock_related_units,
mock_unit_get, mock_relation_ids):
mock_relation_ids.return_value = ['identity-service:0', ]
mock_unit_get.return_value = '1.2.3.4'
mock_relation_get.return_value = '10.0.0.0'
mock_related_units.return_value = ['unit/0',]
mock_related_units.return_value = ['unit/0', ]
self.determine_apache_port.return_value = '34'
ctxt = context.HAProxyContext()
self.assertEquals(ctxt(), {'listen_ports': {'admin_port': 'keystone', 'public_port': 'keystone'},
'service_ports': {'admin-port': ['keystone', '34'],
'public-port': ['keystone', '34']},
'units': {'keystone': '1.2.3.4', 'unit-0':'10.0.0.0'}})
self.assertEquals(
ctxt(),
{'listen_ports': {'admin_port': 'keystone',
'public_port': 'keystone'},
'service_ports': {'admin-port': ['keystone', '34'],
'public-port': ['keystone', '34']},
'units': {'keystone': '1.2.3.4', 'unit-0': '10.0.0.0'}})
mock_unit_get.assert_called_with('private-address')
mock_relation_get.assert_called_with('private-address', rid='identity-service:0', unit='unit/0')
mock_relation_get.assert_called_with(
'private-address',
rid='identity-service:0',
unit='unit/0')
mock_open.assert_called_with('/etc/default/haproxy', 'w')

View File

@ -76,9 +76,10 @@ class KeystoneRelationTests(CharmTestCase):
hooks.install()
self.configure_installation_source.assert_called_with(repo)
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(['haproxy', 'unison', 'python-keystoneclient',
'uuid', 'python-mysqldb', 'openssl', 'apache2',
'pwgen', 'keystone', 'python-psycopg2'], fatal=True)
self.apt_install.assert_called_with(
['haproxy', 'unison', 'python-keystoneclient',
'uuid', 'python-mysqldb', 'openssl', 'apache2',
'pwgen', 'keystone', 'python-psycopg2'], fatal=True)
self.assertTrue(self.execd_preinstall.called)
def test_db_joined(self):
@ -157,7 +158,9 @@ class KeystoneRelationTests(CharmTestCase):
configs.write.call_args_list)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
identity_changed.assert_called_with(relation_id='identity-service:0', remote_unit='unit/0')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@ -170,14 +173,18 @@ class KeystoneRelationTests(CharmTestCase):
configs.write.call_args_list)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
identity_changed.assert_called_with(relation_id='identity-service:0', remote_unit='unit/0')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_no_openstack_upgrade_leader(self, configure_https, identity_changed, configs, get_homedir, ensure_user):
def test_config_changed_no_openstack_upgrade_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user):
self.openstack_upgrade_available.return_value = False
self.eligible_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
@ -193,15 +200,20 @@ class KeystoneRelationTests(CharmTestCase):
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
self.log.assert_called_with('Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(relation_id='identity-service:0', remote_unit='unit/0')
self.log.assert_called_with(
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_no_openstack_upgrade_not_leader(self, configure_https, identity_changed, configs, get_homedir, ensure_user):
def test_config_changed_no_openstack_upgrade_not_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user):
self.openstack_upgrade_available.return_value = False
self.eligible_leader.return_value = False
@ -222,7 +234,9 @@ class KeystoneRelationTests(CharmTestCase):
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_with_openstack_upgrade(self, configure_https, identity_changed, configs, get_homedir, ensure_user):
def test_config_changed_with_openstack_upgrade(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user):
self.openstack_upgrade_available.return_value = True
self.eligible_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
@ -240,34 +254,46 @@ class KeystoneRelationTests(CharmTestCase):
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
self.log.assert_called_with('Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(relation_id='identity-service:0', remote_unit='unit/0')
self.log.assert_called_with(
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
def test_identity_changed_leader(self):
self.eligible_leader.return_value = True
hooks.identity_changed(relation_id='identity-service:0', remote_unit='unit/0')
self.add_service_to_keystone.assert_called_with('identity-service:0', 'unit/0')
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.add_service_to_keystone.assert_called_with(
'identity-service:0',
'unit/0')
self.assertTrue(self.synchronize_ca.called)
def test_identity_changed_no_leader(self):
self.eligible_leader.return_value = False
hooks.identity_changed(relation_id='identity-service:0', remote_unit='unit/0')
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.assertFalse(self.add_service_to_keystone.called)
self.log.assert_called_with('Deferring identity_changed() to service leader.')
self.log.assert_called_with(
'Deferring identity_changed() to service leader.')
@patch.object(unison, 'ssh_authorized_peers')
def test_cluster_joined(self, ssh_authorized_peers):
hooks.cluster_joined()
ssh_authorized_peers.assert_called_with(user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
@patch.object(unison, 'ssh_authorized_peers')
@patch.object(hooks, 'CONFIGS')
def test_cluster_changed(self, configs, ssh_authorized_peers):
hooks.cluster_changed()
self.peer_echo.assert_called_with(includes=['_passwd'])
ssh_authorized_peers.assert_called_with(user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertTrue(configs.write_all.called)
@ -312,8 +338,9 @@ class KeystoneRelationTests(CharmTestCase):
hooks.ha_changed()
self.assertTrue(configs.write_all.called)
self.log.assert_called_with('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
self.log.assert_called_with(
'Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
self.relation_set.assert_called_with(relation_id='identity-service:0',
auth_host='10.10.10.10',
service_host='10.10.10.10')
@ -346,11 +373,13 @@ class KeystoneRelationTests(CharmTestCase):
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.log.assert_called_with('Cluster leader - ensuring endpoint configuration'
' is up to date')
self.log.assert_called_with(
'Cluster leader - ensuring endpoint configuration'
' is up to date')
self.assertTrue(self.ensure_initial_admin.called)
@patch.object(unison, 'ssh_authorized_peers')
@ -359,8 +388,9 @@ class KeystoneRelationTests(CharmTestCase):
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertFalse(self.log.called)
self.assertFalse(self.ensure_initial_admin.called)

View File

@ -1,8 +1,5 @@
from mock import patch, call, MagicMock
from test_utils import CharmTestCase
from copy import deepcopy
from collections import OrderedDict
import os
import manager
@ -10,9 +7,7 @@ os.environ['JUJU_UNIT_NAME'] = 'keystone'
with patch('charmhelpers.core.hookenv.config') as config:
import keystone_utils as utils
import keystone_context
import keystone_hooks as hooks
from charmhelpers.contrib.openstack import context
TO_PATCH = [
'api_port',
@ -49,6 +44,7 @@ TO_PATCH = [
'pwgen',
]
class TestKeystoneUtils(CharmTestCase):
def setUp(self):
@ -88,14 +84,18 @@ class TestKeystoneUtils(CharmTestCase):
ex_reg = [
call('/etc/keystone/keystone.conf', [self.ctxt]),
call('/etc/apache2/sites-available/openstack_https_frontend', [self.ctxt]),
call('/etc/apache2/sites-available/openstack_https_frontend.conf', [self.ctxt]),
call(
'/etc/apache2/sites-available/openstack_https_frontend',
[self.ctxt]),
call(
'/etc/apache2/sites-available/openstack_https_frontend.conf',
[self.ctxt]),
]
self.assertEquals(fake_renderer.register.call_args_list, ex_reg)
def test_determine_ports(self):
self.test_config.set('admin-port','80')
self.test_config.set('service-port','81')
self.test_config.set('admin-port', '80')
self.test_config.set('service-port', '81')
result = utils.determine_ports()
self.assertEquals(result, ['80', '81'])
@ -107,14 +107,15 @@ class TestKeystoneUtils(CharmTestCase):
@patch.object(hooks, 'CONFIGS')
@patch.object(utils, 'determine_packages')
@patch.object(utils, 'migrate_database')
def test_openstack_upgrade_leader(self, migrate_database, determine_packages, configs):
def test_openstack_upgrade_leader(
self, migrate_database, determine_packages, configs):
self.test_config.set('openstack-origin', 'precise')
determine_packages.return_value = []
self.eligible_leader.return_value = True
utils.do_openstack_upgrade(configs)
self.get_os_codename_install_source.assert_called_with('precise')
self.get_os_codename_install_source.assert_called_with('precise')
self.configure_installation_source.assert_called_with('precise')
self.assertTrue(self.apt_update.called)
@ -122,8 +123,14 @@ class TestKeystoneUtils(CharmTestCase):
'--option', 'Dpkg::Options::=--force-confnew',
'--option', 'Dpkg::Options::=--force-confdef',
]
self.apt_upgrade.assert_called_with(options=dpkg_opts, fatal=True, dist=True)
self.apt_install.assert_called_with(packages=[], options=dpkg_opts, fatal=True)
self.apt_upgrade.assert_called_with(
options=dpkg_opts,
fatal=True,
dist=True)
self.apt_install.assert_called_with(
packages=[],
options=dpkg_opts,
fatal=True)
self.assertTrue(configs.set_release.called)
self.assertTrue(configs.write_all.called)
@ -138,7 +145,8 @@ class TestKeystoneUtils(CharmTestCase):
self.service_start.assert_called_with('keystone')
@patch.object(utils, 'b64encode')
def test_add_service_to_keystone_clustered_https_none_values(self, b64encode):
def test_add_service_to_keystone_clustered_https_none_values(
self, b64encode):
relation_id = 'identity-service:0'
remote_unit = 'unit/0'
self.is_clustered.return_value = True
@ -148,7 +156,7 @@ class TestKeystoneUtils(CharmTestCase):
self.test_config.set('admin-port', 80)
self.test_config.set('service-port', 81)
b64encode.return_value = 'certificate'
self.get_requested_roles.return_value = ['role1',]
self.get_requested_roles.return_value = ['role1', ]
self.relation_get.return_value = {'service': 'keystone',
'region': 'RegionOne',
@ -156,7 +164,9 @@ class TestKeystoneUtils(CharmTestCase):
'admin_url': '10.0.0.2',
'internal_url': '192.168.1.2'}
utils.add_service_to_keystone(relation_id=relation_id, remote_unit=remote_unit)
utils.add_service_to_keystone(
relation_id=relation_id,
remote_unit=remote_unit)
self.assertTrue(self.is_clustered.called)
self.assertTrue(self.https.called)
self.assertTrue(self.create_role.called)
@ -169,19 +179,22 @@ class TestKeystoneUtils(CharmTestCase):
'service_port': 81,
'https_keystone': 'True',
'ca_cert': 'certificate'}
self.relation_set.assert_called_with(relation_id=relation_id, **relation_data)
self.relation_set.assert_called_with(
relation_id=relation_id,
**relation_data)
@patch.object(utils, 'ensure_valid_service')
@patch.object(utils, 'add_endpoint')
@patch.object(manager, 'KeystoneManager')
def test_add_service_to_keystone_no_clustered_no_https_complete_values(self, KeystoneManager, add_endpoint, ensure_valid_service):
def test_add_service_to_keystone_no_clustered_no_https_complete_values(
self, KeystoneManager, add_endpoint, ensure_valid_service):
relation_id = 'identity-service:0'
remote_unit = 'unit/0'
self.get_admin_token.return_value = 'token'
self.get_service_password.return_value = 'password'
self.test_config.set('service-tenant', 'tenant')
self.test_config.set('admin-role', 'admin')
self.get_requested_roles.return_value = ['role1',]
self.get_requested_roles.return_value = ['role1', ]
self.unit_private_ip.return_value = '10.0.0.3'
self.test_config.set('admin-port', 80)
self.test_config.set('service-port', 81)
@ -200,10 +213,13 @@ class TestKeystoneUtils(CharmTestCase):
'admin_url': '10.0.0.2',
'internal_url': '192.168.1.2'}
utils.add_service_to_keystone(relation_id=relation_id, remote_unit=remote_unit)
utils.add_service_to_keystone(
relation_id=relation_id,
remote_unit=remote_unit)
ensure_valid_service.assert_called_with('keystone')
add_endpoint.assert_called_with(region='RegionOne', service='keystone',
publicurl='10.0.0.1', adminurl='10.0.0.2',
publicurl='10.0.0.1',
adminurl='10.0.0.2',
internalurl='192.168.1.2')
self.assertTrue(self.get_admin_token.called)
self.get_service_password.assert_called_with('keystone')
@ -212,19 +228,25 @@ class TestKeystoneUtils(CharmTestCase):
self.create_role.assert_called_with('role1', 'keystone', 'tenant')
self.assertTrue(self.is_clustered.called)
relation_data = {'admin_token': 'token', 'service_port':81,
'auth_port':80, 'service_username':'keystone',
'service_password': 'password', 'service_tenant': 'tenant',
'https_keystone': 'False', 'ssl_cert': '', 'ssl_key': '',
'ca_cert': '', 'auth_host':'10.0.0.3', 'service_host': '10.0.0.3',
relation_data = {'admin_token': 'token', 'service_port': 81,
'auth_port': 80, 'service_username': 'keystone',
'service_password': 'password',
'service_tenant': 'tenant',
'https_keystone': 'False',
'ssl_cert': '', 'ssl_key': '',
'ca_cert': '', 'auth_host': '10.0.0.3',
'service_host': '10.0.0.3',
'auth_protocol': 'http', 'service_protocol': 'http',
'service_tenant_id': 'tenant_id'}
self.relation_set.assert_called_with(relation_id=relation_id, **relation_data)
self.relation_set.assert_called_with(
relation_id=relation_id,
**relation_data)
@patch.object(utils, 'ensure_valid_service')
@patch.object(utils, 'add_endpoint')
@patch.object(manager, 'KeystoneManager')
def test_add_service_to_keystone_nosubset(self, KeystoneManager, add_endpoint, ensure_valid_service):
def test_add_service_to_keystone_nosubset(
self, KeystoneManager, add_endpoint, ensure_valid_service):
relation_id = 'identity-service:0'
remote_unit = 'unit/0'
@ -236,11 +258,14 @@ class TestKeystoneUtils(CharmTestCase):
self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
KeystoneManager.resolve_tenant_id.return_value = 'tenant_id'
utils.add_service_to_keystone(relation_id=relation_id, remote_unit=remote_unit)
utils.add_service_to_keystone(
relation_id=relation_id,
remote_unit=remote_unit)
ensure_valid_service.assert_called_with('nova')
add_endpoint.assert_called_with(region='RegionOne', service='nova',
publicurl='10.0.0.1', adminurl='10.0.0.2',
internalurl='192.168.1.2')
publicurl='10.0.0.1',
adminurl='10.0.0.2',
internalurl='192.168.1.2')
def test_ensure_valid_service_incorrect(self):
utils.ensure_valid_service('fakeservice')
@ -251,8 +276,17 @@ class TestKeystoneUtils(CharmTestCase):
publicurl = '10.0.0.1'
adminurl = '10.0.0.2'
internalurl = '10.0.0.3'
utils.add_endpoint('RegionOne', 'nova', publicurl, adminurl, internalurl)
self.create_service_entry.assert_called_with('nova', 'compute', 'Nova Compute Service')
self.create_endpoint_template.asssert_called_with(region='RegionOne', service='nova',
publicurl=publicurl, adminurl=adminurl,
internalurl=internalurl)
utils.add_endpoint(
'RegionOne',
'nova',
publicurl,
adminurl,
internalurl)
self.create_service_entry.assert_called_with(
'nova',
'compute',
'Nova Compute Service')
self.create_endpoint_template.assert_called_with(
region='RegionOne', service='nova',
publicurl=publicurl, adminurl=adminurl,
internalurl=internalurl)