[james-page,ivoks,hazmat,yolanda.robla,r=james-page,t=*]

Redux to use charm-helpers
Support for Icehouse on 12.04 and 14.04
Support for Active/Active and SSL RabbitMQ
Support for SSL MySQL
Support for SSL endpoints
Support for PostgreSQL
James Page 2014-04-16 09:20:08 +01:00
commit 06faa77761
62 changed files with 5516 additions and 1871 deletions

6 .coveragerc Normal file

@@ -0,0 +1,6 @@
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
if __name__ == .__main__.:
include=
hooks/keystone_*

17 .project Normal file

@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>keystone</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

8 .pydevproject Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/keystone/hooks</path>
</pydev_pathproperty>
</pydev_project>

13 Makefile Normal file

@@ -0,0 +1,13 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks unit_tests
@charm proof
test:
@echo Starting tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
sync:
@charm-helper-sync -c charm-helpers.yaml


@@ -2,4 +2,12 @@ branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
- core
- fetch
- contrib.openstack|inc=*
- contrib.storage
- contrib.hahelpers:
- apache
- cluster
- contrib.unison
- payload.execd
- contrib.peerstorage


@@ -122,3 +122,22 @@ options:
default: "False"
type: string
description: "Manage SSL certificates for all service endpoints."
use-https:
default: "no"
type: string
description: "Use SSL for Keystone itself. Set to 'yes' to enable it."
ssl_cert:
type: string
description: |
SSL certificate to install and use for API ports. Setting this value
and ssl_key will enable reverse proxying, point Keystone's entry in the
Keystone catalog to use https, and override any certificate and key
issued by Keystone (if it is configured to do so).
ssl_key:
type: string
description: SSL key to use with certificate specified as ssl_cert.
ssl_ca:
type: string
description: |
SSL CA to use with the certificate and key provided - this is only
required if you are providing a privately signed ssl_cert and ssl_key.


@@ -0,0 +1,59 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
log("Inspecting identity-service relations for SSL certificate.",
level=INFO)
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = config_get('ssl_ca')
if ca_cert is None:
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if ca_cert is None:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
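
A minimal sketch of how a hook might consume these helpers (illustrative, not part of the commit; the paths are hypothetical, and the base64 decoding mirrors what ApacheSSLContext in context.py does with the same material):

from base64 import b64decode

cert, key = get_cert()
if cert and key:
    with open('/etc/apache2/ssl/keystone/cert', 'w') as out:
        out.write(b64decode(cert))  # cert material travels base64-encoded
ca = get_ca_cert()
if ca:
    install_ca_cert(b64decode(ca))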


@@ -1,24 +1,31 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from lib.utils import (
juju_log,
relation_ids,
relation_list,
relation_get,
get_unit_hostname,
config_get
)
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
unit_get,
)
class HAIncompleteConfig(Exception):
pass
def is_clustered():
for r_id in (relation_ids('ha') or []):
@@ -35,7 +42,7 @@ def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
@@ -67,12 +74,12 @@ def oldest_peer(peers):
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
juju_log('INFO', 'Deferring action to CRM leader.')
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
juju_log('INFO', 'Deferring action to oldest service unit.')
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
@@ -90,10 +97,14 @@ def https():
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if (relation_get('https_keystone', rid=r_id, unit=unit) and
relation_get('ssl_cert', rid=r_id, unit=unit) and
relation_get('ssl_key', rid=r_id, unit=unit) and
relation_get('ca_cert', rid=r_id, unit=unit)):
rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]
# NOTE: works around (LP: #1203241)
if (None not in rel_state) and ('' not in rel_state):
return True
return False
@@ -115,16 +126,58 @@ def determine_api_port(public_port):
return public_port - (i * 10)
def determine_haproxy_port(public_port):
def determine_apache_port(public_port):
'''
Description: Determine correct proxy listening port based on public IP +
existence of HTTPS reverse proxy.
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
if len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
def get_hacluster_config():
'''
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
conf = {}
for setting in settings:
conf[setting] = config_get(setting)
missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
return conf
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration and hacluster.
:configs : OSTemplateRenderer: A config tempating object to inspect for
a complete https context.
:vip_setting: str: Setting in charm config that specifies
VIP address.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
if is_clustered():
addr = config_get(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
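
To make the port arithmetic concrete, an illustrative walk-through (not part of the commit) for a service with public port 5000, assuming determine_api_port applies the same 10-port offset once for peers/clustering and once more for a complete https context:

# haproxy owns the public port once the unit has peers or is clustered:
#   haproxy (public)        5000
#   apache SSL frontend     determine_apache_port(5000)  # -> 4990
#   the API service itself  determine_api_port(5000)     # -> 4980
# A lone unit with no SSL listens on 5000 directly.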


@@ -0,0 +1,17 @@
''' Helper for managing alternatives for file conflict resolution '''
import subprocess
import shutil
import os
def install_alternative(name, target, source, priority=50):
''' Install alternative configuration '''
if (os.path.exists(target) and not os.path.islink(target)):
# Move existing file/directory away before installing
shutil.move(target, '{}.bak'.format(target))
cmd = [
'update-alternatives', '--force', '--install',
target, name, source, str(priority)
]
subprocess.check_call(cmd)
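
Illustrative usage (the paths are hypothetical): install a charm-managed copy of a config file via update-alternatives, with any pre-existing real file first moved aside to <target>.bak:

install_alternative(name='keystone.conf',
                    target='/etc/keystone/keystone.conf',
                    source='/var/lib/charm/keystone/keystone.conf')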


@@ -0,0 +1,700 @@
import json
import os
import time
from base64 import b64decode
from subprocess import (
check_call
)
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
)
from charmhelpers.core.hookenv import (
config,
local_unit,
log,
relation_get,
relation_ids,
related_units,
unit_get,
unit_private_ip,
ERROR,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
https,
is_clustered
)
from charmhelpers.contrib.hahelpers.apache import (
get_cert,
get_ca_cert,
)
from charmhelpers.contrib.openstack.neutron import (
neutron_plugin_attribute,
)
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
class OSContextError(Exception):
pass
def ensure_packages(packages):
'''Install but do not upgrade required plugin packages'''
required = filter_installed_packages(packages)
if required:
apt_install(required, fatal=True)
def context_complete(ctxt):
_missing = []
for k, v in ctxt.iteritems():
if v is None or v == '':
_missing.append(k)
if _missing:
log('Missing required data: %s' % ' '.join(_missing), level='INFO')
return False
return True
def config_flags_parser(config_flags):
if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in xrange(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
log("invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
class OSContextGenerator(object):
interfaces = []
def __call__(self):
raise NotImplementedError
class SharedDBContext(OSContextGenerator):
interfaces = ['shared-db']
def __init__(self,
database=None, user=None, relation_prefix=None, ssl_dir=None):
'''
Allows inspecting relation for settings prefixed with relation_prefix.
This is useful for parsing access for multiple databases returned via
the shared-db interface (eg, nova_password, quantum_password)
'''
self.relation_prefix = relation_prefix
self.database = database
self.user = user
self.ssl_dir = ssl_dir
def __call__(self):
self.database = self.database or config('database')
self.user = self.user or config('database-user')
if None in [self.database, self.user]:
log('Could not generate shared_db context. '
'Missing required charm config options. '
'(database name and user)')
raise OSContextError
ctxt = {}
password_setting = 'password'
if self.relation_prefix:
password_setting = self.relation_prefix + '_password'
for rid in relation_ids('shared-db'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
ctxt = {
'database_host': rdata.get('db_host'),
'database': self.database,
'database_user': self.user,
'database_password': rdata.get(password_setting),
'database_type': 'mysql'
}
if context_complete(ctxt):
db_ssl(rdata, ctxt, self.ssl_dir)
return ctxt
return {}
class PostgresqlDBContext(OSContextGenerator):
interfaces = ['pgsql-db']
def __init__(self, database=None):
self.database = database
def __call__(self):
self.database = self.database or config('database')
if self.database is None:
log('Could not generate postgresql_db context. '
'Missing required charm config options. '
'(database name)')
raise OSContextError
ctxt = {}
for rid in relation_ids(self.interfaces[0]):
for unit in related_units(rid):
ctxt = {
'database_host': relation_get('host', rid=rid, unit=unit),
'database': self.database,
'database_user': relation_get('user', rid=rid, unit=unit),
'database_password': relation_get('password', rid=rid, unit=unit),
'database_type': 'postgresql',
}
if context_complete(ctxt):
return ctxt
return {}
def db_ssl(rdata, ctxt, ssl_dir):
if 'ssl_ca' in rdata and ssl_dir:
ca_path = os.path.join(ssl_dir, 'db-client.ca')
with open(ca_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_ca']))
ctxt['database_ssl_ca'] = ca_path
elif 'ssl_ca' in rdata:
log("Charm not setup for ssl support but ssl ca found")
return ctxt
if 'ssl_cert' in rdata:
cert_path = os.path.join(
ssl_dir, 'db-client.cert')
if not os.path.exists(cert_path):
log("Waiting 1m for ssl client cert validity")
time.sleep(60)
with open(cert_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_cert']))
ctxt['database_ssl_cert'] = cert_path
key_path = os.path.join(ssl_dir, 'db-client.key')
with open(key_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_key']))
ctxt['database_ssl_key'] = key_path
return ctxt
class IdentityServiceContext(OSContextGenerator):
interfaces = ['identity-service']
def __call__(self):
log('Generating template context for identity-service')
ctxt = {}
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
ctxt = {
'service_port': rdata.get('service_port'),
'service_host': rdata.get('service_host'),
'auth_host': rdata.get('auth_host'),
'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'service_protocol':
rdata.get('service_protocol') or 'http',
'auth_protocol':
rdata.get('auth_protocol') or 'http',
}
if context_complete(ctxt):
# NOTE(jamespage) this is required for >= icehouse
# so a missing value just indicates keystone needs
# upgrading
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
return ctxt
return {}
class AMQPContext(OSContextGenerator):
interfaces = ['amqp']
def __init__(self, ssl_dir=None):
self.ssl_dir = ssl_dir
def __call__(self):
log('Generating template context for amqp')
conf = config()
try:
username = conf['rabbit-user']
vhost = conf['rabbit-vhost']
except KeyError as e:
log('Could not generate shared_db context. '
'Missing required charm config options: %s.' % e)
raise OSContextError
ctxt = {}
for rid in relation_ids('amqp'):
ha_vip_only = False
for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit):
ctxt['clustered'] = True
ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
unit=unit)
else:
ctxt['rabbitmq_host'] = relation_get('private-address',
rid=rid, unit=unit)
ctxt.update({
'rabbitmq_user': username,
'rabbitmq_password': relation_get('password', rid=rid,
unit=unit),
'rabbitmq_virtual_host': vhost,
})
ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
if ssl_port:
ctxt['rabbit_ssl_port'] = ssl_port
ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
if ssl_ca:
ctxt['rabbit_ssl_ca'] = ssl_ca
if relation_get('ha_queues', rid=rid, unit=unit) is not None:
ctxt['rabbitmq_ha_queues'] = True
ha_vip_only = relation_get('ha-vip-only',
rid=rid, unit=unit) is not None
if context_complete(ctxt):
if 'rabbit_ssl_ca' in ctxt:
if not self.ssl_dir:
log(("Charm not setup for ssl support "
"but ssl ca found"))
break
ca_path = os.path.join(
self.ssl_dir, 'rabbit-client-ca.pem')
with open(ca_path, 'w') as fh:
fh.write(b64decode(ctxt['rabbit_ssl_ca']))
ctxt['rabbit_ssl_ca'] = ca_path
# Sufficient information found = break out!
break
# Used for active/active rabbitmq >= grizzly
if ('clustered' not in ctxt or ha_vip_only) \
and len(related_units(rid)) > 1:
rabbitmq_hosts = []
for unit in related_units(rid):
rabbitmq_hosts.append(relation_get('private-address',
rid=rid, unit=unit))
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
if not context_complete(ctxt):
return {}
else:
return ctxt
class CephContext(OSContextGenerator):
interfaces = ['ceph']
def __call__(self):
'''This generates context for /etc/ceph/ceph.conf templates'''
if not relation_ids('ceph'):
return {}
log('Generating template context for ceph')
mon_hosts = []
auth = None
key = None
use_syslog = str(config('use-syslog')).lower()
for rid in relation_ids('ceph'):
for unit in related_units(rid):
mon_hosts.append(relation_get('private-address', rid=rid,
unit=unit))
auth = relation_get('auth', rid=rid, unit=unit)
key = relation_get('key', rid=rid, unit=unit)
ctxt = {
'mon_hosts': ' '.join(mon_hosts),
'auth': auth,
'key': key,
'use_syslog': use_syslog
}
if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph')
if not context_complete(ctxt):
return {}
ensure_packages(['ceph-common'])
return ctxt
class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
def __call__(self):
'''
Builds half a context for the haproxy template, which describes
all peers to be included in the cluster. Each charm needs to include
its own context generator that describes the port mapping.
'''
if not relation_ids('cluster'):
return {}
cluster_hosts = {}
l_unit = local_unit().replace('/', '-')
cluster_hosts[l_unit] = unit_get('private-address')
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
addr = relation_get('private-address', rid=rid, unit=unit)
cluster_hosts[_unit] = addr
ctxt = {
'units': cluster_hosts,
}
if len(cluster_hosts.keys()) > 1:
# Enable haproxy when we have enough peers.
log('Ensuring haproxy enabled in /etc/default/haproxy.')
with open('/etc/default/haproxy', 'w') as out:
out.write('ENABLED=1\n')
return ctxt
log('HAProxy context is incomplete, this unit has no peers.')
return {}
class ImageServiceContext(OSContextGenerator):
interfaces = ['image-service']
def __call__(self):
'''
Obtains the glance API server from the image-service relation. Useful
in nova and cinder (currently).
'''
log('Generating template context for image-service.')
rids = relation_ids('image-service')
if not rids:
return {}
for rid in rids:
for unit in related_units(rid):
api_server = relation_get('glance-api-server',
rid=rid, unit=unit)
if api_server:
return {'glance_api_servers': api_server}
log('ImageService context is incomplete. '
'Missing required relation data.')
return {}
class ApacheSSLContext(OSContextGenerator):
"""
Generates a context for an apache vhost configuration that configures
HTTPS reverse proxying for one or many endpoints. Generated context
looks something like:
{
'namespace': 'cinder',
'private_address': 'iscsi.mycinderhost.com',
'endpoints': [(8776, 8766), (8777, 8767)]
}
The endpoints list consists of tuples mapping external ports
to internal ports.
"""
interfaces = ['https']
# charms should inherit this context and set external ports
# and service namespace accordingly.
external_ports = []
service_namespace = None
def enable_modules(self):
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
check_call(cmd)
def configure_cert(self):
if not os.path.isdir('/etc/apache2/ssl'):
os.mkdir('/etc/apache2/ssl')
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
if not os.path.isdir(ssl_dir):
os.mkdir(ssl_dir)
cert, key = get_cert()
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
cert_out.write(b64decode(cert))
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
key_out.write(b64decode(key))
ca_cert = get_ca_cert()
if ca_cert:
with open(CA_CERT_PATH, 'w') as ca_out:
ca_out.write(b64decode(ca_cert))
check_call(['update-ca-certificates'])
def __call__(self):
if isinstance(self.external_ports, basestring):
self.external_ports = [self.external_ports]
if (not self.external_ports or not https()):
return {}
self.configure_cert()
self.enable_modules()
ctxt = {
'namespace': self.service_namespace,
'private_address': unit_get('private-address'),
'endpoints': []
}
if is_clustered():
ctxt['private_address'] = config('vip')
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
return ctxt
class NeutronContext(OSContextGenerator):
interfaces = []
@property
def plugin(self):
return None
@property
def network_manager(self):
return None
@property
def packages(self):
return neutron_plugin_attribute(
self.plugin, 'packages', self.network_manager)
@property
def neutron_security_groups(self):
return None
def _ensure_packages(self):
[ensure_packages(pkgs) for pkgs in self.packages]
def _save_flag_file(self):
if self.network_manager == 'quantum':
_file = '/etc/nova/quantum_plugin.conf'
else:
_file = '/etc/nova/neutron_plugin.conf'
with open(_file, 'wb') as out:
out.write(self.plugin + '\n')
def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'ovs',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config
}
return ovs_ctxt
def nvp_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
nvp_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'nvp',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config
}
return nvp_ctxt
def neutron_ctxt(self):
if https():
proto = 'https'
else:
proto = 'http'
if is_clustered():
host = config('vip')
else:
host = unit_get('private-address')
url = '%s://%s:%s' % (proto, host, '9696')
ctxt = {
'network_manager': self.network_manager,
'neutron_url': url,
}
return ctxt
def __call__(self):
self._ensure_packages()
if self.network_manager not in ['quantum', 'neutron']:
return {}
if not self.plugin:
return {}
ctxt = self.neutron_ctxt()
if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt())
elif self.plugin == 'nvp':
ctxt.update(self.nvp_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags
self._save_flag_file()
return ctxt
class OSConfigFlagContext(OSContextGenerator):
"""
Responsible for adding user-defined config-flags in charm config to a
template context.
NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some Openstack config files support
comma-separated lists as values.
"""
def __call__(self):
config_flags = config('config-flags')
if not config_flags:
return {}
flags = config_flags_parser(config_flags)
return {'user_config_flags': flags}
class SubordinateConfigContext(OSContextGenerator):
"""
Responsible for inspecting relations to subordinates that
may be exporting required config via a json blob.
The subordinate interface allows subordinates to export their
configuration requirements to the principal for multiple config
files and multiple services. I.e., a subordinate that has interfaces
to both glance and nova may export the following yaml blob as json:
glance:
/etc/glance/glance-api.conf:
sections:
DEFAULT:
- [key1, value1]
/etc/glance/glance-registry.conf:
MYSECTION:
- [key2, value2]
nova:
/etc/nova/nova.conf:
sections:
DEFAULT:
- [key3, value3]
It is then up to the principal charms to subscribe this context to
the service+config file it is interested in. Configuration data will
be available in the template context, in glance's case, as:
ctxt = {
... other context ...
'subordinate_config': {
'DEFAULT': {
'key1': 'value1',
},
'MYSECTION': {
'key2': 'value2',
},
}
}
"""
def __init__(self, service, config_file, interface):
"""
:param service : Service name key to query in any subordinate
data found
:param config_file : Service's config file to query sections
:param interface : Subordinate interface to inspect
"""
self.service = service
self.config_file = config_file
self.interface = interface
def __call__(self):
ctxt = {}
for rid in relation_ids(self.interface):
for unit in related_units(rid):
sub_config = relation_get('subordinate_configuration',
rid=rid, unit=unit)
if sub_config and sub_config != '':
try:
sub_config = json.loads(sub_config)
except:
log('Could not parse JSON from subordinate_config '
'setting from %s' % rid, level=ERROR)
continue
if self.service not in sub_config:
log('Found subordinate_config on %s but it contained '
'nothing for %s service' % (rid, self.service))
continue
sub_config = sub_config[self.service]
if self.config_file not in sub_config:
log('Found subordinate_config on %s but it contained '
'nothing for %s' % (rid, self.config_file))
continue
sub_config = sub_config[self.config_file]
for k, v in sub_config.iteritems():
ctxt[k] = v
if not ctxt:
ctxt['sections'] = {}
return ctxt
class SyslogContext(OSContextGenerator):
def __call__(self):
ctxt = {
'use_syslog': config('use-syslog')
}
return ctxt
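
As a quick illustration of config_flags_parser above (the flags themselves are made up):

flags = config_flags_parser('read_timeout=60,connect_timeout=5')
# flags == {'read_timeout': '60', 'connect_timeout': '5'}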


@@ -0,0 +1,171 @@
# Various utilities for dealing with Neutron and the renaming from Quantum.
from subprocess import check_output
from charmhelpers.core.hookenv import (
config,
log,
ERROR,
)
from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have support for GRE and VXLAN native
if kernel_version() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
# legacy
def quantum_plugins():
from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
}
}
# NOTE: patch in ml2 plugin for icehouse onwards
if release >= 'icehouse':
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
return plugins
def neutron_plugin_attribute(plugin, attr, net_manager=None):
manager = net_manager or network_manager()
if manager == 'quantum':
plugins = quantum_plugins()
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log('Error: Network manager does not support plugins.')
raise Exception
try:
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise Exception
try:
return _plugin[attr]
except KeyError:
return None
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
that require compatibility (eg, deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')
manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'
else:
# ensure accurate naming for all releases post-H
return 'neutron'
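
For example, a charm can look up plugin metadata like so (result shown for the pre-icehouse table above; on icehouse the ml2 override applies):

neutron_plugin_attribute('ovs', 'server_packages', net_manager='neutron')
# -> ['neutron-server', 'neutron-plugin-openvswitch']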


@@ -0,0 +1,2 @@
# dummy __init__.py to fool syncer into thinking this is a syncable python
# module


@@ -0,0 +1,15 @@
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[global]
{% if auth -%}
auth_supported = {{ auth }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
{% endif -%}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}
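
The variables here come from CephContext in context.py earlier in this commit; an illustrative context (placeholder values):

{
    'auth': 'cephx',
    'mon_hosts': '10.0.0.10 10.0.0.11',
    'use_syslog': 'false',
}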


@@ -0,0 +1,36 @@
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0
defaults
log global
mode tcp
option tcplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
timeout client 30000
timeout server 30000
listen stats :8888
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
{% if units -%}
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
balance roundrobin
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
{% endfor -%}
{% endif -%}
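
An illustrative context for this template (unit names, addresses and ports are placeholders): HAProxyContext supplies 'units', and each charm contributes its own 'service_ports' mapping of [frontend, backend] port pairs:

{
    'units': {'keystone-0': '10.0.0.1', 'keystone-1': '10.0.0.2'},
    'service_ports': {'admin-port': [35357, 35347]},
}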


@@ -0,0 +1,23 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
ServerName {{ private_address }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
{% endfor -%}
{% endif -%}


@@ -0,0 +1,23 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
ServerName {{ private_address }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
{% endfor -%}
{% endif -%}


@@ -0,0 +1,280 @@
import os
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log,
ERROR,
INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
# python-jinja2 may not be installed yet, or we're running unittests.
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
class OSConfigException(Exception):
pass
def get_loader(templates_dir, os_release):
"""
Create a jinja2.ChoiceLoader containing template dirs up to
and including os_release. If a release's template directory
is missing at templates_dir, it will be omitted from the loader.
templates_dir is added to the bottom of the search list as a base
loading dir.
A charm may also ship a templates dir with this module
and it will be appended to the bottom of the search list, eg:
hooks/charmhelpers/contrib/openstack/templates.
:param templates_dir: str: Base template directory containing release
sub-directories.
:param os_release : str: OpenStack release codename to construct template
loader.
:returns : jinja2.ChoiceLoader constructed with a list of
jinja2.FilesystemLoaders, ordered in descending
order by OpenStack release.
"""
tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
for rel in OPENSTACK_CODENAMES.itervalues()]
if not os.path.isdir(templates_dir):
log('Templates directory not found @ %s.' % templates_dir,
level=ERROR)
raise OSConfigException
# the bottom contains templates_dir and possibly a common templates dir
# shipped with the helper.
loaders = [FileSystemLoader(templates_dir)]
helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
if os.path.isdir(helper_templates):
loaders.append(FileSystemLoader(helper_templates))
for rel, tmpl_dir in tmpl_dirs:
if os.path.isdir(tmpl_dir):
loaders.insert(0, FileSystemLoader(tmpl_dir))
if rel == os_release:
break
log('Creating choice loader with dirs: %s' %
[l.searchpath for l in loaders], level=INFO)
return ChoiceLoader(loaders)
class OSConfigTemplate(object):
"""
Associates a config file template with a list of context generators.
Responsible for constructing a template context based on those generators.
"""
def __init__(self, config_file, contexts):
self.config_file = config_file
if hasattr(contexts, '__call__'):
self.contexts = [contexts]
else:
self.contexts = contexts
self._complete_contexts = []
def context(self):
ctxt = {}
for context in self.contexts:
_ctxt = context()
if _ctxt:
ctxt.update(_ctxt)
# track interfaces for every complete context.
[self._complete_contexts.append(interface)
for interface in context.interfaces
if interface not in self._complete_contexts]
return ctxt
def complete_contexts(self):
'''
Return a list of interfaces that have satisfied contexts.
'''
if self._complete_contexts:
return self._complete_contexts
self.context()
return self._complete_contexts
class OSConfigRenderer(object):
"""
This class provides a common templating system to be used by OpenStack
charms. It is intended to help charms share common code and templates,
and ease the burden of managing config templates across multiple OpenStack
releases.
Basic usage:
# import some common context generators from charmhelpers
from charmhelpers.contrib.openstack import context
# Create a renderer object for a specific OS release.
configs = OSConfigRenderer(templates_dir='/tmp/templates',
openstack_release='folsom')
# register some config files with context generators.
configs.register(config_file='/etc/nova/nova.conf',
contexts=[context.SharedDBContext(),
context.AMQPContext()])
configs.register(config_file='/etc/nova/api-paste.ini',
contexts=[context.IdentityServiceContext()])
configs.register(config_file='/etc/haproxy/haproxy.conf',
contexts=[context.HAProxyContext()])
# write out a single config
configs.write('/etc/nova/nova.conf')
# write out all registered configs
configs.write_all()
Details:
OpenStack Releases and template loading
---------------------------------------
When the object is instantiated, it is associated with a specific OS
release. This dictates how the template loader will be constructed.
The constructed loader attempts to load the template from several places
in the following order:
- from the most recent OS release-specific template dir (if one exists)
- the base templates_dir
- a template directory shipped in the charm with this helper file.
For the example above, '/tmp/templates' contains the following structure:
/tmp/templates/nova.conf
/tmp/templates/api-paste.ini
/tmp/templates/grizzly/api-paste.ini
/tmp/templates/havana/api-paste.ini
Since it was registered with the grizzly release, it first searches
the grizzly directory for nova.conf, then the templates dir.
When writing api-paste.ini, it will find the template in the grizzly
directory.
If the object were created with folsom, it would fall back to the
base templates dir for its api-paste.ini template.
This system should help manage changes in config files through
openstack releases, allowing charms to fall back to the most recently
updated config template for a given release.
The haproxy.conf, since it is not shipped in the templates dir, will
be loaded from the module directory's template directory, eg
$CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
us to ship common templates (haproxy, apache) with the helpers.
Context generators
---------------------------------------
Context generators are used to generate template contexts during hook
execution. Doing so may require inspecting service relations, charm
config, etc. When registered, a config file is associated with a list
of generators. When a template is rendered and written, all context
generators are called in a chain to generate the context dictionary
passed to the jinja2 template. See context.py for more info.
"""
def __init__(self, templates_dir, openstack_release):
if not os.path.isdir(templates_dir):
log('Could not locate templates dir %s' % templates_dir,
level=ERROR)
raise OSConfigException
self.templates_dir = templates_dir
self.openstack_release = openstack_release
self.templates = {}
self._tmpl_env = None
if None in [Environment, ChoiceLoader, FileSystemLoader]:
# if this code is running, the object is created pre-install hook.
# jinja2 shouldn't get touched until the module is reloaded on next
# hook execution, with proper jinja2 bits successfully imported.
apt_install('python-jinja2')
def register(self, config_file, contexts):
"""
Register a config file with a list of context generators to be called
during rendering.
"""
self.templates[config_file] = OSConfigTemplate(config_file=config_file,
contexts=contexts)
log('Registered config file: %s' % config_file, level=INFO)
def _get_tmpl_env(self):
if not self._tmpl_env:
loader = get_loader(self.templates_dir, self.openstack_release)
self._tmpl_env = Environment(loader=loader)
def _get_template(self, template):
self._get_tmpl_env()
template = self._tmpl_env.get_template(template)
log('Loaded template from %s' % template.filename, level=INFO)
return template
def render(self, config_file):
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
raise OSConfigException
ctxt = self.templates[config_file].context()
_tmpl = os.path.basename(config_file)
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound:
# if no template is found with basename, try looking for it
# using a munged full path, eg:
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
_tmpl = '_'.join(config_file.split('/')[1:])
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound as e:
log('Could not load template from %s by %s or %s.' %
(self.templates_dir, os.path.basename(config_file), _tmpl),
level=ERROR)
raise e
log('Rendering from template: %s' % _tmpl, level=INFO)
return template.render(ctxt)
def write(self, config_file):
"""
Write a single config file, raises if config file is not registered.
"""
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
raise OSConfigException
_out = self.render(config_file)
with open(config_file, 'wb') as out:
out.write(_out)
log('Wrote template %s.' % config_file, level=INFO)
def write_all(self):
"""
Write out all registered config files.
"""
[self.write(k) for k in self.templates.iterkeys()]
def set_release(self, openstack_release):
"""
Resets the template environment and generates a new template loader
based on the new openstack release.
"""
self._tmpl_env = None
self.openstack_release = openstack_release
self._get_tmpl_env()
def complete_contexts(self):
'''
Returns a list of context interfaces that yield a complete context.
'''
interfaces = []
[interfaces.extend(i.complete_contexts())
for i in self.templates.itervalues()]
return interfaces


@@ -0,0 +1,448 @@
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
import apt_pkg as apt
import subprocess
import os
import socket
import sys
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
ERROR,
INFO
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse')
])
OPENSTACK_CODENAMES = OrderedDict([
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
])
# The ugly duckling
SWIFT_CODENAMES = OrderedDict([
('1.4.3', 'diablo'),
('1.4.8', 'essex'),
('1.7.4', 'folsom'),
('1.8.0', 'grizzly'),
('1.7.7', 'grizzly'),
('1.7.6', 'grizzly'),
('1.10.0', 'havana'),
('1.9.1', 'havana'),
('1.9.0', 'havana'),
('1.13.1', 'icehouse'),
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src in ['distro', 'distro-proposed']:
try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
e = 'Could not derive openstack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in OPENSTACK_CODENAMES.iteritems():
if v in src:
return v
def get_os_version_install_source(src):
codename = get_os_codename_install_source(src)
return get_os_version_codename(codename)
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in OPENSTACK_CODENAMES.iteritems():
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
cache = apt.Cache()
try:
pkg = cache[package]
except:
if not fatal:
return None
# the package is unknown to the current apt cache.
e = 'Could not determine version of package with no installation '\
'candidate: %s' % package
error_out(e)
if not pkg.current_ver:
if not fatal:
return None
# package is known, but no version is currently installed.
e = 'Could not determine version of uninstalled package: %s' % package
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
swift_vers = vers[:5]
if swift_vers not in SWIFT_CODENAMES:
# Deal with 1.10.0 upward
swift_vers = vers[:6]
return SWIFT_CODENAMES[swift_vers]
else:
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in vers_map.iteritems():
if cname == codename:
return version
#e = "Could not determine OpenStack version for package: %s" % pkg
#error_out(e)
os_rel = None
def os_release(package, base='essex'):
'''
Returns OpenStack release codename from a cached global.
If the codename can not be determined from either an installed package or
the installation source, the earliest release supported by the charm should
be returned.
'''
global os_rel
if os_rel:
return os_rel
os_rel = (get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
return os_rel
def import_key(keyid):
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
def configure_installation_source(rel):
'''Configure apt installation source.'''
if rel == 'distro':
return
elif rel == 'distro-proposed':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
import_key(key)
elif l == 1:
src = rel
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
apt_install('ubuntu-cloud-keyring', fatal=True)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"]
def openstack_upgrade_available(package):
"""
Determines if an OpenStack upgrade is available from installation
source, based on version of installed package.
:param package: str: Name of installed package.
:returns: bool: : Returns True if configured installation source offers
a newer version of package.
"""
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(available_vers, cur_vers) == 1
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: '
'block_device=%s.' % block_device, level=ERROR)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev,
level=ERROR)
return bdev
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
def is_ip(address):
"""
Returns True if address is a valid IP address.
"""
try:
# Test to see if already an IPv4 address
socket.inet_aton(address)
return True
except socket.error:
return False
def ns_query(address):
try:
import dns.resolver
except ImportError:
apt_install('python-dnspython')
import dns.resolver
if isinstance(address, dns.name.Name):
rtype = 'PTR'
elif isinstance(address, basestring):
rtype = 'A'
answers = dns.resolver.query(address, rtype)
if answers:
return str(answers[0])
return None
def get_host_ip(hostname):
"""
Resolves the IP for a given hostname, or returns
the input if it is already an IP.
"""
if is_ip(hostname):
return hostname
return ns_query(hostname)
def get_hostname(address, fqdn=True):
"""
Resolves hostname for given IP, or returns the input
if it is already a hostname.
"""
if is_ip(address):
try:
import dns.reversename
except ImportError:
apt_install('python-dnspython')
import dns.reversename
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
else:
result = address
if fqdn:
# strip trailing .
if result.endswith('.'):
return result[:-1]
else:
return result
else:
return result.split('.')[0]
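
A few illustrative calls (results follow from the mapping tables above; the first also depends on the host's Ubuntu series):

get_os_codename_install_source('distro')                  # 'icehouse' on a trusty host
get_os_codename_install_source('cloud:precise-icehouse')  # -> 'icehouse'
get_os_version_codename('icehouse')                       # -> '2014.1'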


@@ -0,0 +1,83 @@
from charmhelpers.core.hookenv import (
relation_ids,
relation_get,
local_unit,
relation_set,
)
"""
This helper provides functions to support use of a peer relation
for basic key/value storage, with the added benefit that all storage
can be replicated across peer units, so this is really useful for
services that issue usernames/passwords to remote services.
def shared_db_changed():
# Only the lead unit should create passwords
if not is_leader():
return
username = relation_get('username')
key = '{}.password'.format(username)
# Attempt to retrieve any existing password for this user
password = peer_retrieve(key)
if password is None:
# New user, create password and store
password = pwgen(length=64)
peer_store(key, password)
create_access(username, password)
relation_set(password=password)
def cluster_changed():
# Echo any relation data other than *-address
# back onto the peer relation so all units have
# all *.password keys stored on their local relation
# for later retrieval.
peer_echo()
"""
def peer_retrieve(key, relation_name='cluster'):
""" Retrieve a named key from peer relation relation_name """
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
return relation_get(attribute=key, rid=cluster_rid,
unit=local_unit())
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name))
def peer_store(key, value, relation_name='cluster'):
""" Store the key/value pair on the named peer relation relation_name """
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name))
def peer_echo(includes=None):
"""Echo filtered attributes back onto the same relation for storage
Note that this helper must only be called within a peer relation
changed hook
"""
rdata = relation_get()
echo_data = {}
if includes is None:
echo_data = rdata.copy()
for ex in ['private-address', 'public-address']:
if ex in echo_data:
echo_data.pop(ex)
else:
for attribute, value in rdata.iteritems():
for include in includes:
if include in attribute:
echo_data[attribute] = value
if len(echo_data) > 0:
relation_set(relation_settings=echo_data)
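
For instance, a lead unit could persist a generated credential and have peers mirror it back from their cluster-relation-changed hook (illustrative; pwgen is the hypothetical generator from the docstring above):

peer_store('mysql.password', pwgen(length=64))
password = peer_retrieve('mysql.password')
peer_echo(includes=['.password'])  # call from cluster-relation-changed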


@@ -0,0 +1,387 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import os
import shutil
import json
import time
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
WARNING,
ERROR
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
service_running,
umount,
)
from charmhelpers.fetch import (
apt_install,
)
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
CEPH_CONF = """[global]
auth supported = {auth}
keyring = {keyring}
mon host = {mon_hosts}
log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
def install():
''' Basic Ceph client installation '''
ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
''' Check to see if a RADOS block device exists '''
try:
out = check_output(['rbd', 'list', '--id', service,
'--pool', pool])
except CalledProcessError:
return False
else:
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
''' Create a new RADOS block device '''
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
check_call(cmd)
def pool_exists(service, name):
''' Check to see if a RADOS pool already exists '''
try:
out = check_output(['rados', '--id', service, 'lspools'])
except CalledProcessError:
return False
else:
return name in out
def get_osds(service):
'''
Return a list of all Ceph Object Storage Daemons
currently in the cluster
'''
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls', '--format=json']))
else:
return None
def create_pool(service, name, replicas=2):
''' Create a new RADOS pool '''
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 / replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'create',
name, str(pgnum)
]
check_call(cmd)
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'set', name,
'size', str(replicas)
]
check_call(cmd)
def delete_pool(service, name):
''' Delete a RADOS pool from ceph '''
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'delete',
name, '--yes-i-really-really-mean-it'
]
check_call(cmd)
def _keyfile_path(service):
return KEYFILE.format(service)
def _keyring_path(service):
return KEYRING.format(service)
def create_keyring(service, key):
''' Create a new Ceph keyring containing key'''
keyring = _keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
return
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.{}'.format(service),
'--add-key={}'.format(key)
]
check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
''' Create a file containing key '''
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
    ''' Query named relation 'ceph' to determine current nodes '''
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth, use_syslog):
''' Perform basic configuration of Ceph '''
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF.format(auth=auth,
keyring=_keyring_path(service),
mon_hosts=",".join(map(str, hosts)),
use_syslog=use_syslog))
modprobe('rbd')
def image_mapped(name):
''' Determine whether a RADOS block device is mapped locally '''
try:
out = check_output(['rbd', 'showmapped'])
except CalledProcessError:
return False
else:
return name in out
def map_block_storage(service, pool, image):
''' Map a RADOS block device for local use '''
cmd = [
'rbd',
'map',
'{}/{}'.format(pool, image),
'--user',
service,
'--secret',
_keyfile_path(service),
]
check_call(cmd)
def filesystem_mounted(fs):
    ''' Determine whether a filesystem is already mounted '''
return fs in [f for f, m in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
''' Make a new filesystem on the specified block device '''
count = 0
    e_noent = errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst):
''' Migrate data in data_src_dst to blk_device and then remount '''
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
copy_files(data_src_dst, '/mnt')
# umount block device
umount('/mnt')
# Grab user/group ID's from original source
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
# TODO: persist is currently a NO-OP in core.host
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
os.chown(data_src_dst, uid, gid)
# TODO: re-use
def modprobe(module):
''' Load a kernel module and configure for auto-load on reboot '''
log('ceph: Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
            modules.write(module + '\n')
def copy_files(src, dst, symlinks=False, ignore=None):
''' Copy files from src to dst '''
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
NOTE: This function must only be called from a single service unit for
    the same rbd_img, otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being re-mounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool {}.'.format(pool))
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image ({}).'.format(rbd_img))
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if its executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if service_running(svc):
log('ceph: Stopping services {} prior to migrating data.'
.format(svc))
service_stop(svc)
place_data_on_block_device(blk_device, mount_point)
for svc in system_services:
log('ceph: Starting service {} after migrating data.'
.format(svc))
service_start(svc)
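# --- Illustrative usage (not part of this commit) ---
# A minimal sketch of how a consuming charm might drive the helpers
# above from its ceph relation-changed hook once a key is available.
# The service/pool/image names, size, device path and mount point are
# assumptions, and the module path is assumed from the charm-helpers tree.
from charmhelpers.contrib.storage.linux.ceph import (
    ensure_ceph_keyring,
    ensure_ceph_storage,
)

def ceph_changed():
    if not ensure_ceph_keyring(service='mysql', user='mysql',
                               group='mysql'):
        return  # no key on the relation yet; retry on a later hook
    ensure_ceph_storage(service='mysql', pool='mysql', rbd_img='mysql',
                        sizemb=1024, mount_point='/var/lib/mysql',
                        blk_device='/dev/rbd/mysql/mysql', fstype='ext4',
                        system_services=['mysql'])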
def ensure_ceph_keyring(service, user=None, group=None):
'''
Ensures a ceph keyring is created for a named service
and optionally ensures user and group ownership.
Returns False if no ceph key is available in relation state.
'''
key = None
for rid in relation_ids('ceph'):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
return False
create_keyring(service=service, key=key)
keyring = _keyring_path(service)
if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring])
return True
def ceph_version():
''' Retrieve the local version of ceph '''
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None

View File

@ -0,0 +1,62 @@
import os
import re
from subprocess import (
check_call,
check_output,
)
##################################################
# loopback device helpers.
##################################################
def loopback_devices():
'''
Parse through 'losetup -a' output to determine currently mapped
loopback devices. Output is expected to look like:
/dev/loop0: [0807]:961814 (/tmp/my.img)
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
cmd = ['losetup', '-a']
devs = [d.strip().split(' ') for d in
check_output(cmd).splitlines() if d != '']
for dev, _, f in devs:
        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
return loopbacks
def create_loopback(file_path):
'''
Create a loopback device for a given backing file.
:returns: str: Full path to new loopback device (eg, /dev/loop0)
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in loopback_devices().iteritems():
if f == file_path:
return d
def ensure_loopback_device(path, size):
'''
Ensure a loopback device exists for a given backing file path and size.
    If a loopback device is not already mapped to the file, a new one
    will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in loopback_devices().iteritems():
if f == path:
return d
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd)
return create_loopback(path)
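# --- Illustrative usage (not part of this commit) ---
# Sketch: back a 1GB loopback device with a sparse file, creating the
# file on demand; the path and size are assumptions, and the module
# path is assumed from the charm-helpers tree.
from charmhelpers.contrib.storage.linux.loopback import (
    ensure_loopback_device,
)

loop_dev = ensure_loopback_device('/srv/images/test.img', '1G')
# e.g. loop_dev == '/dev/loop0'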

View File

@ -0,0 +1,88 @@
from subprocess import (
CalledProcessError,
check_call,
check_output,
Popen,
PIPE,
)
##################################################
# LVM helpers.
##################################################
def deactivate_lvm_volume_group(block_device):
'''
    Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume
'''
vg = list_lvm_volume_group(block_device)
if vg:
cmd = ['vgchange', '-an', vg]
check_call(cmd)
def is_lvm_physical_volume(block_device):
'''
Determine whether a block device is initialized as an LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: boolean: True if block device is a PV, False if not.
'''
try:
check_output(['pvdisplay', block_device])
return True
except CalledProcessError:
return False
def remove_lvm_physical_volume(block_device):
'''
Remove LVM PV signatures from a given block device.
:param block_device: str: Full path of block device to scrub.
'''
p = Popen(['pvremove', '-ff', block_device],
stdin=PIPE)
p.communicate(input='y\n')
def list_lvm_volume_group(block_device):
'''
List LVM volume group associated with a given block device.
Assumes block device is a valid LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: str: Name of volume group associated with block device or None
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
if l.strip().startswith('VG Name'):
vg = ' '.join(l.split()).split(' ').pop()
return vg
def create_lvm_physical_volume(block_device):
'''
Initialize a block device as an LVM physical volume.
:param block_device: str: Full path of block device to initialize.
'''
check_call(['pvcreate', block_device])
def create_lvm_volume_group(volume_group, block_device):
'''
Create an LVM volume group backed by a given block device.
Assumes block device has already been initialized as an LVM PV.
:param volume_group: str: Name of volume group to create.
    :param block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
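# --- Illustrative usage (not part of this commit) ---
# Sketch: scrub any stale LVM state from an unused disk and build a
# fresh volume group on it. The device and VG names are assumptions,
# as is the module path.
from charmhelpers.contrib.storage.linux.lvm import (
    create_lvm_physical_volume,
    create_lvm_volume_group,
    deactivate_lvm_volume_group,
    is_lvm_physical_volume,
    remove_lvm_physical_volume,
)

def prepare_volume_group(block_device='/dev/vdb', vg='cinder-volumes'):
    if is_lvm_physical_volume(block_device):
        # Release and remove any previous volume group first.
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    create_lvm_physical_volume(block_device)
    create_lvm_volume_group(vg, block_device)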

View File

@ -0,0 +1,26 @@
from os import stat
from stat import S_ISBLK
from subprocess import (
check_call
)
def is_block_device(path):
'''
Confirm device at path is a valid block device node.
:returns: boolean: True if path is a block device, False if not.
'''
return S_ISBLK(stat(path).st_mode)
def zap_disk(block_device):
'''
    Clear a block device of its partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.
:param block_device: str: Full path of block device to clean.
'''
check_call(['sgdisk', '--zap-all', '--clear',
'--mbrtogpt', block_device])
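# --- Illustrative usage (not part of this commit) ---
# Sketch: guard zap_disk() with is_block_device() so that only a real
# block device node is ever wiped. The device path is an assumption,
# as is the module path.
from charmhelpers.contrib.storage.linux.utils import (
    is_block_device,
    zap_disk,
)

def clean_storage(block_device='/dev/vdb'):
    # zap_disk() destroys the partition table; refuse anything else.
    if not is_block_device(block_device):
        raise ValueError('%s is not a block device' % block_device)
    zap_disk(block_device)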

View File

@ -0,0 +1,257 @@
# Easy file synchronization among peer units using ssh + unison.
#
# From *both* peer relation -joined and -changed, add a call to
# ssh_authorized_peers() describing the peer relation and the desired
# user + group. After all peer relations have settled, all hosts should
# be able to connect to one another via key auth'd ssh as the specified user.
#
# Other hooks are then free to synchronize files and directories using
# sync_to_peers().
#
# For a peer relation named 'cluster', for example:
#
# cluster-relation-joined:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# cluster-relation-changed:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# Hooks are now free to sync files as easily as:
#
# files = ['/etc/fstab', '/etc/apt.conf.d/']
# sync_to_peers(peer_interface='cluster',
#               user='juju_ssh', paths=[files])
#
# It is assumed the charm itself has setup permissions on each unit
# such that 'juju_ssh' has read + write permissions. Also assumed
# that the calling charm takes care of leader delegation.
#
# Additionally, files can be synchronized to a specific unit only:
# sync_to_peer(slave_address, user='juju_ssh',
# paths=[files], verbose=False)
import os
import pwd
from copy import copy
from subprocess import check_call, check_output
from charmhelpers.core.host import (
adduser,
add_user_to_group,
)
from charmhelpers.core.hookenv import (
log,
hook_name,
relation_ids,
related_units,
relation_set,
relation_get,
unit_private_ip,
ERROR,
)
BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
'-fastcheck=true', '-group=false', '-owner=false',
'-prefer=newer', '-times=true']
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
        log('Could not get homedir for user %s: user exists?' % user,
            ERROR)
        raise Exception('Could not get homedir for user %s' % user)
def create_private_key(user, priv_key_path):
if not os.path.isfile(priv_key_path):
log('Generating new SSH key for user %s.' % user)
cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
'-f', priv_key_path]
check_call(cmd)
else:
log('SSH key already exists at %s.' % priv_key_path)
check_call(['chown', user, priv_key_path])
check_call(['chmod', '0600', priv_key_path])
def create_public_key(user, priv_key_path, pub_key_path):
if not os.path.isfile(pub_key_path):
log('Generating missing ssh public key @ %s.' % pub_key_path)
cmd = ['ssh-keygen', '-y', '-f', priv_key_path]
p = check_output(cmd).strip()
with open(pub_key_path, 'wb') as out:
out.write(p)
check_call(['chown', user, pub_key_path])
def get_keypair(user):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
priv_key = os.path.join(ssh_dir, 'id_rsa')
pub_key = '%s.pub' % priv_key
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir)
check_call(['chown', '-R', user, ssh_dir])
create_private_key(user, priv_key)
create_public_key(user, priv_key, pub_key)
with open(priv_key, 'r') as p:
_priv = p.read().strip()
with open(pub_key, 'r') as p:
_pub = p.read().strip()
return (_priv, _pub)
def write_authorized_keys(user, keys):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
log('Syncing authorized_keys @ %s.' % auth_keys)
with open(auth_keys, 'wb') as out:
for k in keys:
out.write('%s\n' % k)
def write_known_hosts(user, hosts):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
known_hosts = os.path.join(ssh_dir, 'known_hosts')
khosts = []
for host in hosts:
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
remote_key = check_output(cmd).strip()
khosts.append(remote_key)
log('Syncing known_hosts @ %s.' % known_hosts)
with open(known_hosts, 'wb') as out:
for host in khosts:
out.write('%s\n' % host)
def ensure_user(user, group=None):
adduser(user)
if group:
add_user_to_group(user, group)
def ssh_authorized_peers(peer_interface, user, group=None,
ensure_local_user=False):
"""
Main setup function, should be called from both peer -changed and -joined
hooks with the same parameters.
"""
if ensure_local_user:
ensure_user(user, group)
priv_key, pub_key = get_keypair(user)
hook = hook_name()
if hook == '%s-relation-joined' % peer_interface:
relation_set(ssh_pub_key=pub_key)
elif hook == '%s-relation-changed' % peer_interface:
hosts = []
keys = []
for r_id in relation_ids(peer_interface):
for unit in related_units(r_id):
ssh_pub_key = relation_get('ssh_pub_key',
rid=r_id,
unit=unit)
priv_addr = relation_get('private-address',
rid=r_id,
unit=unit)
if ssh_pub_key:
keys.append(ssh_pub_key)
hosts.append(priv_addr)
else:
log('ssh_authorized_peers(): ssh_pub_key '
'missing for unit %s, skipping.' % unit)
write_authorized_keys(user, keys)
write_known_hosts(user, hosts)
authed_hosts = ':'.join(hosts)
relation_set(ssh_authorized_hosts=authed_hosts)
def _run_as_user(user):
try:
user = pwd.getpwnam(user)
except KeyError:
        log('Invalid user: %s' % user, ERROR)
        raise Exception('Invalid user: %s' % user)
uid, gid = user.pw_uid, user.pw_gid
os.environ['HOME'] = user.pw_dir
def _inner():
os.setgid(gid)
os.setuid(uid)
return _inner
def run_as_user(user, cmd):
return check_output(cmd, preexec_fn=_run_as_user(user), cwd='/')
def collect_authed_hosts(peer_interface):
    '''Iterate through the units on the peer interface to find all that
    have the calling host in their authorized hosts lists'''
hosts = []
for r_id in (relation_ids(peer_interface) or []):
for unit in related_units(r_id):
private_addr = relation_get('private-address',
rid=r_id, unit=unit)
authed_hosts = relation_get('ssh_authorized_hosts',
rid=r_id, unit=unit)
if not authed_hosts:
                log('Peer %s has not authorized *any* hosts yet, '
                    'skipping.' % unit)
continue
if unit_private_ip() in authed_hosts.split(':'):
hosts.append(private_addr)
else:
                log('Peer %s has not authorized *this* host yet, '
                    'skipping.' % unit)
return hosts
def sync_path_to_host(path, host, user, verbose=False):
cmd = copy(BASE_CMD)
if not verbose:
cmd.append('-silent')
    # remove trailing slash from directory paths; unison
    # doesn't like them.
if path.endswith('/'):
path = path[:(len(path) - 1)]
cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
try:
log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
run_as_user(user, cmd)
    except Exception:
        log('Error syncing local path %s to %s@%s' % (path, user, host))
def sync_to_peer(host, user, paths=[], verbose=False):
    '''Sync paths to a specific host'''
[sync_path_to_host(p, host, user, verbose) for p in paths]
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
    '''Sync the given paths to all authorized peer hosts'''
for host in collect_authed_hosts(peer_interface):
sync_to_peer(host, user, paths, verbose)

View File

@ -8,6 +8,7 @@ import os
import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
@ -149,6 +150,11 @@ def service_name():
return local_unit().split('/')[0]
def hook_name():
"""The name of the currently executing hook"""
return os.path.basename(sys.argv[0])
@cached
def config(scope=None):
"""Juju charm configuration"""

View File

@ -194,7 +194,7 @@ def file_hash(path):
return None
def restart_on_change(restart_map):
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing
    This function is used as a decorator, for example:
@ -219,8 +219,14 @@ def restart_on_change(restart_map):
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
for service_name in list(OrderedDict.fromkeys(restarts)):
service('restart', service_name)
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return wrapped_f
return wrap
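# --- Illustrative usage (not part of this commit) ---
# Sketch of the decorator with the new stopstart flag; the config file,
# service name and hook below are assumptions for illustration.
from charmhelpers.core.host import restart_on_change

@restart_on_change({'/etc/keystone/keystone.conf': ['keystone']},
                   stopstart=True)
def config_changed():
    # If the hook body changes keystone.conf, keystone is stopped and
    # then started (rather than restarted) once the hook returns.
    pass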
@ -279,3 +285,13 @@ def get_nic_mtu(nic):
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd)
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr

View File

@ -0,0 +1,308 @@
import importlib
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
from urlparse import (
urlparse,
urlunparse,
)
import subprocess
from charmhelpers.core.hookenv import (
config,
log,
)
import apt_pkg
import os
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
CLOUD_ARCHIVE_POCKETS = {
# Folsom
'folsom': 'precise-updates/folsom',
'precise-folsom': 'precise-updates/folsom',
'precise-folsom/updates': 'precise-updates/folsom',
'precise-updates/folsom': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'precise-folsom/proposed': 'precise-proposed/folsom',
'precise-proposed/folsom': 'precise-proposed/folsom',
# Grizzly
'grizzly': 'precise-updates/grizzly',
'precise-grizzly': 'precise-updates/grizzly',
'precise-grizzly/updates': 'precise-updates/grizzly',
'precise-updates/grizzly': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'precise-grizzly/proposed': 'precise-proposed/grizzly',
'precise-proposed/grizzly': 'precise-proposed/grizzly',
# Havana
'havana': 'precise-updates/havana',
'precise-havana': 'precise-updates/havana',
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
}
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
apt_pkg.init()
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
try:
p = cache[package]
p.current_ver or _pkgs.append(package)
except KeyError:
log('Package {} has no installation candidate.'.format(package),
level='WARNING')
_pkgs.append(package)
return _pkgs
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd, env=env)
def apt_upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
if dist:
cmd.append('dist-upgrade')
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd, env=env)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Purging {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def apt_hold(packages, fatal=False):
"""Hold one or more packages"""
cmd = ['apt-mark', 'hold']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def add_source(source, key=None):
if source is None:
log('Source is not present. Skipping')
return
if (source.startswith('ppa:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'keyserver.ubuntu.com', '--recv',
key])
class SourceConfigError(Exception):
pass
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
"""
Configure multiple sources from charm configuration
Example config:
install_sources:
- "ppa:foo"
- "http://example.com/repo precise main"
install_keys:
- null
- "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted.
"""
sources = safe_load(config(sources_var))
keys = config(keys_var)
if keys is not None:
keys = safe_load(keys)
if isinstance(sources, basestring) and (
keys is None or isinstance(keys, basestring)):
add_source(sources, keys)
else:
if not len(sources) == len(keys):
msg = 'Install sources and keys lists are different lengths'
raise SourceConfigError(msg)
for src_num in range(len(sources)):
add_source(sources[src_num], keys[src_num])
if update:
apt_update(fatal=True)
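# --- Illustrative usage (not part of this commit) ---
# Sketch: an install hook driving configure_sources() from the
# install_sources/install_keys options documented above, then
# installing a package from the freshly configured archives.
from charmhelpers.fetch import apt_install, configure_sources

def install():
    configure_sources(update=True)
    apt_install(['keystone'], fatal=True)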
# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
)
class UnhandledSource(Exception):
pass
def install_remote(source):
"""
Install a file tree from a remote source
    The specified source should be a URL of the form:
    scheme://[host]/path[#[option=value][&...]]
    Schemes supported are based on this module's submodules.
Options supported are submodule-specific"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source)
except UnhandledSource:
pass
if not installed_to:
raise UnhandledSource("No handler found for source {}".format(source))
return installed_to
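# --- Illustrative usage (not part of this commit) ---
# Sketch: fetch and unpack a remote archive; the URL is a placeholder.
# A bzr URL (e.g. lp:...) would be routed to the bzr handler instead,
# per FETCH_HANDLERS above.
from charmhelpers.fetch import install_remote

path = install_remote('http://example.com/payload.tar.gz')
# path now points at the unpacked tree under $CHARM_DIR/fetched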
def install_from_config(config_var_name):
charm_config = config()
source = charm_config[config_var_name]
return install_remote(source)
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
return "Wrong source type"
def install(self, source):
"""Try to download and unpack the source. Return the path to the
unpacked files or raise UnhandledSource."""
raise UnhandledSource("Wrong source type {}".format(source))
def parse_url(self, url):
return urlparse(url)
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
parts[4:] = ['' for i in parts[4:]]
return urlunparse(parts)
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
plugin_list = []
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
            # Skip missing plugins so that they can be omitted from
            # installation if desired
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list

View File

@ -0,0 +1,63 @@
import os
import urllib2
import urlparse
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""Handler for archives via generic URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
return "Wrong source type"
if get_archive_handler(self.base_url(source)):
return True
return False
def download(self, source, dest):
        # propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
if proto in ('http', 'https'):
auth, barehost = urllib2.splituser(netloc)
if auth is not None:
source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
username, password = urllib2.splitpasswd(auth)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
            # Realm is set to None in add_password to force the username
            # and password to be used regardless of the realm
passman.add_password(None, source, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
response = urllib2.urlopen(source)
try:
with open(dest, 'w') as dest_file:
dest_file.write(response.read())
except Exception as e:
if os.path.isfile(dest):
os.unlink(dest)
raise e
def install(self, source):
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
try:
self.download(source, dld_file)
except urllib2.URLError as e:
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
return extract(dld_file)

View File

@ -0,0 +1,49 @@
import os
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.core.host import mkdir
try:
from bzrlib.branch import Branch
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-bzrlib")
from bzrlib.branch import Branch
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('bzr+ssh', 'lp'):
return False
else:
return True
def branch(self, source, dest):
url_parts = self.parse_url(source)
# If we use lp:branchname scheme we need to load plugins
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if url_parts.scheme == "lp":
from bzrlib.plugin import load_plugins
load_plugins()
try:
remote_branch = Branch.open(source)
remote_branch.bzrdir.sprout(dest).open_branch()
except Exception as e:
raise e
def install(self, source):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
try:
self.branch(source, dest_dir)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir

106
hooks/keystone_context.py Normal file
View File

@ -0,0 +1,106 @@
from charmhelpers.core.hookenv import (
config, unit_private_ip)
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
is_clustered,
)
from subprocess import (
check_call
)
import os
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
class ApacheSSLContext(context.ApacheSSLContext):
interfaces = ['https']
external_ports = []
service_namespace = 'keystone'
def __call__(self):
# late import to work around circular dependency
from keystone_utils import determine_ports
self.external_ports = determine_ports()
return super(ApacheSSLContext, self).__call__()
def configure_cert(self):
#import keystone_ssl as ssl
from keystone_utils import SSH_USER, get_ca
if not os.path.isdir('/etc/apache2/ssl'):
os.mkdir('/etc/apache2/ssl')
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
if not os.path.isdir(ssl_dir):
os.mkdir(ssl_dir)
if is_clustered():
https_cn = config('vip')
else:
https_cn = unit_private_ip()
ca = get_ca(user=SSH_USER)
cert, key = ca.get_cert_and_key(common_name=https_cn)
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
cert_out.write(cert)
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
key_out.write(key)
if ca:
with open(CA_CERT_PATH, 'w') as ca_out:
ca_out.write(ca.get_ca_bundle())
check_call(['update-ca-certificates'])
class HAProxyContext(context.HAProxyContext):
interfaces = []
def __call__(self):
'''
Extends the main charmhelpers HAProxyContext with a port mapping
specific to this charm.
        Also used to extend the keystone.conf context with correct api_listening_ports
'''
from keystone_utils import api_port
ctxt = super(HAProxyContext, self).__call__()
# determine which port api processes should bind to, depending
# on existence of haproxy + apache frontends
listen_ports = {}
listen_ports['admin_port'] = api_port('keystone-admin')
listen_ports['public_port'] = api_port('keystone-public')
# Apache ports
a_admin_port = determine_apache_port(api_port('keystone-admin'))
a_public_port = determine_apache_port(api_port('keystone-public'))
port_mapping = {
'admin-port': [
api_port('keystone-admin'), a_admin_port],
'public-port': [
api_port('keystone-public'), a_public_port],
}
# for haproxy.conf
ctxt['service_ports'] = port_mapping
# for keystone.conf
ctxt['listen_ports'] = listen_ports
return ctxt
class KeystoneContext(context.OSContextGenerator):
interfaces = []
def __call__(self):
from keystone_utils import api_port, set_admin_token
ctxt = {}
ctxt['token'] = set_admin_token(config('admin-token'))
ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'))
ctxt['public_port'] = determine_api_port(api_port('keystone-public'))
ctxt['debug'] = config('debug') in ['yes', 'true', 'True']
ctxt['verbose'] = config('verbose') in ['yes', 'true', 'True']
if config('enable-pki') not in ['false', 'False', 'no', 'No']:
ctxt['signing'] = True
return ctxt
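# --- Illustrative usage (not part of this commit) ---
# Sketch of how these context generators are typically registered with
# a charm-helpers template renderer; the template directory, release
# and exact set of registrations are assumptions for illustration.
from charmhelpers.contrib.openstack import templating
import keystone_context

def register_configs():
    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='icehouse')
    configs.register('/etc/keystone/keystone.conf',
                     [keystone_context.KeystoneContext()])
    configs.register('/etc/haproxy/haproxy.cfg',
                     [keystone_context.HAProxyContext()])
    return configs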

View File

@ -1,570 +1,291 @@
#!/usr/bin/python
import os
import sys
import time
import urlparse
from base64 import b64encode
from subprocess import check_call
from charmhelpers.contrib import unison
from charmhelpers.core.hookenv import (
Hooks,
UnregisteredHookError,
config,
is_relation_made,
log,
ERROR,
relation_get,
relation_ids,
relation_set,
related_units,
unit_get,
)
from charmhelpers.core.host import (
mkdir,
restart_on_change,
)
from charmhelpers.fetch import (
apt_install, apt_update,
filter_installed_packages
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available,
)
from keystone_utils import (
config_dirty,
config_get,
execute,
update_config_block,
set_admin_token,
ensure_initial_admin,
create_service_entry,
create_endpoint_template,
create_role,
get_admin_token,
get_service_password,
create_user,
grant_role,
get_ca,
synchronize_service_credentials,
add_service_to_keystone,
determine_packages,
do_openstack_upgrade,
configure_pki_tokens,
SSH_USER,
SSL_DIR,
ensure_initial_admin,
migrate_database,
save_script_rc,
synchronize_ca,
register_configs,
relation_list,
restart_map,
CLUSTER_RES,
https
)
KEYSTONE_CONF,
SSH_USER,
)
from lib.openstack_common import (
get_os_codename_install_source,
get_os_codename_package,
get_os_version_codename,
get_os_version_package,
save_script_rc
)
import lib.unison as unison
import lib.utils as utils
import lib.cluster_utils as cluster
import lib.haproxy_utils as haproxy
from charmhelpers.contrib.hahelpers.cluster import (
eligible_leader,
get_hacluster_config,
is_leader,
)
from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.peerstorage import peer_echo
config = config_get()
packages = [
"keystone", "python-mysqldb", "pwgen",
"haproxy", "python-jinja2", "openssl", "unison",
"python-sqlalchemy"
]
service = "keystone"
# used to verify joined services are valid openstack components.
# this should reflect the current "core" components of openstack
# and be expanded as we add support for them as a distro
valid_services = {
"nova": {
"type": "compute",
"desc": "Nova Compute Service"
},
"nova-volume": {
"type": "volume",
"desc": "Nova Volume Service"
},
"cinder": {
"type": "volume",
"desc": "Cinder Volume Service"
},
"ec2": {
"type": "ec2",
"desc": "EC2 Compatibility Layer"
},
"glance": {
"type": "image",
"desc": "Glance Image Service"
},
"s3": {
"type": "s3",
"desc": "S3 Compatible object-store"
},
"swift": {
"type": "object-store",
"desc": "Swift Object Storage Service"
},
"quantum": {
"type": "network",
"desc": "Quantum Networking Service"
},
"oxygen": {
"type": "oxygen",
"desc": "Oxygen Cloud Image Service"
},
"ceilometer": {
"type": "metering",
"desc": "Ceilometer Metering Service"
},
"heat": {
"type": "orchestration",
"desc": "Heat Orchestration API"
},
"heat-cfn": {
"type": "cloudformation",
"desc": "Heat CloudFormation API"
}
}
hooks = Hooks()
CONFIGS = register_configs()
def install_hook():
@hooks.hook()
def install():
execd_preinstall()
utils.configure_source()
utils.install(*packages)
update_config_block('DEFAULT',
public_port=cluster.determine_api_port(config["service-port"]))
update_config_block('DEFAULT',
admin_port=cluster.determine_api_port(config["admin-port"]))
update_config_block('DEFAULT', use_syslog=config["use-syslog"])
set_admin_token(config['admin-token'])
configure_installation_source(config('openstack-origin'))
apt_update()
apt_install(determine_packages(), fatal=True)
# set all backends to use sql+sqlite, if they are not already by default
update_config_block('sql',
connection='sqlite:////var/lib/keystone/keystone.db')
update_config_block('identity',
driver='keystone.identity.backends.sql.Identity')
update_config_block('catalog',
driver='keystone.catalog.backends.sql.Catalog')
update_config_block('token',
driver='keystone.token.backends.sql.Token')
update_config_block('ec2',
driver='keystone.contrib.ec2.backends.sql.Ec2')
utils.stop('keystone')
execute("keystone-manage db_sync")
utils.start('keystone')
# ensure user + permissions for peer relations that
# may be syncing data there via SSH_USER.
@hooks.hook('config-changed')
@restart_on_change(restart_map())
def config_changed():
unison.ensure_user(user=SSH_USER, group='keystone')
execute("chmod -R g+wrx /var/lib/keystone/")
homedir = unison.get_homedir(SSH_USER)
if not os.path.isdir(homedir):
mkdir(homedir, SSH_USER, 'keystone', 0o775)
time.sleep(5)
ensure_initial_admin(config)
if openstack_upgrade_available('keystone'):
do_openstack_upgrade(configs=CONFIGS)
check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
def db_joined():
relation_data = {
"database": config["database"],
"username": config["database-user"],
"hostname": config["hostname"]
}
utils.relation_set(**relation_data)
def db_changed():
relation_data = utils.relation_get_dict()
if ('password' not in relation_data or
'db_host' not in relation_data):
utils.juju_log('INFO',
"db_host or password not set. Peer not ready, exit 0")
return
update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
(config["database-user"],
relation_data["password"],
relation_data["db_host"],
config["database"]))
if cluster.eligible_leader(CLUSTER_RES):
utils.juju_log('INFO',
'Cluster leader, performing db-sync')
execute("keystone-manage db_sync", echo=True)
if config_dirty():
utils.restart('keystone')
time.sleep(5)
if cluster.eligible_leader(CLUSTER_RES):
save_script_rc()
configure_https()
CONFIGS.write_all()
if eligible_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
# If the backend database has been switched to something new and there
    # are existing identity-service relations, service entries need to be
# recreated in the new database. Re-executing identity-service-changed
# will do this.
for rid in utils.relation_ids('identity-service'):
for unit in utils.relation_list(rid=rid):
utils.juju_log('INFO',
"Re-exec'ing identity-service-changed"
" for: %s - %s" % (rid, unit))
identity_changed(relation_id=rid, remote_unit=unit)
log('Firing identity_changed hook for all related services.')
# HTTPS may have been set - so fire all identity relations
# again
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
def ensure_valid_service(service):
if service not in valid_services.keys():
utils.juju_log('WARNING',
"Invalid service requested: '%s'" % service)
utils.relation_set(admin_token=-1)
return
@hooks.hook('shared-db-relation-joined')
def db_joined():
if is_relation_made('pgsql-db'):
# error, postgresql is used
e = ('Attempting to associate a mysql database when there is already '
'associated a postgresql one')
log(e, level=ERROR)
raise Exception(e)
relation_set(database=config('database'),
username=config('database-user'),
hostname=unit_get('private-address'))
def add_endpoint(region, service, publicurl, adminurl, internalurl):
desc = valid_services[service]["desc"]
service_type = valid_services[service]["type"]
create_service_entry(service, service_type, desc)
create_endpoint_template(region=region, service=service,
publicurl=publicurl,
adminurl=adminurl,
internalurl=internalurl)
@hooks.hook('pgsql-db-relation-joined')
def pgsql_db_joined():
if is_relation_made('shared-db'):
# raise error
e = ('Attempting to associate a postgresql database when there'
' is already associated a mysql one')
log(e, level=ERROR)
raise Exception(e)
relation_set(database=config('database'))
@hooks.hook('shared-db-relation-changed')
@restart_on_change(restart_map())
def db_changed():
if 'shared-db' not in CONFIGS.complete_contexts():
log('shared-db relation incomplete. Peer not ready?')
else:
CONFIGS.write(KEYSTONE_CONF)
if eligible_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
# Ensure any existing service entries are updated in the
# new database backend
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('pgsql-db-relation-changed')
@restart_on_change(restart_map())
def pgsql_db_changed():
if 'pgsql-db' not in CONFIGS.complete_contexts():
log('pgsql-db relation incomplete. Peer not ready?')
else:
CONFIGS.write(KEYSTONE_CONF)
if eligible_leader(CLUSTER_RES):
migrate_database()
ensure_initial_admin(config)
# Ensure any existing service entries are updated in the
# new database backend
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
identity_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('identity-service-relation-joined')
def identity_joined():
""" Do nothing until we get information about requested service """
pass
def get_requested_roles(settings):
''' Retrieve any valid requested_roles from dict settings '''
if ('requested_roles' in settings and
settings['requested_roles'] not in ['None', None]):
return settings['requested_roles'].split(',')
else:
return []
@hooks.hook('identity-service-relation-changed')
def identity_changed(relation_id=None, remote_unit=None):
""" A service has advertised its API endpoints, create an entry in the
service catalog.
Optionally allow this hook to be re-fired for an existing
    relation+unit; for context see db_changed().
"""
if not cluster.eligible_leader(CLUSTER_RES):
utils.juju_log('INFO',
'Deferring identity_changed() to service leader.')
return
settings = utils.relation_get_dict(relation_id=relation_id,
remote_unit=remote_unit)
# the minimum settings needed per endpoint
single = set(['service', 'region', 'public_url', 'admin_url',
'internal_url'])
if single.issubset(settings):
# other end of relation advertised only one endpoint
if 'None' in [v for k, v in settings.iteritems()]:
# Some backend services advertise no endpoint but require a
# hook execution to update auth strategy.
relation_data = {}
# Check if clustered and use vip + haproxy ports if so
if cluster.is_clustered():
relation_data["auth_host"] = config['vip']
relation_data["service_host"] = config['vip']
else:
relation_data["auth_host"] = config['hostname']
relation_data["service_host"] = config['hostname']
relation_data["auth_port"] = config['admin-port']
relation_data["service_port"] = config['service-port']
if config['https-service-endpoints'] in ['True', 'true']:
# Pass CA cert as client will need it to
# verify https connections
ca = get_ca(user=SSH_USER)
ca_bundle = ca.get_ca_bundle()
relation_data['https_keystone'] = 'True'
relation_data['ca_cert'] = b64encode(ca_bundle)
if relation_id:
relation_data['rid'] = relation_id
# Allow the remote service to request creation of any additional
# roles. Currently used by Horizon
for role in get_requested_roles(settings):
utils.juju_log('INFO',
"Creating requested role: %s" % role)
create_role(role)
utils.relation_set(**relation_data)
return
else:
ensure_valid_service(settings['service'])
add_endpoint(region=settings['region'],
service=settings['service'],
publicurl=settings['public_url'],
adminurl=settings['admin_url'],
internalurl=settings['internal_url'])
service_username = settings['service']
https_cn = urlparse.urlparse(settings['internal_url'])
https_cn = https_cn.hostname
if eligible_leader(CLUSTER_RES):
add_service_to_keystone(relation_id, remote_unit)
synchronize_ca()
else:
# assemble multiple endpoints from relation data. service name
        # should be prepended to the setting name, i.e.:
        # relation-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
# relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
# Results in a dict that looks like:
# { 'ec2': {
# 'service': $foo
# 'region': $foo
# 'public_url': $foo
# }
# 'nova': {
# 'service': $foo
# 'region': $foo
# 'public_url': $foo
# }
# }
endpoints = {}
for k, v in settings.iteritems():
ep = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if ep not in endpoints:
endpoints[ep] = {}
endpoints[ep][x] = v
services = []
https_cn = None
for ep in endpoints:
# weed out any unrelated relation stuff Juju might have added
            # by ensuring each possible endpoint has appropriate fields
# ['service', 'region', 'public_url', 'admin_url', 'internal_url']
if single.issubset(endpoints[ep]):
ep = endpoints[ep]
ensure_valid_service(ep['service'])
add_endpoint(region=ep['region'], service=ep['service'],
publicurl=ep['public_url'],
adminurl=ep['admin_url'],
internalurl=ep['internal_url'])
services.append(ep['service'])
if not https_cn:
https_cn = urlparse.urlparse(ep['internal_url'])
https_cn = https_cn.hostname
service_username = '_'.join(services)
if 'None' in [v for k, v in settings.iteritems()]:
return
if not service_username:
return
token = get_admin_token()
utils.juju_log('INFO',
"Creating service credentials for '%s'" % service_username)
service_password = get_service_password(service_username)
create_user(service_username, service_password, config['service-tenant'])
grant_role(service_username, config['admin-role'],
config['service-tenant'])
# Allow the remote service to request creation of any additional roles.
# Currently used by Swift and Ceilometer.
for role in get_requested_roles(settings):
utils.juju_log('INFO',
"Creating requested role: %s" % role)
create_role(role, service_username,
config['service-tenant'])
    # As of https://review.openstack.org/#change,4675, all nodes hosting
    # an endpoint need a service username and password assigned to
# the service tenant and granted admin role.
# note: config['service-tenant'] is created in utils.ensure_initial_admin()
# we return a token, information about our API endpoints, and the generated
# service credentials
relation_data = {
"admin_token": token,
"service_host": config["hostname"],
"service_port": config["service-port"],
"auth_host": config["hostname"],
"auth_port": config["admin-port"],
"service_username": service_username,
"service_password": service_password,
"service_tenant": config['service-tenant'],
"https_keystone": "False",
"ssl_cert": "",
"ssl_key": "",
"ca_cert": ""
}
if relation_id:
relation_data['rid'] = relation_id
# Check if clustered and use vip + haproxy ports if so
if cluster.is_clustered():
relation_data["auth_host"] = config['vip']
relation_data["service_host"] = config['vip']
# generate or get a new cert/key for service if set to manage certs.
if config['https-service-endpoints'] in ['True', 'true']:
ca = get_ca(user=SSH_USER)
cert, key = ca.get_cert_and_key(common_name=https_cn)
ca_bundle = ca.get_ca_bundle()
relation_data['ssl_cert'] = b64encode(cert)
relation_data['ssl_key'] = b64encode(key)
relation_data['ca_cert'] = b64encode(ca_bundle)
relation_data['https_keystone'] = 'True'
unison.sync_to_peers(peer_interface='cluster',
paths=[SSL_DIR], user=SSH_USER, verbose=True)
utils.relation_set(**relation_data)
synchronize_service_credentials()
def config_changed():
unison.ensure_user(user=SSH_USER, group='keystone')
execute("chmod -R g+wrx /var/lib/keystone/")
    # Determine whether or not we should do an upgrade, based on the
    # version offered by openstack-origin.
available = get_os_codename_install_source(config['openstack-origin'])
installed = get_os_codename_package('keystone')
if (available and
get_os_version_codename(available) > \
get_os_version_codename(installed)):
# TODO: fixup this call to work like utils.install()
do_openstack_upgrade(config['openstack-origin'], ' '.join(packages))
# Ensure keystone group permissions
execute("chmod -R g+wrx /var/lib/keystone/")
env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
'OPENSTACK_PORT_ADMIN': cluster.determine_api_port(
config['admin-port']),
'OPENSTACK_PORT_PUBLIC': cluster.determine_api_port(
config['service-port'])}
save_script_rc(**env_vars)
set_admin_token(config['admin-token'])
if cluster.eligible_leader(CLUSTER_RES):
utils.juju_log('INFO',
'Cluster leader - ensuring endpoint configuration'
' is up to date')
ensure_initial_admin(config)
update_config_block('logger_root', level=config['log-level'],
file='/etc/keystone/logging.conf')
update_config_block('DEFAULT', use_syslog=config["use-syslog"])
if get_os_version_package('keystone') >= '2013.1':
# PKI introduced in Grizzly
configure_pki_tokens(config)
if config_dirty():
utils.restart('keystone')
        time.sleep(5)
if cluster.eligible_leader(CLUSTER_RES):
utils.juju_log('INFO',
'Firing identity_changed hook'
' for all related services.')
# HTTPS may have been set - so fire all identity relations
# again
for r_id in utils.relation_ids('identity-service'):
for unit in utils.relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
def upgrade_charm():
# Ensure all required packages are installed
utils.install(*packages)
cluster_changed()
if cluster.eligible_leader(CLUSTER_RES):
utils.juju_log('INFO',
'Cluster leader - ensuring endpoint configuration'
' is up to date')
ensure_initial_admin(config)
log('Deferring identity_changed() to service leader.')
@hooks.hook('cluster-relation-joined')
def cluster_joined():
unison.ssh_authorized_peers(user=SSH_USER,
group='keystone',
group='juju_keystone',
peer_interface='cluster',
ensure_local_user=True)
update_config_block('DEFAULT',
public_port=cluster.determine_api_port(config["service-port"]))
update_config_block('DEFAULT',
admin_port=cluster.determine_api_port(config["admin-port"]))
if config_dirty():
utils.restart('keystone')
service_ports = {
"keystone_admin": [
cluster.determine_haproxy_port(config['admin-port']),
cluster.determine_api_port(config["admin-port"])
],
"keystone_service": [
cluster.determine_haproxy_port(config['service-port']),
cluster.determine_api_port(config["service-port"])
]
}
haproxy.configure_haproxy(service_ports)
@hooks.hook('cluster-relation-changed',
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
# NOTE(jamespage) re-echo passwords for peer storage
peer_echo(includes=['_passwd'])
unison.ssh_authorized_peers(user=SSH_USER,
group='keystone',
peer_interface='cluster',
ensure_local_user=True)
synchronize_service_credentials()
service_ports = {
"keystone_admin": [
cluster.determine_haproxy_port(config['admin-port']),
cluster.determine_api_port(config["admin-port"])
],
"keystone_service": [
cluster.determine_haproxy_port(config['service-port']),
cluster.determine_api_port(config["service-port"])
]
}
haproxy.configure_haproxy(service_ports)
synchronize_ca()
CONFIGS.write_all()
def ha_relation_changed():
relation_data = utils.relation_get_dict()
if ('clustered' in relation_data and
cluster.is_leader(CLUSTER_RES)):
utils.juju_log('INFO',
'Cluster configured, notifying other services'
' and updating keystone endpoint configuration')
# Update keystone endpoint to point at VIP
ensure_initial_admin(config)
# Tell all related services to start using
# the VIP and haproxy ports instead
for r_id in utils.relation_ids('identity-service'):
utils.relation_set(rid=r_id,
auth_host=config['vip'],
service_host=config['vip'])
def ha_relation_joined():
# Obtain the config values necessary for the cluster config. These
# include multicast port and interface to bind to.
corosync_bindiface = config['ha-bindiface']
corosync_mcastport = config['ha-mcastport']
vip = config['vip']
vip_cidr = config['vip_cidr']
vip_iface = config['vip_iface']
# Obtain resources
@hooks.hook('ha-relation-joined')
def ha_joined():
config = get_hacluster_config()
resources = {
'res_ks_vip': 'ocf:heartbeat:IPaddr2',
'res_ks_haproxy': 'lsb:haproxy'
}
'res_ks_haproxy': 'lsb:haproxy',
}
vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
(config['vip'], config['vip_cidr'], config['vip_iface'])
resource_params = {
'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
(vip, vip_cidr, vip_iface),
'res_ks_vip': vip_params,
'res_ks_haproxy': 'op monitor interval="5s"'
}
}
init_services = {
'res_ks_haproxy': 'haproxy'
}
}
clones = {
'cl_ks_haproxy': 'res_ks_haproxy'
}
utils.relation_set(init_services=init_services,
corosync_bindiface=corosync_bindiface,
corosync_mcastport=corosync_mcastport,
resources=resources,
resource_params=resource_params,
clones=clones)
}
relation_set(init_services=init_services,
corosync_bindiface=config['ha-bindiface'],
corosync_mcastport=config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
clones=clones)
hooks = {
"install": install_hook,
"shared-db-relation-joined": db_joined,
"shared-db-relation-changed": db_changed,
"identity-service-relation-joined": identity_joined,
"identity-service-relation-changed": identity_changed,
"config-changed": config_changed,
"cluster-relation-joined": cluster_joined,
"cluster-relation-changed": cluster_changed,
"cluster-relation-departed": cluster_changed,
"ha-relation-joined": ha_relation_joined,
"ha-relation-changed": ha_relation_changed,
"upgrade-charm": upgrade_charm
}
@hooks.hook('ha-relation-changed')
@restart_on_change(restart_map())
def ha_changed():
clustered = relation_get('clustered')
CONFIGS.write_all()
if (clustered is not None and
is_leader(CLUSTER_RES)):
ensure_initial_admin(config)
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
relation_set(relation_id=rid,
auth_host=config('vip'),
service_host=config('vip'))
utils.do_hooks(hooks)
def configure_https():
'''
    Enables the Apache SSL frontend config if appropriate and kicks
    identity-service relations with any required API updates.
'''
    # need to write all to ensure changes to the entire request pipeline
    # propagate (keystone api, haproxy, apache)
CONFIGS.write_all()
if 'https' in CONFIGS.complete_contexts():
cmd = ['a2ensite', 'openstack_https_frontend']
check_call(cmd)
else:
cmd = ['a2dissite', 'openstack_https_frontend']
check_call(cmd)
@hooks.hook('upgrade-charm')
@restart_on_change(restart_map(), stopstart=True)
def upgrade_charm():
apt_install(filter_installed_packages(determine_packages()))
unison.ssh_authorized_peers(user=SSH_USER,
group='keystone',
peer_interface='cluster',
ensure_local_user=True)
synchronize_ca()
if eligible_leader(CLUSTER_RES):
log('Cluster leader - ensuring endpoint configuration'
' is up to date')
time.sleep(10)
ensure_initial_admin(config)
# Deal with interface changes for icehouse
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
identity_changed(relation_id=r_id,
remote_unit=unit)
CONFIGS.write_all()
def main():
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
if __name__ == '__main__':
main()

7
hooks/keystone_ssl.py Executable file → Normal file
View File

@ -113,7 +113,7 @@ def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
if not os.path.exists(d):
print 'Creating %s.' % d
os.mkdir(d)
os.chmod(os.path.join(ca_dir, 'private'), 0710)
os.chmod(os.path.join(ca_dir, 'private'), 0o710)
if not os.path.isfile(os.path.join(ca_dir, 'serial')):
with open(os.path.join(ca_dir, 'serial'), 'wb') as out:
@ -161,7 +161,7 @@ def intermediate_ca_csr_key(ca_dir):
def sign_int_csr(ca_dir, csr, common_name):
print 'Signing certificate request %s.' % csr
crt = os.path.join(ca_dir, 'certs',
'%s.crt' % os.path.basename(csr).split('.')[0])
'%s.crt' % os.path.basename(csr).split('.')[0])
subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
cmd = ['openssl', 'ca', '-batch', '-config',
os.path.join(ca_dir, 'ca.cnf'),
@ -238,6 +238,7 @@ def tar_directory(path):
class JujuCA(object):
def __init__(self, name, ca_dir, root_ca_dir, user, group):
root_crt, root_key = init_root_ca(root_ca_dir,
'%s Certificate Authority' % name)
@ -288,7 +289,7 @@ class JujuCA(object):
key = open(key, 'r').read()
except:
print 'Could not load ssl private key for %s from %s' %\
(common_name, key)
(common_name, key)
exit(1)
return crt, key
crt, key = self._create_certificate(common_name, common_name)

835
hooks/keystone_utils.py Executable file → Normal file

File diff suppressed because it is too large


@ -1,196 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from lib.utils import (
relation_ids,
relation_list,
relation_get,
render_template,
juju_log,
config_get,
install,
get_host_ip,
restart
)
from lib.cluster_utils import https
import os
import subprocess
from base64 import b64decode
APACHE_SITE_DIR = "/etc/apache2/sites-available"
SITE_TEMPLATE = "apache2_site.tmpl"
RELOAD_CHECK = "To activate the new configuration"
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
juju_log('INFO',
"Inspecting identity-service relations for SSL certificate.")
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = None
juju_log('INFO',
"Inspecting identity-service relations for CA SSL certificate.")
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not ca_cert:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
def enable_https(port_maps, namespace, cert, key, ca_cert=None):
'''
For a given set of port mappings, configures apache2
HTTPS local reverse proxying using certificates and keys provided in
either configuration data (preferred) or relation data. Assumes ports
are not in use (calling charm should ensure that).
port_maps: dict: external to internal port mappings
namespace: str: name of charm
'''
def _write_if_changed(path, new_content):
content = None
if os.path.exists(path):
with open(path, 'r') as f:
content = f.read().strip()
if content != new_content:
with open(path, 'w') as f:
f.write(new_content)
return True
else:
return False
juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
http_restart = False
if cert:
cert = b64decode(cert)
if key:
key = b64decode(key)
if ca_cert:
ca_cert = b64decode(ca_cert)
if not cert and not key:
juju_log('ERROR',
"Expected but could not find SSL certificate data, not "
"configuring HTTPS!")
return False
install('apache2')
if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
'proxy', 'proxy_http']):
http_restart = True
ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
if not os.path.exists(ssl_dir):
os.makedirs(ssl_dir)
if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
http_restart = True
if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
http_restart = True
os.chmod(os.path.join(ssl_dir, 'key'), 0600)
install_ca_cert(ca_cert)
sites_dir = '/etc/apache2/sites-available'
for ext_port, int_port in port_maps.items():
juju_log('INFO',
'Creating apache2 reverse proxy vhost'
' for {}:{}'.format(ext_port,
int_port))
site = "{}_{}".format(namespace, ext_port)
site_path = os.path.join(sites_dir, site)
with open(site_path, 'w') as fsite:
context = {
"ext": ext_port,
"int": int_port,
"namespace": namespace,
"private_address": get_host_ip()
}
fsite.write(render_template(SITE_TEMPLATE,
context))
if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
http_restart = True
if http_restart:
restart('apache2')
return True
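# Usage sketch (illustrative only: the 443 -> 5000 mapping and the b64_*
# variables are assumptions, not values from this charm). enable_https()
# expects base64-encoded material, decodes it, writes it under
# /etc/apache2/ssl/<namespace>/ and enables one vhost per mapping:
#
#     enable_https(port_maps={443: 5000}, namespace='keystone',
#                  cert=b64_cert, key=b64_key)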
def disable_https(port_maps, namespace):
'''
Ensure HTTPS reverse proxying is disabled for the given port mappings.
port_maps: dict: of ext -> int port mappings
namespace: str: name of charm
'''
juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))
if (not os.path.exists('/etc/apache2') or
not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
return
http_restart = False
for ext_port in port_maps.keys():
if os.path.exists(os.path.join(APACHE_SITE_DIR,
"{}_{}".format(namespace,
ext_port))):
juju_log('INFO',
"Disabling HTTPS reverse proxy"
" for {} {}.".format(namespace,
ext_port))
if (RELOAD_CHECK in
subprocess.check_output(['a2dissite',
'{}_{}'.format(namespace,
ext_port)])):
http_restart = True
if http_restart:
restart(['apache2'])
def setup_https(port_maps, namespace, cert, key, ca_cert=None):
'''
Ensures HTTPS is either enabled or disabled for given port
mapping.
port_maps: dict: of ext -> int port mappings
namespace: str: name of charm
'''
if not https:
disable_https(port_maps, namespace)
else:
enable_https(port_maps, namespace, cert, key, ca_cert)


@ -1,55 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from lib.utils import (
relation_ids,
relation_list,
relation_get,
unit_get,
reload,
render_template
)
import os
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
HAPROXY_DEFAULT = '/etc/default/haproxy'
def configure_haproxy(service_ports):
'''
Configure HAProxy based on the current peers in the service
cluster using the provided port map, e.g.:
"swift": [ 8080, 8070 ]
HAProxy will also be reloaded/started if required.
service_ports: dict: dict of lists of [ frontend, backend ]
'''
cluster_hosts = {}
cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
unit_get('private-address')
for r_id in relation_ids('cluster'):
for unit in relation_list(r_id):
cluster_hosts[unit.replace('/', '-')] = \
relation_get(attribute='private-address',
rid=r_id,
unit=unit)
context = {
'units': cluster_hosts,
'service_ports': service_ports
}
with open(HAPROXY_CONF, 'w') as f:
f.write(render_template(os.path.basename(HAPROXY_CONF),
context))
with open(HAPROXY_DEFAULT, 'w') as f:
f.write('ENABLED=1')
reload('haproxy')
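# Usage sketch (hypothetical ports, following the docstring's shape): one
# frontend on 5000 balancing across each peer's backend on 5001. The call
# rewrites haproxy.cfg, sets ENABLED=1 in /etc/default/haproxy and reloads
# the service:
#
#     configure_haproxy({'keystone': [5000, 5001]})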


@ -1,234 +0,0 @@
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
import apt_pkg as apt
import subprocess
import os
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
ubuntu_openstack_release = {
'oneiric': 'diablo',
'precise': 'essex',
'quantal': 'folsom',
'raring': 'grizzly',
'saucy': 'havana',
}
openstack_codenames = {
'2011.2': 'diablo',
'2012.1': 'essex',
'2012.2': 'folsom',
'2013.1': 'grizzly',
'2013.2': 'havana',
}
# The ugly duckling
swift_codenames = {
'1.4.3': 'diablo',
'1.4.8': 'essex',
'1.7.4': 'folsom',
'1.7.6': 'grizzly',
'1.7.7': 'grizzly',
'1.8.0': 'grizzly',
}
def juju_log(msg):
subprocess.check_call(['juju-log', msg])
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg)
exit(1)
def lsb_release():
'''Return /etc/lsb-release in a dict'''
lsb = open('/etc/lsb-release', 'r')
d = {}
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src == 'distro':
try:
rel = ubuntu_openstack_release[ubuntu_rel]
except KeyError:
e = 'Could not derive OpenStack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in openstack_codenames.iteritems():
if v in src:
return v
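# Illustrative expectations on a precise host (derived from the mapping
# tables above, not from test output):
#
#     get_os_codename_install_source('distro')                # -> 'essex'
#     get_os_codename_install_source('cloud:precise-havana')  # -> 'havana'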
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in openstack_codenames.iteritems():
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_codename_package(pkg):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
cache = apt.Cache()
try:
pkg = cache[pkg]
except:
e = 'Could not determine version of installed package: %s' % pkg
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
vers = vers[:5]
return swift_codenames[vers]
else:
vers = vers[:6]
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg)
if 'swift' in pkg:
vers_map = swift_codenames
else:
vers_map = openstack_codenames
for version, cname in vers_map.iteritems():
if cname == codename:
return version
e = "Could not determine OpenStack version for package: %s" % pkg
error_out(e)
def configure_installation_source(rel):
'''Configure apt installation source.'''
def _import_key(keyid):
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
if rel == 'distro':
return
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
_import_key(key)
elif l == 1:
src = rel
else:
error_out("Invalid openstack-release: %s" % rel)
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana'
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
_import_key(CLOUD_ARCHIVE_KEY_ID)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
charm_dir = os.getenv('CHARM_DIR')
juju_rc_path = "%s/%s" % (charm_dir, script_path)
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"]
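# Worked example (hypothetical variable names): calling
#
#     save_script_rc(OPENSTACK_SERVICE_KEYSTONE='keystone',
#                    OPENSTACK_PORT_ADMIN=35357)
#
# writes $CHARM_DIR/scripts/scriptrc containing:
#
#     #!/bin/bash
#     export OPENSTACK_SERVICE_KEYSTONE=keystone
#     export OPENSTACK_PORT_ADMIN=35357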


@ -1,220 +0,0 @@
#!/usr/bin/python
#
# Easy file synchronization among peer units using ssh + unison.
#
# From *both* peer relation -joined and -changed, add a call to
# ssh_authorized_peers() describing the peer relation and the desired
# user + group. After all peer relations have settled, all hosts should
# be able to connect to one another via key-auth'd ssh as the specified user.
#
# Other hooks are then free to synchronize files and directories using
# sync_to_peers().
#
# For a peer relation named 'cluster', for example:
#
# cluster-relation-joined:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# cluster-relation-changed:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# Hooks are now free to sync files as easily as:
#
# files = ['/etc/fstab', '/etc/apt.conf.d/']
# sync_to_peers(peer_interface='cluster',
# user='juju_ssh', paths=[files])
#
# It is assumed the charm itself has setup permissions on each unit
# such that 'juju_ssh' has read + write permissions. Also assumed
# that the calling charm takes care of leader delegation.
#
# TODO: Currently depends on the utils.py shipped with the keystone charm.
# Either copy required functionality to this library or depend on
# something more generic.
import os
import sys
import lib.utils as utils
import subprocess
import grp
import pwd
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
utils.juju_log('INFO',
'Could not get homedir for user %s: user exists?' % user)
sys.exit(1)
def get_keypair(user):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir)
priv_key = os.path.join(ssh_dir, 'id_rsa')
if not os.path.isfile(priv_key):
utils.juju_log('INFO', 'Generating new ssh key for user %s.' % user)
cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
'-f', priv_key]
subprocess.check_call(cmd)
pub_key = '%s.pub' % priv_key
if not os.path.isfile(pub_key):
utils.juju_log('INFO', 'Generating missing ssh public key @ %s.' % \
pub_key)
cmd = ['ssh-keygen', '-y', '-f', priv_key]
p = subprocess.check_output(cmd).strip()
with open(pub_key, 'wb') as out:
out.write(p)
subprocess.check_call(['chown', '-R', user, ssh_dir])
return open(priv_key, 'r').read().strip(), \
open(pub_key, 'r').read().strip()
def write_authorized_keys(user, keys):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
utils.juju_log('INFO', 'Syncing authorized_keys @ %s.' % auth_keys)
with open(auth_keys, 'wb') as out:
for k in keys:
out.write('%s\n' % k)
def write_known_hosts(user, hosts):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
known_hosts = os.path.join(ssh_dir, 'known_hosts')
khosts = []
for host in hosts:
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
remote_key = subprocess.check_output(cmd).strip()
khosts.append(remote_key)
utils.juju_log('INFO', 'Syncing known_hosts @ %s.' % known_hosts)
with open(known_hosts, 'wb') as out:
for host in khosts:
out.write('%s\n' % host)
def ensure_user(user, group=None):
# need to ensure a bash shell'd user exists.
try:
pwd.getpwnam(user)
except KeyError:
utils.juju_log('INFO', 'Creating new user %s.%s.' % (user, group))
cmd = ['adduser', '--system', '--shell', '/bin/bash', user]
if group:
try:
grp.getgrnam(group)
except KeyError:
subprocess.check_call(['addgroup', group])
cmd += ['--ingroup', group]
subprocess.check_call(cmd)
def ssh_authorized_peers(peer_interface, user, group=None, ensure_local_user=False):
"""
Main setup function, should be called from both peer -changed and -joined
hooks with the same parameters.
"""
if ensure_local_user:
ensure_user(user, group)
priv_key, pub_key = get_keypair(user)
hook = os.path.basename(sys.argv[0])
if hook == '%s-relation-joined' % peer_interface:
utils.relation_set(ssh_pub_key=pub_key)
print 'joined'
elif hook == '%s-relation-changed' % peer_interface:
hosts = []
keys = []
for r_id in utils.relation_ids(peer_interface):
for unit in utils.relation_list(r_id):
settings = utils.relation_get_dict(relation_id=r_id,
remote_unit=unit)
if 'ssh_pub_key' in settings:
keys.append(settings['ssh_pub_key'])
hosts.append(settings['private-address'])
else:
utils.juju_log('INFO',
'ssh_authorized_peers(): ssh_pub_key '\
'missing for unit %s, skipping.' % unit)
write_authorized_keys(user, keys)
write_known_hosts(user, hosts)
authed_hosts = ':'.join(hosts)
utils.relation_set(ssh_authorized_hosts=authed_hosts)
def _run_as_user(user):
try:
user = pwd.getpwnam(user)
except KeyError:
utils.juju_log('INFO', 'Invalid user: %s' % user)
sys.exit(1)
uid, gid = user.pw_uid, user.pw_gid
os.environ['HOME'] = user.pw_dir
def _inner():
os.setgid(gid)
os.setuid(uid)
return _inner
def run_as_user(user, cmd):
return subprocess.check_output(cmd, preexec_fn=_run_as_user(user), cwd='/')
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
base_cmd = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
'-fastcheck=true', '-group=false', '-owner=false',
'-prefer=newer', '-times=true']
if not verbose:
base_cmd.append('-silent')
hosts = []
for r_id in (utils.relation_ids(peer_interface) or []):
for unit in utils.relation_list(r_id):
settings = utils.relation_get_dict(relation_id=r_id,
remote_unit=unit)
try:
authed_hosts = settings['ssh_authorized_hosts'].split(':')
except KeyError:
print 'unison sync_to_peers: peer has not authorized *any* '\
'hosts yet.'
return
unit_hostname = utils.unit_get('private-address')
add_host = None
for authed_host in authed_hosts:
if unit_hostname == authed_host:
add_host = settings['private-address']
if add_host:
hosts.append(settings['private-address'])
else:
print 'unison sync_to_peers: peer (%s) has not authorized '\
'*this* host yet, skipping.' %\
settings['private-address']
for path in paths:
# removing trailing slash from directory paths, unison
# doesn't like these.
if path.endswith('/'):
path = path[:(len(path) - 1)]
for host in hosts:
cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
utils.juju_log('INFO', 'Syncing local path %s to %s@%s:%s' %\
(path, user, host, path))
print ' '.join(cmd)
run_as_user(user, cmd)


@ -1,333 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Paul Collins <paul.collins@canonical.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import json
import os
import subprocess
import socket
import sys
def do_hooks(hooks):
hook = os.path.basename(sys.argv[0])
try:
hook_func = hooks[hook]
except KeyError:
juju_log('INFO',
"This charm doesn't know how to handle '{}'.".format(hook))
else:
hook_func()
def install(*pkgs):
cmd = [
'apt-get',
'-y',
'install'
]
for pkg in pkgs:
cmd.append(pkg)
subprocess.check_call(cmd)
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
install('python-jinja2')
import jinja2
try:
import dns.resolver
except ImportError:
install('python-dnspython')
import dns.resolver
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
CLOUD_ARCHIVE_POCKETS = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
}
def configure_source():
source = str(config_get('openstack-origin'))
if not source:
return
if source.startswith('ppa:'):
cmd = [
'add-apt-repository',
source
]
subprocess.check_call(cmd)
if source.startswith('cloud:'):
# CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
# cloud:precise-folsom/updates or cloud:precise-folsom/proposed
install('ubuntu-cloud-keyring')
pocket = source.split(':')[1]
pocket = pocket.split('-')[1]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
if source.startswith('deb'):
l = len(source.split('|'))
if l == 2:
(apt_line, key) = source.split('|')
cmd = [
'apt-key',
'adv', '--keyserver keyserver.ubuntu.com',
'--recv-keys', key
]
subprocess.check_call(cmd)
elif l == 1:
apt_line = source
with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
apt.write(apt_line + "\n")
cmd = [
'apt-get',
'update'
]
subprocess.check_call(cmd)
# Protocols
TCP = 'TCP'
UDP = 'UDP'
def expose(port, protocol='TCP'):
cmd = [
'open-port',
'{}/{}'.format(port, protocol)
]
subprocess.check_call(cmd)
def juju_log(severity, message):
cmd = [
'juju-log',
'--log-level', severity,
message
]
subprocess.check_call(cmd)
cache = {}
def cached(func):
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
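# Effect sketch: within a single hook invocation the first call shells out
# to the hook tool, and repeat calls with identical arguments return the
# result memoized under str((func, args, kwargs)):
#
#     relation_ids('cluster')   # runs the relation-ids tool
#     relation_ids('cluster')   # served from the module-level cache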
@cached
def relation_ids(relation):
cmd = [
'relation-ids',
relation
]
result = str(subprocess.check_output(cmd)).split()
if not result:
return None
else:
return result
@cached
def relation_list(rid):
cmd = [
'relation-list',
'-r', rid,
]
result = str(subprocess.check_output(cmd)).split()
if not result:
return None
else:
return result
@cached
def relation_get(attribute, unit=None, rid=None):
cmd = [
'relation-get',
]
if rid:
cmd.append('-r')
cmd.append(rid)
cmd.append(attribute)
if unit:
cmd.append(unit)
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
"""Obtain all relation data as dict by way of JSON"""
cmd = [
'relation-get', '--format=json'
]
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
if remote_unit:
cmd.append('-')
cmd.append(remote_unit)
j = subprocess.check_output(cmd)
d = json.loads(j)
settings = {}
# convert unicode to strings
for k, v in d.iteritems():
settings[str(k)] = str(v)
return settings
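# For example (hypothetical relation id and unit), a call such as
#
#     relation_get_dict(relation_id='identity-service:0',
#                       remote_unit='unit/0')
#
# shells out to `relation-get --format=json -r identity-service:0 - unit/0`
# and returns that unit's settings as a plain-str dict.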
def relation_set(**kwargs):
cmd = [
'relation-set'
]
args = []
for k, v in kwargs.items():
if k == 'rid':
if v:
cmd.append('-r')
cmd.append(v)
else:
args.append('{}={}'.format(k, v))
cmd += args
subprocess.check_call(cmd)
@cached
def unit_get(attribute):
cmd = [
'unit-get',
attribute
]
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def config_get(attribute):
cmd = [
'config-get',
'--format',
'json',
]
out = subprocess.check_output(cmd).strip() # IGNORE:E1103
cfg = json.loads(out)
try:
return cfg[attribute]
except KeyError:
return None
@cached
def get_unit_hostname():
return socket.gethostname()
@cached
def get_host_ip(hostname=unit_get('private-address')):
try:
# Test to see if already an IPv4 address
socket.inet_aton(hostname)
return hostname
except socket.error:
answers = dns.resolver.query(hostname, 'A')
if answers:
return answers[0].address
return None
def _svc_control(service, action):
subprocess.check_call(['service', service, action])
def restart(*services):
for service in services:
_svc_control(service, 'restart')
def stop(*services):
for service in services:
_svc_control(service, 'stop')
def start(*services):
for service in services:
_svc_control(service, 'start')
def reload(*services):
for service in services:
try:
_svc_control(service, 'reload')
except subprocess.CalledProcessError:
# Reload failed - either service does not support reload
# or it was not running - restart will fixup most things
_svc_control(service, 'restart')
def running(service):
try:
output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or
"is running" in output):
return True
else:
return False
def is_relation_made(relation, key='private-address'):
for r_id in (relation_ids(relation) or []):
for unit in (relation_list(r_id) or []):
if relation_get(key, rid=r_id, unit=unit):
return True
return False
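# Guard sketch, mirroring how the keystone hooks use this helper: refuse to
# join mysql when a postgresql relation already carries data (and vice
# versa):
#
#     if is_relation_made('pgsql-db'):
#         raise Exception('Attempting to associate a mysql database when '
#                         'there is already associated a postgresql one')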


@ -3,6 +3,7 @@ from keystoneclient.v2_0 import client
class KeystoneManager(object):
def __init__(self, endpoint, token):
self.api = client.Client(endpoint=endpoint, token=token)


@ -0,0 +1 @@
keystone_hooks.py


@ -0,0 +1 @@
keystone_hooks.py

1
hooks/start Symbolic link

@ -0,0 +1 @@
keystone_hooks.py

1
hooks/stop Symbolic link

@ -0,0 +1 @@
keystone_hooks.py


@ -12,6 +12,8 @@ provides:
requires:
shared-db:
interface: mysql-shared
pgsql-db:
interface: pgsql
ha:
interface: hacluster
scope: container


@ -1 +1 @@
229
230

5
setup.cfg Normal file

@ -0,0 +1,5 @@
[nosetests]
verbosity=2
with-coverage=1
cover-erase=1
cover-package=hooks


@ -0,0 +1,93 @@
# essex
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
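# Example rendered connection (illustrative values, assuming MySQL with an
# SSL CA only):
#   connection = mysql://keystone:secret@10.0.0.10/keystone?ssl_ca=/etc/ssl/ca.pem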
[identity]
driver = keystone.identity.backends.sql.Identity
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[token]
driver = keystone.token.backends.sql.Token
expiration = 86400
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
[pipeline:admin_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension crud_extension admin_service
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = xml_body public_version_service
[pipeline:admin_version_api]
pipeline = xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/ = admin_version_api


@ -0,0 +1,112 @@
# folsom
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[token]
driver = keystone.token.backends.sql.Token
expiration = 86400
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[signing]
token_format = UUID
key_size = 2048
valid_days = 3650
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = stats_monitoring url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = stats_monitoring url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/ = admin_version_api


@ -0,0 +1,130 @@
# grizzly
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[trust]
driver = keystone.trust.backends.sql.Trust
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[token]
driver = keystone.token.backends.sql.Token
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[signing]
[auth]
methods = password,token
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
[pipeline:api_v3]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api


@ -8,8 +8,8 @@ global
defaults
log global
mode http
option httplog
mode tcp
option tcplog
option dontlognull
retries 3
timeout queue 1000
@ -19,17 +19,20 @@ defaults
listen stats :8888
mode http
option httplog
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
{% if units %}
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
balance roundrobin
option tcplog
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
{% endfor %}
{% endif %}
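# Example rendered stanza (hypothetical unit, address and ports):
#   listen admin-port 0.0.0.0:35357
#       balance roundrobin
#       option tcplog
#       server keystone-0 10.0.0.1:35347 check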


@ -0,0 +1,64 @@
# havana
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[credential]
driver = keystone.credential.backends.sql.Credential
[trust]
driver = keystone.trust.backends.sql.Trust
[os_inherit]
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[endpoint_filter]
[token]
driver = keystone.token.backends.sql.Token
expiration = 86400
[cache]
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[assignment]
[oauth1]
[signing]
[auth]
methods = external,password,token,oauth1
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
oauth1 = keystone.auth.plugins.oauth1.OAuth
[paste_deploy]
config_file = keystone-paste.ini


@ -0,0 +1,66 @@
# icehouse
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[database]
{% if database_host -%}
connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[credential]
driver = keystone.credential.backends.sql.Credential
[trust]
driver = keystone.trust.backends.sql.Trust
[os_inherit]
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[endpoint_filter]
[token]
driver = keystone.token.backends.sql.Token
[cache]
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[assignment]
[oauth1]
[signing]
[auth]
methods = external,password,token,oauth1
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
oauth1 = keystone.auth.plugins.oauth1.OAuth
[paste_deploy]
config_file = keystone-paste.ini
[extra_headers]
Distribution = Ubuntu

3
unit_tests/__init__.py Normal file

@ -0,0 +1,3 @@
import sys
sys.path.append('hooks/')


@ -0,0 +1,73 @@
import keystone_context as context
from mock import patch
from test_utils import (
CharmTestCase
)
TO_PATCH = [
'determine_apache_port',
'determine_api_port',
]
class TestKeystoneContexts(CharmTestCase):
def setUp(self):
super(TestKeystoneContexts, self).setUp(context, TO_PATCH)
@patch('charmhelpers.contrib.openstack.context.is_clustered')
@patch('charmhelpers.contrib.openstack.context.determine_apache_port')
@patch('charmhelpers.contrib.openstack.context.determine_api_port')
@patch('charmhelpers.contrib.openstack.context.unit_get')
@patch('charmhelpers.contrib.openstack.context.https')
def test_apache_ssl_context_service_enabled(self, mock_https,
mock_unit_get,
mock_determine_api_port,
mock_determine_apache_port,
mock_is_clustered):
mock_https.return_value = True
mock_unit_get.return_value = '1.2.3.4'
mock_determine_api_port.return_value = '12'
mock_determine_apache_port.return_value = '34'
mock_is_clustered.return_value = False
ctxt = context.ApacheSSLContext()
with patch.object(ctxt, 'enable_modules'):
with patch.object(ctxt, 'configure_cert'):
self.assertEquals(ctxt(), {'endpoints': [(34, 12)],
'private_address': '1.2.3.4',
'namespace': 'keystone'})
self.assertTrue(mock_https.called)
mock_unit_get.assert_called_with('private-address')
@patch('charmhelpers.contrib.openstack.context.relation_ids')
@patch('charmhelpers.contrib.openstack.context.unit_get')
@patch('charmhelpers.contrib.openstack.context.related_units')
@patch('charmhelpers.contrib.openstack.context.relation_get')
@patch('charmhelpers.contrib.openstack.context.log')
@patch('__builtin__.open')
def test_haproxy_context_service_enabled(
self, mock_open, mock_log, mock_relation_get, mock_related_units,
mock_unit_get, mock_relation_ids):
mock_relation_ids.return_value = ['identity-service:0', ]
mock_unit_get.return_value = '1.2.3.4'
mock_relation_get.return_value = '10.0.0.0'
mock_related_units.return_value = ['unit/0', ]
self.determine_apache_port.return_value = '34'
ctxt = context.HAProxyContext()
self.assertEquals(
ctxt(),
{'listen_ports': {'admin_port': 'keystone',
'public_port': 'keystone'},
'service_ports': {'admin-port': ['keystone', '34'],
'public-port': ['keystone', '34']},
'units': {'keystone': '1.2.3.4', 'unit-0': '10.0.0.0'}})
mock_unit_get.assert_called_with('private-address')
mock_relation_get.assert_called_with(
'private-address',
rid='identity-service:0',
unit='unit/0')
mock_open.assert_called_with('/etc/default/haproxy', 'w')


@ -0,0 +1,396 @@
from mock import call, patch, MagicMock
import os
from test_utils import CharmTestCase
os.environ['JUJU_UNIT_NAME'] = 'keystone'
with patch('charmhelpers.core.hookenv.config') as config:
config.return_value = 'keystone'
import keystone_utils as utils
_reg = utils.register_configs
_map = utils.restart_map
utils.register_configs = MagicMock()
utils.restart_map = MagicMock()
import keystone_hooks as hooks
from charmhelpers.contrib import unison
utils.register_configs = _reg
utils.restart_map = _map
TO_PATCH = [
# charmhelpers.core.hookenv
'Hooks',
'config',
'is_relation_made',
'log',
'filter_installed_packages',
'relation_ids',
'relation_list',
'relation_set',
'relation_get',
'related_units',
'unit_get',
'peer_echo',
# charmhelpers.core.host
'apt_install',
'apt_update',
'restart_on_change',
# charmhelpers.contrib.openstack.utils
'configure_installation_source',
# charmhelpers.contrib.hahelpers.cluster_utils
'eligible_leader',
# keystone_utils
'restart_map',
'register_configs',
'do_openstack_upgrade',
'openstack_upgrade_available',
'save_script_rc',
'migrate_database',
'ensure_initial_admin',
'add_service_to_keystone',
'synchronize_ca',
'get_hacluster_config',
'is_leader',
# other
'check_call',
'execd_preinstall',
'mkdir',
'os',
'time',
]
class KeystoneRelationTests(CharmTestCase):
def setUp(self):
super(KeystoneRelationTests, self).setUp(hooks, TO_PATCH)
self.config.side_effect = self.test_config.get
self.ssh_user = 'juju_keystone'
def test_install_hook(self):
repo = 'cloud:precise-grizzly'
self.test_config.set('openstack-origin', repo)
hooks.install()
self.configure_installation_source.assert_called_with(repo)
self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(
['haproxy', 'unison', 'python-keystoneclient',
'uuid', 'python-mysqldb', 'openssl', 'apache2',
'pwgen', 'keystone', 'python-psycopg2'], fatal=True)
self.assertTrue(self.execd_preinstall.called)
def test_db_joined(self):
self.unit_get.return_value = 'keystone.foohost.com'
self.is_relation_made.return_value = False
hooks.db_joined()
self.relation_set.assert_called_with(database='keystone',
username='keystone',
hostname='keystone.foohost.com')
self.unit_get.assert_called_with('private-address')
def test_postgresql_db_joined(self):
self.unit_get.return_value = 'keystone.foohost.com'
self.is_relation_made.return_value = False
hooks.pgsql_db_joined()
self.relation_set.assert_called_with(database='keystone')
def test_db_joined_with_postgresql(self):
self.is_relation_made.return_value = True
with self.assertRaises(Exception) as context:
hooks.db_joined()
self.assertEqual(
context.exception.message,
'Attempting to associate a mysql database when there '
'is already associated a postgresql one')
def test_postgresql_joined_with_db(self):
self.is_relation_made.return_value = True
with self.assertRaises(Exception) as context:
hooks.pgsql_db_joined()
self.assertEqual(
context.exception.message,
'Attempting to associate a postgresql database when there '
'is already associated a mysql one')
@patch.object(hooks, 'CONFIGS')
def test_db_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.db_changed()
self.log.assert_called_with(
'shared-db relation incomplete. Peer not ready?'
)
@patch.object(hooks, 'CONFIGS')
def test_postgresql_db_changed_missing_relation_data(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = []
hooks.pgsql_db_changed()
self.log.assert_called_with(
'pgsql-db relation incomplete. Peer not ready?'
)
def _shared_db_test(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['shared-db']
configs.write = MagicMock()
hooks.db_changed()
def _postgresql_db_test(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['pgsql-db']
configs.write = MagicMock()
hooks.pgsql_db_changed()
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
def test_db_changed(self, identity_changed, configs):
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
self._shared_db_test(configs)
self.assertEquals([call('/etc/keystone/keystone.conf')],
configs.write.call_args_list)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
def test_postgresql_db_changed(self, identity_changed, configs):
self.relation_ids.return_value = ['identity-service:0']
self.related_units.return_value = ['unit/0']
self._postgresql_db_test(configs)
self.assertEquals([call('/etc/keystone/keystone.conf')],
configs.write.call_args_list)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_no_openstack_upgrade_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user):
self.openstack_upgrade_available.return_value = False
self.eligible_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.relation_list.return_value = ['unit/0']
hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
get_homedir.assert_called_with(self.ssh_user)
self.save_script_rc.assert_called_with()
configure_https.assert_called_with()
self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
self.log.assert_called_with(
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_no_openstack_upgrade_not_leader(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user):
self.openstack_upgrade_available.return_value = False
self.eligible_leader.return_value = False
hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
get_homedir.assert_called_with(self.ssh_user)
self.save_script_rc.assert_called_with()
configure_https.assert_called_with()
self.assertTrue(configs.write_all.called)
self.assertFalse(self.migrate_database.called)
self.assertFalse(self.ensure_initial_admin.called)
self.assertFalse(identity_changed.called)
@patch.object(unison, 'ensure_user')
@patch.object(unison, 'get_homedir')
@patch.object(hooks, 'CONFIGS')
@patch.object(hooks, 'identity_changed')
@patch.object(hooks, 'configure_https')
def test_config_changed_with_openstack_upgrade(
self, configure_https, identity_changed,
configs, get_homedir, ensure_user):
self.openstack_upgrade_available.return_value = True
self.eligible_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.relation_list.return_value = ['unit/0']
hooks.config_changed()
ensure_user.assert_called_with(user=self.ssh_user, group='keystone')
get_homedir.assert_called_with(self.ssh_user)
self.assertTrue(self.do_openstack_upgrade.called)
self.save_script_rc.assert_called_with()
configure_https.assert_called_with()
self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.assertTrue(self.ensure_initial_admin.called)
self.log.assert_called_with(
'Firing identity_changed hook for all related services.')
identity_changed.assert_called_with(
relation_id='identity-service:0',
remote_unit='unit/0')
def test_identity_changed_leader(self):
self.eligible_leader.return_value = True
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.add_service_to_keystone.assert_called_with(
'identity-service:0',
'unit/0')
self.assertTrue(self.synchronize_ca.called)
def test_identity_changed_no_leader(self):
self.eligible_leader.return_value = False
hooks.identity_changed(
relation_id='identity-service:0',
remote_unit='unit/0')
self.assertFalse(self.add_service_to_keystone.called)
self.log.assert_called_with(
'Deferring identity_changed() to service leader.')
@patch.object(unison, 'ssh_authorized_peers')
def test_cluster_joined(self, ssh_authorized_peers):
hooks.cluster_joined()
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='juju_keystone',
peer_interface='cluster', ensure_local_user=True)
@patch.object(unison, 'ssh_authorized_peers')
@patch.object(hooks, 'CONFIGS')
def test_cluster_changed(self, configs, ssh_authorized_peers):
hooks.cluster_changed()
self.peer_echo.assert_called_with(includes=['_passwd'])
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertTrue(configs.write_all.called)
def test_ha_joined(self):
self.get_hacluster_config.return_value = {
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'vip': '10.10.10.10',
'vip_iface': 'em1',
'vip_cidr': '24'
}
hooks.ha_joined()
self.assertTrue(self.get_hacluster_config.called)
args = {
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_ks_haproxy': 'haproxy'},
'resources': {'res_ks_vip': 'ocf:heartbeat:IPaddr2',
'res_ks_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_ks_vip': 'params ip="10.10.10.10"'
' cidr_netmask="24" nic="em1"',
'res_ks_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
}
self.relation_set.assert_called_with(**args)
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_not_clustered_not_leader(self, configs):
self.relation_get.return_value = False
self.is_leader.return_value = False
hooks.ha_changed()
self.assertTrue(configs.write_all.called)
@patch.object(hooks, 'CONFIGS')
def test_ha_relation_changed_clustered_leader(self, configs):
self.relation_get.return_value = True
self.is_leader.return_value = True
self.relation_ids.return_value = ['identity-service:0']
self.test_config.set('vip', '10.10.10.10')
hooks.ha_changed()
self.assertTrue(configs.write_all.called)
self.log.assert_called_with(
'Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
self.relation_set.assert_called_with(relation_id='identity-service:0',
auth_host='10.10.10.10',
service_host='10.10.10.10')
@patch.object(hooks, 'CONFIGS')
def test_configure_https_enable(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['https']
configs.write = MagicMock()
hooks.configure_https()
self.assertTrue(configs.write_all.called)
cmd = ['a2ensite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)
@patch.object(hooks, 'CONFIGS')
def test_configure_https_disable(self, configs):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['']
configs.write = MagicMock()
hooks.configure_https()
self.assertTrue(configs.write_all.called)
cmd = ['a2dissite', 'openstack_https_frontend']
self.check_call.assert_called_with(cmd)
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_leader(self, ssh_authorized_peers):
self.eligible_leader.return_value = True
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.log.assert_called_with(
'Cluster leader - ensuring endpoint configuration'
' is up to date')
self.assertTrue(self.ensure_initial_admin.called)
@patch.object(unison, 'ssh_authorized_peers')
def test_upgrade_charm_not_leader(self, ssh_authorized_peers):
self.eligible_leader.return_value = False
self.filter_installed_packages.return_value = []
hooks.upgrade_charm()
self.assertTrue(self.apt_install.called)
ssh_authorized_peers.assert_called_with(
user=self.ssh_user, group='keystone',
peer_interface='cluster', ensure_local_user=True)
self.assertTrue(self.synchronize_ca.called)
self.assertFalse(self.log.called)
self.assertFalse(self.ensure_initial_admin.called)


@ -0,0 +1,292 @@
from mock import patch, call, MagicMock
from test_utils import CharmTestCase
import os
import manager
os.environ['JUJU_UNIT_NAME'] = 'keystone'
with patch('charmhelpers.core.hookenv.config') as config:
import keystone_utils as utils
import keystone_hooks as hooks
TO_PATCH = [
'api_port',
'config',
'create_user',
'os_release',
'log',
'get_ca',
'create_role',
'create_service_entry',
'create_endpoint_template',
'get_admin_token',
'get_local_endpoint',
'get_requested_roles',
'get_service_password',
'get_os_codename_install_source',
'grant_role',
'configure_installation_source',
'eligible_leader',
'https',
'is_clustered',
'service_stop',
'service_start',
'relation_get',
'relation_set',
'https',
'unit_private_ip',
# generic
'apt_update',
'apt_upgrade',
'apt_install',
'subprocess',
'time',
'pwgen',
]
class TestKeystoneUtils(CharmTestCase):
def setUp(self):
super(TestKeystoneUtils, self).setUp(utils, TO_PATCH)
self.config.side_effect = self.test_config.get
self.ctxt = MagicMock()
self.rsc_map = {
'/etc/keystone/keystone.conf': {
'services': ['keystone'],
'contexts': [self.ctxt],
},
'/etc/apache2/sites-available/openstack_https_frontend': {
'services': ['apache2'],
'contexts': [self.ctxt],
},
'/etc/apache2/sites-available/openstack_https_frontend.conf': {
'services': ['apache2'],
'contexts': [self.ctxt],
}
}
@patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer')
@patch('os.path.exists')
@patch.object(utils, 'resource_map')
def test_register_configs_apache(self, resource_map, exists, renderer):
exists.return_value = False
self.os_release.return_value = 'havana'
fake_renderer = MagicMock()
fake_renderer.register = MagicMock()
renderer.return_value = fake_renderer
resource_map.return_value = self.rsc_map
utils.register_configs()
renderer.assert_called_with(
openstack_release='havana', templates_dir='templates/')
ex_reg = [
call('/etc/keystone/keystone.conf', [self.ctxt]),
call(
'/etc/apache2/sites-available/openstack_https_frontend',
[self.ctxt]),
call(
'/etc/apache2/sites-available/openstack_https_frontend.conf',
[self.ctxt]),
]
self.assertEquals(fake_renderer.register.call_args_list, ex_reg)
def test_determine_ports(self):
self.test_config.set('admin-port', '80')
self.test_config.set('service-port', '81')
result = utils.determine_ports()
self.assertEquals(result, ['80', '81'])
def test_determine_packages(self):
result = utils.determine_packages()
ex = utils.BASE_PACKAGES + ['keystone', 'haproxy', 'apache2']
self.assertEquals(set(ex), set(result))
@patch.object(hooks, 'CONFIGS')
@patch.object(utils, 'determine_packages')
@patch.object(utils, 'migrate_database')
def test_openstack_upgrade_leader(
self, migrate_database, determine_packages, configs):
self.test_config.set('openstack-origin', 'precise')
determine_packages.return_value = []
self.eligible_leader.return_value = True
utils.do_openstack_upgrade(configs)
self.get_os_codename_install_source.assert_called_with('precise')
self.configure_installation_source.assert_called_with('precise')
self.assertTrue(self.apt_update.called)
dpkg_opts = [
'--option', 'Dpkg::Options::=--force-confnew',
'--option', 'Dpkg::Options::=--force-confdef',
]
self.apt_upgrade.assert_called_with(
options=dpkg_opts,
fatal=True,
dist=True)
self.apt_install.assert_called_with(
packages=[],
options=dpkg_opts,
fatal=True)
self.assertTrue(configs.set_release.called)
self.assertTrue(configs.write_all.called)
self.assertTrue(migrate_database.called)
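# migrate_database() is expected to stop keystone, run
# 'keystone-manage db_sync' as the keystone user, then restart the service.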
def test_migrate_database(self):
utils.migrate_database()
self.service_stop.assert_called_with('keystone')
cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']
self.subprocess.check_output.assert_called_with(cmd)
self.service_start.assert_called_with('keystone')
@patch.object(utils, 'b64encode')
def test_add_service_to_keystone_clustered_https_none_values(
self, b64encode):
relation_id = 'identity-service:0'
remote_unit = 'unit/0'
self.is_clustered.return_value = True
self.https.return_value = True
self.test_config.set('https-service-endpoints', 'True')
self.test_config.set('vip', '10.10.10.10')
self.test_config.set('admin-port', 80)
self.test_config.set('service-port', 81)
b64encode.return_value = 'certificate'
self.get_requested_roles.return_value = ['role1', ]
self.relation_get.return_value = {'service': 'keystone',
'region': 'RegionOne',
'public_url': 'None',
'admin_url': '10.0.0.2',
'internal_url': '192.168.1.2'}
utils.add_service_to_keystone(
relation_id=relation_id,
remote_unit=remote_unit)
self.assertTrue(self.is_clustered.called)
self.assertTrue(self.https.called)
self.assertTrue(self.create_role.called)
relation_data = {'auth_host': '10.10.10.10',
'service_host': '10.10.10.10',
'auth_protocol': 'https',
'service_protocol': 'https',
'auth_port': 80,
'service_port': 81,
'https_keystone': 'True',
'ca_cert': 'certificate'}
self.relation_set.assert_called_with(
relation_id=relation_id,
**relation_data)
@patch.object(utils, 'ensure_valid_service')
@patch.object(utils, 'add_endpoint')
@patch.object(manager, 'KeystoneManager')
def test_add_service_to_keystone_no_clustered_no_https_complete_values(
self, KeystoneManager, add_endpoint, ensure_valid_service):
relation_id = 'identity-service:0'
remote_unit = 'unit/0'
self.get_admin_token.return_value = 'token'
self.get_service_password.return_value = 'password'
self.test_config.set('service-tenant', 'tenant')
self.test_config.set('admin-role', 'admin')
self.get_requested_roles.return_value = ['role1', ]
self.unit_private_ip.return_value = '10.0.0.3'
self.test_config.set('admin-port', 80)
self.test_config.set('service-port', 81)
self.is_clustered.return_value = False
self.https.return_value = False
self.test_config.set('https-service-endpoints', 'False')
self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
mock_keystone = MagicMock()
mock_keystone.resolve_tenant_id.return_value = 'tenant_id'
KeystoneManager.return_value = mock_keystone
self.relation_get.return_value = {'service': 'keystone',
'region': 'RegionOne',
'public_url': '10.0.0.1',
'admin_url': '10.0.0.2',
'internal_url': '192.168.1.2'}
utils.add_service_to_keystone(
relation_id=relation_id,
remote_unit=remote_unit)
ensure_valid_service.assert_called_with('keystone')
add_endpoint.assert_called_with(region='RegionOne', service='keystone',
publicurl='10.0.0.1',
adminurl='10.0.0.2',
internalurl='192.168.1.2')
self.assertTrue(self.get_admin_token.called)
self.get_service_password.assert_called_with('keystone')
self.create_user.assert_called_with('keystone', 'password', 'tenant')
self.grant_role.assert_called_with('keystone', 'admin', 'tenant')
self.create_role.assert_called_with('role1', 'keystone', 'tenant')
self.assertTrue(self.is_clustered.called)
relation_data = {'admin_token': 'token', 'service_port': 81,
'auth_port': 80, 'service_username': 'keystone',
'service_password': 'password',
'service_tenant': 'tenant',
'https_keystone': 'False',
'ssl_cert': '', 'ssl_key': '',
'ca_cert': '', 'auth_host': '10.0.0.3',
'service_host': '10.0.0.3',
'auth_protocol': 'http', 'service_protocol': 'http',
'service_tenant_id': 'tenant_id'}
self.relation_set.assert_called_with(
relation_id=relation_id,
**relation_data)
@patch.object(utils, 'ensure_valid_service')
@patch.object(utils, 'add_endpoint')
@patch.object(manager, 'KeystoneManager')
def test_add_service_to_keystone_nosubset(
self, KeystoneManager, add_endpoint, ensure_valid_service):
relation_id = 'identity-service:0'
remote_unit = 'unit/0'
self.relation_get.return_value = {'ec2_service': 'nova',
'ec2_region': 'RegionOne',
'ec2_public_url': '10.0.0.1',
'ec2_admin_url': '10.0.0.2',
'ec2_internal_url': '192.168.1.2'}
self.get_local_endpoint.return_value = 'http://localhost:80/v2.0/'
KeystoneManager.return_value.resolve_tenant_id.return_value = 'tenant_id'
utils.add_service_to_keystone(
relation_id=relation_id,
remote_unit=remote_unit)
ensure_valid_service.assert_called_with('nova')
add_endpoint.assert_called_with(region='RegionOne', service='nova',
publicurl='10.0.0.1',
adminurl='10.0.0.2',
internalurl='192.168.1.2')
def test_ensure_valid_service_incorrect(self):
utils.ensure_valid_service('fakeservice')
self.log.assert_called_with("Invalid service requested: 'fakeservice'")
self.relation_set.assert_called_with(admin_token=-1)
def test_add_endpoint(self):
publicurl = '10.0.0.1'
adminurl = '10.0.0.2'
internalurl = '10.0.0.3'
utils.add_endpoint(
'RegionOne',
'nova',
publicurl,
adminurl,
internalurl)
self.create_service_entry.assert_called_with(
'nova',
'compute',
'Nova Compute Service')
self.create_endpoint_template.assert_called_with(
region='RegionOne', service='nova',
publicurl=publicurl, adminurl=adminurl,
internalurl=internalurl)

119
unit_tests/test_utils.py Normal file
View File

@ -0,0 +1,119 @@
import logging
import os
import unittest
import yaml
from contextlib import contextmanager
from mock import patch, MagicMock
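# Helpers to locate the charm's own config.yaml and derive default option
# values for use as test fixtures.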
def load_config():
'''Walk backwards from __file__ looking for config.yaml,
load and return the 'options' section.
'''
config = None
f = __file__
while config is None:
d = os.path.dirname(f)
if os.path.isfile(os.path.join(d, 'config.yaml')):
config = os.path.join(d, 'config.yaml')
break
if d == f:
# Reached the filesystem root without finding config.yaml.
break
f = d
if not config:
logging.error('Could not find config.yaml in any parent directory '
'of %s.' % __file__)
raise Exception
return yaml.safe_load(open(config).read())['options']
def get_default_config():
'''Load default charm config from config.yaml and return it as a dict.
If no default is set in config.yaml, its value is None.
'''
default_config = {}
config = load_config()
for k, v in config.iteritems():
if 'default' in v:
default_config[k] = v['default']
else:
default_config[k] = None
return default_config
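# Base class that patches every name in `patches` on the module under test
# and exposes each mock as an attribute. Typical usage (a sketch):
#
#     class MyTest(CharmTestCase):
#         def setUp(self):
#             super(MyTest, self).setUp(utils, TO_PATCH)
#             self.config.side_effect = self.test_config.get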
class CharmTestCase(unittest.TestCase):
def setUp(self, obj, patches):
super(CharmTestCase, self).setUp()
self.patches = patches
self.obj = obj
self.test_config = TestConfig()
self.test_relation = TestRelation()
self.patch_all()
def patch(self, method):
_m = patch.object(self.obj, method)
mock = _m.start()
self.addCleanup(_m.stop)
return mock
def patch_all(self):
for method in self.patches:
setattr(self, method, self.patch(method))
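# Minimal stand-in for hookenv.config(), seeded with the defaults declared
# in the charm's config.yaml; set() refuses keys the charm does not define.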
class TestConfig(object):
def __init__(self):
self.config = get_default_config()
def get(self, attr=None):
if not attr:
return self.get_all()
try:
return self.config[attr]
except KeyError:
return None
def get_all(self):
return self.config
def set(self, attr, value):
if attr not in self.config:
raise KeyError
self.config[attr] = value
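# Minimal stand-in for relation_get(); tests seed data via set(). The unit
# and rid arguments are accepted for signature compatibility but ignored.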
class TestRelation(object):
def __init__(self, relation_data=None):
self.relation_data = relation_data if relation_data is not None else {}
def set(self, relation_data):
self.relation_data = relation_data
def get(self, attr=None, unit=None, rid=None):
if attr is None:
return self.relation_data
elif attr in self.relation_data:
return self.relation_data[attr]
return None
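# Example usage (a sketch, with hypothetical names):
#
#     with patch_open() as (mock_open, mock_file):
#         mock_file.read.return_value = 'contents'
#         function_under_test()
#         mock_open.assert_called_with('/etc/keystone/keystone.conf')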
@contextmanager
def patch_open():
'''Patch open() to allow mocking both open() itself and the file that is
yielded.
Yields the mock for "open" and "file", respectively.
'''
mock_open = MagicMock(spec=open)
mock_file = MagicMock(spec=file)
@contextmanager
def stub_open(*args, **kwargs):
mock_open(*args, **kwargs)
yield mock_file
with patch('__builtin__.open', stub_open):
yield mock_open, mock_file