Merge ssl-everywhere branch (may break stuff)

commit 057d4b1d8c
James Page 2014-03-27 10:54:38 +00:00
42 changed files with 4467 additions and 801 deletions

.project Normal file (17 lines)

@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>keystone</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

.pydevproject Normal file (8 lines)

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/keystone/hooks</path>
</pydev_pathproperty>
</pydev_project>

Makefile Normal file (14 lines)

@@ -0,0 +1,14 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks
@flake8 --exclude hooks/charmhelpers unit_tests
@charm proof
test:
@echo Starting tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
sync:
@charm-helper-sync -c charm-helpers.yaml

charm-helpers.yaml

@@ -2,4 +2,11 @@ branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
- core
- fetch
- contrib.openstack|inc=*
- contrib.storage
- contrib.hahelpers:
- apache
- cluster
- contrib.unison
- payload.execd

config.yaml

@@ -122,3 +122,22 @@ options:
default: "False" default: "False"
type: string type: string
description: "Manage SSL certificates for all service endpoints." description: "Manage SSL certificates for all service endpoints."
use-https:
default: "no"
type: string
description: "Use SSL for Keystone itself. Set to 'yes' to enable it."
ssl_cert:
type: string
description: |
SSL certificate to install and use for API ports. Setting this value
and ssl_key will enable reverse proxying, point Keystone's entry in the
Keystone catalog to use https, and override any certificate and key
issued by Keystone (if it is configured to do so).
ssl_key:
type: string
description: SSL key to use with certificate specified as ssl_cert.
ssl_ca:
type: string
description: |
SSL CA to use with the certificate and key provided - this is only
required if you are providing a privately signed ssl_cert and ssl_key.

hooks/charmhelpers/contrib/hahelpers/apache.py Normal file

@@ -0,0 +1,59 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
log("Inspecting identity-service relations for SSL certificate.",
level=INFO)
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = config_get('ssl_ca')
if ca_cert is None:
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if ca_cert is None:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
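# A minimal usage sketch (assumed charm hook code, not part of this file):
# get_cert()/get_ca_cert() fall back to identity-service relation data when
# the ssl_cert/ssl_key/ssl_ca config options are unset.
#
#     cert, key = get_cert()
#     ca_cert = get_ca_cert()
#     if ca_cert:
#         install_ca_cert(ca_cert)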

hooks/charmhelpers/contrib/hahelpers/cluster.py Normal file

@@ -0,0 +1,183 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
unit_get,
)
class HAIncompleteConfig(Exception):
pass
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
or relation data to configure HTTPS.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]
# NOTE: works around (LP: #1203241)
if (None not in rel_state) and ('' not in rel_state):
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_apache_port(public_port):
'''
Description: Determine correct apache listening port based on public port
and state of the cluster.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
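# Worked example (values illustrative): for a clustered, HTTPS-enabled
# service with public port 5000, determine_api_port(5000) == 4980 (two
# 10-port offsets) while determine_apache_port(5000) == 4990, leaving
# 5000 itself for the haproxy frontend.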
def get_hacluster_config():
'''
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
conf = {}
for setting in settings:
conf[setting] = config_get(setting)
missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
return conf
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration and hacluster.
:configs : OSTemplateRenderer: A config tempating object to inspect for
a complete https context.
:vip_setting: str: Setting in charm config that specifies
VIP address.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
if is_clustered():
addr = config_get(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)

hooks/charmhelpers/contrib/openstack/alternatives.py Normal file

@@ -0,0 +1,17 @@
''' Helper for managing alternatives for file conflict resolution '''
import subprocess
import shutil
import os
def install_alternative(name, target, source, priority=50):
''' Install alternative configuration '''
if (os.path.exists(target) and not os.path.islink(target)):
# Move existing file/directory away before installing
shutil.move(target, '{}.bak'.format(target))
cmd = [
'update-alternatives', '--force', '--install',
target, name, source, str(priority)
]
subprocess.check_call(cmd)
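# A hedged usage sketch (paths and alternative name are illustrative, not
# from this commit): register a charm-maintained copy of a config file so
# it survives package upgrades.
#
#     install_alternative('keystone.conf',
#                         '/etc/keystone/keystone.conf',
#                         '/var/lib/charm/keystone/keystone.conf')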

hooks/charmhelpers/contrib/openstack/context.py Normal file

@@ -0,0 +1,666 @@
import json
import os
import time
from base64 import b64decode
from subprocess import (
check_call
)
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
)
from charmhelpers.core.hookenv import (
config,
local_unit,
log,
relation_get,
relation_ids,
related_units,
unit_get,
unit_private_ip,
ERROR,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
https,
is_clustered
)
from charmhelpers.contrib.hahelpers.apache import (
get_cert,
get_ca_cert,
)
from charmhelpers.contrib.openstack.neutron import (
neutron_plugin_attribute,
)
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
class OSContextError(Exception):
pass
def ensure_packages(packages):
'''Install but do not upgrade required plugin packages'''
required = filter_installed_packages(packages)
if required:
apt_install(required, fatal=True)
def context_complete(ctxt):
_missing = []
for k, v in ctxt.iteritems():
if v is None or v == '':
_missing.append(k)
if _missing:
log('Missing required data: %s' % ' '.join(_missing), level='INFO')
return False
return True
def config_flags_parser(config_flags):
if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in xrange(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this is not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
log("invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
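# Worked examples, traced from the parser above (inputs illustrative):
#   config_flags_parser('key1=val1,key2=val2')
#       -> {'key1': 'val1', 'key2': 'val2'}
#   config_flags_parser('opts=a,b,extra=c')  # comma retained in a value
#       -> {'opts': 'a,b', 'extra': 'c'}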
class OSContextGenerator(object):
interfaces = []
def __call__(self):
raise NotImplementedError
class SharedDBContext(OSContextGenerator):
interfaces = ['shared-db']
def __init__(self,
database=None, user=None, relation_prefix=None, ssl_dir=None):
'''
Allows inspecting relation for settings prefixed with relation_prefix.
This is useful for parsing access for multiple databases returned via
the shared-db interface (eg, nova_password, quantum_password)
'''
self.relation_prefix = relation_prefix
self.database = database
self.user = user
self.ssl_dir = ssl_dir
def __call__(self):
self.database = self.database or config('database')
self.user = self.user or config('database-user')
if None in [self.database, self.user]:
log('Could not generate shared_db context. '
'Missing required charm config options. '
'(database name and user)')
raise OSContextError
ctxt = {}
password_setting = 'password'
if self.relation_prefix:
password_setting = self.relation_prefix + '_password'
for rid in relation_ids('shared-db'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
ctxt = {
'database_host': rdata.get('db_host'),
'database': self.database,
'database_user': self.user,
'database_password': rdata.get(password_setting)
}
if context_complete(ctxt):
db_ssl(rdata, ctxt, self.ssl_dir)
return ctxt
return {}
def db_ssl(rdata, ctxt, ssl_dir):
if 'ssl_ca' in rdata and ssl_dir:
ca_path = os.path.join(ssl_dir, 'db-client.ca')
with open(ca_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_ca']))
ctxt['database_ssl_ca'] = ca_path
elif 'ssl_ca' in rdata:
log("Charm not setup for ssl support but ssl ca found")
return ctxt
if 'ssl_cert' in rdata:
cert_path = os.path.join(
ssl_dir, 'db-client.cert')
if not os.path.exists(cert_path):
log("Waiting 1m for ssl client cert validity")
time.sleep(60)
with open(cert_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_cert']))
ctxt['database_ssl_cert'] = cert_path
key_path = os.path.join(ssl_dir, 'db-client.key')
with open(key_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_key']))
ctxt['database_ssl_key'] = key_path
return ctxt
class IdentityServiceContext(OSContextGenerator):
interfaces = ['identity-service']
def __call__(self):
log('Generating template context for identity-service')
ctxt = {}
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
ctxt = {
'service_port': rdata.get('service_port'),
'service_host': rdata.get('service_host'),
'auth_host': rdata.get('auth_host'),
'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'service_protocol':
rdata.get('service_protocol') or 'http',
'auth_protocol':
rdata.get('auth_protocol') or 'http',
}
if context_complete(ctxt):
return ctxt
return {}
class AMQPContext(OSContextGenerator):
interfaces = ['amqp']
def __init__(self, ssl_dir=None):
self.ssl_dir = ssl_dir
def __call__(self):
log('Generating template context for amqp')
conf = config()
try:
username = conf['rabbit-user']
vhost = conf['rabbit-vhost']
except KeyError as e:
log('Could not generate amqp context. '
'Missing required charm config options: %s.' % e)
raise OSContextError
ctxt = {}
for rid in relation_ids('amqp'):
ha_vip_only = False
for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit):
ctxt['clustered'] = True
ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
unit=unit)
else:
ctxt['rabbitmq_host'] = relation_get('private-address',
rid=rid, unit=unit)
ctxt.update({
'rabbitmq_user': username,
'rabbitmq_password': relation_get('password', rid=rid,
unit=unit),
'rabbitmq_virtual_host': vhost,
})
ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
if ssl_port:
ctxt['rabbit_ssl_port'] = ssl_port
ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
if ssl_ca:
ctxt['rabbit_ssl_ca'] = ssl_ca
if relation_get('ha_queues', rid=rid, unit=unit) is not None:
ctxt['rabbitmq_ha_queues'] = True
ha_vip_only = relation_get('ha-vip-only',
rid=rid, unit=unit) is not None
if context_complete(ctxt):
if 'rabbit_ssl_ca' in ctxt:
if not self.ssl_dir:
log(("Charm not setup for ssl support "
"but ssl ca found"))
break
ca_path = os.path.join(
self.ssl_dir, 'rabbit-client-ca.pem')
with open(ca_path, 'w') as fh:
fh.write(b64decode(ctxt['rabbit_ssl_ca']))
ctxt['rabbit_ssl_ca'] = ca_path
# Sufficient information found = break out!
break
# Used for active/active rabbitmq >= grizzly
if ('clustered' not in ctxt or ha_vip_only) \
and len(related_units(rid)) > 1:
rabbitmq_hosts = []
for unit in related_units(rid):
rabbitmq_hosts.append(relation_get('private-address',
rid=rid, unit=unit))
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
if not context_complete(ctxt):
return {}
else:
return ctxt
class CephContext(OSContextGenerator):
interfaces = ['ceph']
def __call__(self):
'''This generates context for /etc/ceph/ceph.conf templates'''
if not relation_ids('ceph'):
return {}
log('Generating template context for ceph')
mon_hosts = []
auth = None
key = None
use_syslog = str(config('use-syslog')).lower()
for rid in relation_ids('ceph'):
for unit in related_units(rid):
mon_hosts.append(relation_get('private-address', rid=rid,
unit=unit))
auth = relation_get('auth', rid=rid, unit=unit)
key = relation_get('key', rid=rid, unit=unit)
ctxt = {
'mon_hosts': ' '.join(mon_hosts),
'auth': auth,
'key': key,
'use_syslog': use_syslog
}
if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph')
if not context_complete(ctxt):
return {}
ensure_packages(['ceph-common'])
return ctxt
class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
def __call__(self):
'''
Builds half a context for the haproxy template, which describes
all peers to be included in the cluster. Each charm needs to include
its own context generator that describes the port mapping.
'''
if not relation_ids('cluster'):
return {}
cluster_hosts = {}
l_unit = local_unit().replace('/', '-')
cluster_hosts[l_unit] = unit_get('private-address')
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
addr = relation_get('private-address', rid=rid, unit=unit)
cluster_hosts[_unit] = addr
ctxt = {
'units': cluster_hosts,
}
if len(cluster_hosts.keys()) > 1:
# Enable haproxy when we have enough peers.
log('Ensuring haproxy enabled in /etc/default/haproxy.')
with open('/etc/default/haproxy', 'w') as out:
out.write('ENABLED=1\n')
return ctxt
log('HAProxy context is incomplete, this unit has no peers.')
return {}
class ImageServiceContext(OSContextGenerator):
interfaces = ['image-service']
def __call__(self):
'''
Obtains the glance API server from the image-service relation. Useful
in nova and cinder (currently).
'''
log('Generating template context for image-service.')
rids = relation_ids('image-service')
if not rids:
return {}
for rid in rids:
for unit in related_units(rid):
api_server = relation_get('glance-api-server',
rid=rid, unit=unit)
if api_server:
return {'glance_api_servers': api_server}
log('ImageService context is incomplete. '
'Missing required relation data.')
return {}
class ApacheSSLContext(OSContextGenerator):
"""
Generates a context for an apache vhost configuration that configures
HTTPS reverse proxying for one or many endpoints. Generated context
looks something like:
{
'namespace': 'cinder',
'private_address': 'iscsi.mycinderhost.com',
'endpoints': [(8776, 8766), (8777, 8767)]
}
The endpoints list consists of tuples mapping external ports
to internal ports.
"""
interfaces = ['https']
# charms should inherit this context and set external ports
# and service namespace accordingly.
external_ports = []
service_namespace = None
def enable_modules(self):
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
check_call(cmd)
def configure_cert(self):
if not os.path.isdir('/etc/apache2/ssl'):
os.mkdir('/etc/apache2/ssl')
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
if not os.path.isdir(ssl_dir):
os.mkdir(ssl_dir)
cert, key = get_cert()
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
cert_out.write(b64decode(cert))
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
key_out.write(b64decode(key))
ca_cert = get_ca_cert()
if ca_cert:
with open(CA_CERT_PATH, 'w') as ca_out:
ca_out.write(b64decode(ca_cert))
check_call(['update-ca-certificates'])
def __call__(self):
if isinstance(self.external_ports, basestring):
self.external_ports = [self.external_ports]
if (not self.external_ports or not https()):
return {}
self.configure_cert()
self.enable_modules()
ctxt = {
'namespace': self.service_namespace,
'private_address': unit_get('private-address'),
'endpoints': []
}
if is_clustered():
ctxt['private_address'] = config('vip')
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
return ctxt
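# Illustrative result (ports assumed): a charm setting external_ports=[5000]
# on a clustered, HTTPS-enabled unit gets back
#     {'namespace': <service>, 'private_address': <vip>,
#      'endpoints': [(4990, 4980)]}
# i.e. apache listens on 4990 and proxies to the API on 4980.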
class NeutronContext(OSContextGenerator):
interfaces = []
@property
def plugin(self):
return None
@property
def network_manager(self):
return None
@property
def packages(self):
return neutron_plugin_attribute(
self.plugin, 'packages', self.network_manager)
@property
def neutron_security_groups(self):
return None
def _ensure_packages(self):
[ensure_packages(pkgs) for pkgs in self.packages]
def _save_flag_file(self):
if self.network_manager == 'quantum':
_file = '/etc/nova/quantum_plugin.conf'
else:
_file = '/etc/nova/neutron_plugin.conf'
with open(_file, 'wb') as out:
out.write(self.plugin + '\n')
def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'ovs',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config
}
return ovs_ctxt
def nvp_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
nvp_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'nvp',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config
}
return nvp_ctxt
def neutron_ctxt(self):
if https():
proto = 'https'
else:
proto = 'http'
if is_clustered():
host = config('vip')
else:
host = unit_get('private-address')
url = '%s://%s:%s' % (proto, host, '9696')
ctxt = {
'network_manager': self.network_manager,
'neutron_url': url,
}
return ctxt
def __call__(self):
self._ensure_packages()
if self.network_manager not in ['quantum', 'neutron']:
return {}
if not self.plugin:
return {}
ctxt = self.neutron_ctxt()
if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt())
elif self.plugin == 'nvp':
ctxt.update(self.nvp_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags
self._save_flag_file()
return ctxt
class OSConfigFlagContext(OSContextGenerator):
"""
Responsible for adding user-defined config-flags in charm config to a
template context.
NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some Openstack config files support
comma-separated lists as values.
"""
def __call__(self):
config_flags = config('config-flags')
if not config_flags:
return {}
flags = config_flags_parser(config_flags)
return {'user_config_flags': flags}
class SubordinateConfigContext(OSContextGenerator):
"""
Responsible for inspecting relations to subordinates that
may be exporting required config via a json blob.
The subordinate interface allows subordinates to export their
configuration requirements to the principal for multiple config
files and multiple services. I.e., a subordinate that has interfaces
to both glance and nova may export the following yaml blob as json:
glance:
/etc/glance/glance-api.conf:
sections:
DEFAULT:
- [key1, value1]
/etc/glance/glance-registry.conf:
MYSECTION:
- [key2, value2]
nova:
/etc/nova/nova.conf:
sections:
DEFAULT:
- [key3, value3]
It is then up to the principal charms to subscribe this context to
the service+config file it is interested in. Configuration data will
be available in the template context, in glance's case, as:
ctxt = {
... other context ...
'subordinate_config': {
'DEFAULT': {
'key1': 'value1',
},
'MYSECTION': {
'key2': 'value2',
},
}
}
"""
def __init__(self, service, config_file, interface):
"""
:param service : Service name key to query in any subordinate
data found
:param config_file : Service's config file to query sections
:param interface : Subordinate interface to inspect
"""
self.service = service
self.config_file = config_file
self.interface = interface
def __call__(self):
ctxt = {}
for rid in relation_ids(self.interface):
for unit in related_units(rid):
sub_config = relation_get('subordinate_configuration',
rid=rid, unit=unit)
if sub_config and sub_config != '':
try:
sub_config = json.loads(sub_config)
except:
log('Could not parse JSON from subordinate_config '
'setting from %s' % rid, level=ERROR)
continue
if self.service not in sub_config:
log('Found subordinate_config on %s but it contained '
'nothing for %s service' % (rid, self.service))
continue
sub_config = sub_config[self.service]
if self.config_file not in sub_config:
log('Found subordinate_config on %s but it contained '
'nothing for %s' % (rid, self.config_file))
continue
sub_config = sub_config[self.config_file]
for k, v in sub_config.iteritems():
ctxt[k] = v
if not ctxt:
ctxt['sections'] = {}
return ctxt
class SyslogContext(OSContextGenerator):
def __call__(self):
ctxt = {
'use_syslog': config('use-syslog')
}
return ctxt
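# Sketch of how a charm extends this module (hypothetical generator, not
# part of this commit): subclass OSContextGenerator and return a dict of
# template variables from __call__.
#
#     class WorkerContext(OSContextGenerator):
#         interfaces = []
#         def __call__(self):
#             # 'worker-multiplier' is an assumed charm config option.
#             return {'workers': config('worker-multiplier') or 1}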

hooks/charmhelpers/contrib/openstack/neutron.py Normal file

@@ -0,0 +1,171 @@
# Various utilities for dealing with Neutron and the renaming from Quantum.
from subprocess import check_output
from charmhelpers.core.hookenv import (
config,
log,
ERROR,
)
from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Returns the linux-headers package matching the running kernel,
needed when building DKMS packages"""
kver = check_output(['uname', '-r']).strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have support for GRE and VXLAN native
if kernel_version() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
# legacy
def quantum_plugins():
from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
}
}
# NOTE: patch in ml2 plugin for icehouse onwards
if release >= 'icehouse':
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
return plugins
def neutron_plugin_attribute(plugin, attr, net_manager=None):
manager = net_manager or network_manager()
if manager == 'quantum':
plugins = quantum_plugins()
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log('Error: Network manager does not support plugins.')
raise Exception
try:
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise Exception
try:
return _plugin[attr]
except KeyError:
return None
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
that require compatibility (eg, deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')
manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'
else:
# ensure accurate naming for all releases post-H
return 'neutron'
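# Worked example against the plugin tables above (release values assumed
# to come from os_release()): with network-manager=neutron on havana,
# neutron_plugin_attribute('ovs', 'config', 'neutron') resolves to
# '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'; on icehouse
# the ml2 patching above changes this to
# '/etc/neutron/plugins/ml2/ml2_conf.ini'.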

hooks/charmhelpers/contrib/openstack/templates/__init__.py Normal file

@@ -0,0 +1,2 @@
# dummy __init__.py to fool syncer into thinking this is a syncable python
# module

hooks/charmhelpers/contrib/openstack/templates/ceph.conf Normal file

@@ -0,0 +1,15 @@
###############################################################################
# [ WARNING ]
# ceph configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[global]
{% if auth -%}
auth_supported = {{ auth }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
{% endif -%}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}

hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg Normal file

@@ -0,0 +1,36 @@
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0
defaults
log global
mode tcp
option tcplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
timeout client 30000
timeout server 30000
listen stats :8888
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
{% if units -%}
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
balance roundrobin
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
{% endfor -%}
{% endif -%}
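# The blocks above expect a context shaped roughly like the following
# (values illustrative): 'units' comes from HAProxyContext in context.py,
# while 'service_ports' is supplied by the consuming charm, mapping a
# service name to [frontend_port, backend_port]:
#
#   {'units': {'keystone-0': '10.0.0.1', 'keystone-1': '10.0.0.2'},
#    'service_ports': {'admin-port': [35357, 35347]}}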

hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend Normal file

@@ -0,0 +1,23 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
ServerName {{ private_address }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
{% endfor -%}
{% endif -%}

hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf Normal file

@@ -0,0 +1,23 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
ServerName {{ private_address }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
{% endfor -%}
{% endif -%}

hooks/charmhelpers/contrib/openstack/templating.py Normal file

@@ -0,0 +1,280 @@
import os
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log,
ERROR,
INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
# python-jinja2 may not be installed yet, or we're running unittests.
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
class OSConfigException(Exception):
pass
def get_loader(templates_dir, os_release):
"""
Create a jinja2.ChoiceLoader containing template dirs up to
and including os_release. If a release's template directory
is missing at templates_dir, it will be omitted from the loader.
templates_dir is added to the bottom of the search list as a base
loading dir.
A charm may also ship a templates dir with this module
and it will be appended to the bottom of the search list, eg:
hooks/charmhelpers/contrib/openstack/templates.
:param templates_dir: str: Base template directory containing release
sub-directories.
:param os_release : str: OpenStack release codename to construct template
loader.
:returns : jinja2.ChoiceLoader constructed with a list of
jinja2.FilesystemLoaders, ordered in descending
order by OpenStack release.
"""
tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
for rel in OPENSTACK_CODENAMES.itervalues()]
if not os.path.isdir(templates_dir):
log('Templates directory not found @ %s.' % templates_dir,
level=ERROR)
raise OSConfigException
# the bottom contains templates_dir and possibly a common templates dir
# shipped with the helper.
loaders = [FileSystemLoader(templates_dir)]
helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
if os.path.isdir(helper_templates):
loaders.append(FileSystemLoader(helper_templates))
for rel, tmpl_dir in tmpl_dirs:
if os.path.isdir(tmpl_dir):
loaders.insert(0, FileSystemLoader(tmpl_dir))
if rel == os_release:
break
log('Creating choice loader with dirs: %s' %
[l.searchpath for l in loaders], level=INFO)
return ChoiceLoader(loaders)
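# Worked example (directory layout assumed): if only folsom/ and grizzly/
# exist under /tmp/templates, get_loader('/tmp/templates', 'grizzly')
# searches, in order: /tmp/templates/grizzly, /tmp/templates/folsom,
# /tmp/templates, then this module's own templates/ dir if present.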
class OSConfigTemplate(object):
"""
Associates a config file template with a list of context generators.
Responsible for constructing a template context based on those generators.
"""
def __init__(self, config_file, contexts):
self.config_file = config_file
if hasattr(contexts, '__call__'):
self.contexts = [contexts]
else:
self.contexts = contexts
self._complete_contexts = []
def context(self):
ctxt = {}
for context in self.contexts:
_ctxt = context()
if _ctxt:
ctxt.update(_ctxt)
# track interfaces for every complete context.
[self._complete_contexts.append(interface)
for interface in context.interfaces
if interface not in self._complete_contexts]
return ctxt
def complete_contexts(self):
'''
Return a list of interfaces that have satisfied contexts.
'''
if self._complete_contexts:
return self._complete_contexts
self.context()
return self._complete_contexts
class OSConfigRenderer(object):
"""
This class provides a common templating system to be used by OpenStack
charms. It is intended to help charms share common code and templates,
and ease the burden of managing config templates across multiple OpenStack
releases.
Basic usage:
# import some common context generators from charmhelpers
from charmhelpers.contrib.openstack import context
# Create a renderer object for a specific OS release.
configs = OSConfigRenderer(templates_dir='/tmp/templates',
openstack_release='folsom')
# register some config files with context generators.
configs.register(config_file='/etc/nova/nova.conf',
contexts=[context.SharedDBContext(),
context.AMQPContext()])
configs.register(config_file='/etc/nova/api-paste.ini',
contexts=[context.IdentityServiceContext()])
configs.register(config_file='/etc/haproxy/haproxy.conf',
contexts=[context.HAProxyContext()])
# write out a single config
configs.write('/etc/nova/nova.conf')
# write out all registered configs
configs.write_all()
Details:
OpenStack Releases and template loading
---------------------------------------
When the object is instantiated, it is associated with a specific OS
release. This dictates how the template loader will be constructed.
The constructed loader attempts to load the template from several places
in the following order:
- from the most recent OS release-specific template dir (if one exists)
- the base templates_dir
- a template directory shipped in the charm with this helper file.
For the example above, '/tmp/templates' contains the following structure:
/tmp/templates/nova.conf
/tmp/templates/api-paste.ini
/tmp/templates/grizzly/api-paste.ini
/tmp/templates/havana/api-paste.ini
Since it was registered with the grizzly release, it first searches
the grizzly directory for nova.conf, then the templates dir.
When writing api-paste.ini, it will find the template in the grizzly
directory.
If the object were created with folsom, it would fall back to the
base templates dir for its api-paste.ini template.
This system should help manage changes in config files through
openstack releases, allowing charms to fall back to the most recently
updated config template for a given release.
The haproxy.conf, since it is not shipped in the templates dir, will
be loaded from the module directory's template directory, eg
$CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
us to ship common templates (haproxy, apache) with the helpers.
Context generators
---------------------------------------
Context generators are used to generate template contexts during hook
execution. Doing so may require inspecting service relations, charm
config, etc. When registered, a config file is associated with a list
of generators. When a template is rendered and written, all context
generators are called in a chain to generate the context dictionary
passed to the jinja2 template. See context.py for more info.
"""
def __init__(self, templates_dir, openstack_release):
if not os.path.isdir(templates_dir):
log('Could not locate templates dir %s' % templates_dir,
level=ERROR)
raise OSConfigException
self.templates_dir = templates_dir
self.openstack_release = openstack_release
self.templates = {}
self._tmpl_env = None
if None in [Environment, ChoiceLoader, FileSystemLoader]:
# if this code is running, the object is created pre-install hook.
# jinja2 shouldn't get touched until the module is reloaded on next
# hook execution, with proper jinja2 bits successfully imported.
apt_install('python-jinja2')
def register(self, config_file, contexts):
"""
Register a config file with a list of context generators to be called
during rendering.
"""
self.templates[config_file] = OSConfigTemplate(config_file=config_file,
contexts=contexts)
log('Registered config file: %s' % config_file, level=INFO)
def _get_tmpl_env(self):
if not self._tmpl_env:
loader = get_loader(self.templates_dir, self.openstack_release)
self._tmpl_env = Environment(loader=loader)
def _get_template(self, template):
self._get_tmpl_env()
template = self._tmpl_env.get_template(template)
log('Loaded template from %s' % template.filename, level=INFO)
return template
def render(self, config_file):
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
raise OSConfigException
ctxt = self.templates[config_file].context()
_tmpl = os.path.basename(config_file)
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound:
# if no template is found with basename, try looking for it
# using a munged full path, eg:
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
_tmpl = '_'.join(config_file.split('/')[1:])
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound as e:
log('Could not load template from %s by %s or %s.' %
(self.templates_dir, os.path.basename(config_file), _tmpl),
level=ERROR)
raise e
log('Rendering from template: %s' % _tmpl, level=INFO)
return template.render(ctxt)
def write(self, config_file):
"""
Write a single config file, raises if config file is not registered.
"""
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
raise OSConfigException
_out = self.render(config_file)
with open(config_file, 'wb') as out:
out.write(_out)
log('Wrote template %s.' % config_file, level=INFO)
def write_all(self):
"""
Write out all registered config files.
"""
[self.write(k) for k in self.templates.iterkeys()]
def set_release(self, openstack_release):
"""
Resets the template environment and generates a new template loader
based on the new openstack release.
"""
self._tmpl_env = None
self.openstack_release = openstack_release
self._get_tmpl_env()
def complete_contexts(self):
'''
Returns a list of context interfaces that yield a complete context.
'''
interfaces = []
[interfaces.extend(i.complete_contexts())
for i in self.templates.itervalues()]
return interfaces

hooks/charmhelpers/contrib/openstack/utils.py Normal file

@@ -0,0 +1,447 @@
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
import apt_pkg as apt
import subprocess
import os
import socket
import sys
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
ERROR,
INFO
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse')
])
OPENSTACK_CODENAMES = OrderedDict([
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
])
# The ugly duckling
SWIFT_CODENAMES = OrderedDict([
('1.4.3', 'diablo'),
('1.4.8', 'essex'),
('1.7.4', 'folsom'),
('1.8.0', 'grizzly'),
('1.7.7', 'grizzly'),
('1.7.6', 'grizzly'),
('1.10.0', 'havana'),
('1.9.1', 'havana'),
('1.9.0', 'havana'),
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src in ['distro', 'distro-proposed']:
try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
e = 'Could not derive openstack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in OPENSTACK_CODENAMES.iteritems():
if v in src:
return v
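# Worked examples (host assumed to be running precise):
#   get_os_codename_install_source('distro')               -> 'essex'
#   get_os_codename_install_source('cloud:precise-havana') -> 'havana'
#   get_os_codename_install_source('deb ... havana main')  -> 'havana'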
def get_os_version_install_source(src):
codename = get_os_codename_install_source(src)
return get_os_version_codename(codename)
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in OPENSTACK_CODENAMES.iteritems():
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
cache = apt.Cache()
try:
pkg = cache[package]
except:
if not fatal:
return None
# the package is unknown to the current apt cache.
e = 'Could not determine version of package with no installation '\
'candidate: %s' % package
error_out(e)
if not pkg.current_ver:
if not fatal:
return None
# package is known, but no version is currently installed.
e = 'Could not determine version of uninstalled package: %s' % package
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
swift_vers = vers[:5]
if swift_vers not in SWIFT_CODENAMES:
# Deal with 1.10.0 upward
swift_vers = vers[:6]
return SWIFT_CODENAMES[swift_vers]
else:
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in vers_map.iteritems():
if cname == codename:
return version
#e = "Could not determine OpenStack version for package: %s" % pkg
#error_out(e)
os_rel = None
def os_release(package, base='essex'):
'''
Returns OpenStack release codename from a cached global.
If the codename can not be determined from either an installed package or
the installation source, the earliest release supported by the charm should
be returned.
'''
global os_rel
if os_rel:
return os_rel
os_rel = (get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
return os_rel
def import_key(keyid):
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
def configure_installation_source(rel):
'''Configure apt installation source.'''
if rel == 'distro':
return
elif rel == 'distro-proposed':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
import_key(key)
elif l == 1:
src = rel
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
apt_install('ubuntu-cloud-keyring', fatal=True)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"]
def openstack_upgrade_available(package):
"""
Determines if an OpenStack upgrade is available from installation
source, based on version of installed package.
:param package: str: Name of installed package.
:returns: bool: : Returns True if configured installation source offers
a newer version of package.
"""
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(available_vers, cur_vers) == 1
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: '
'block_device=%s.' % block_device)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev)
return bdev
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
def is_ip(address):
"""
Returns True if address is a valid IP address.
"""
try:
# Test to see if already an IPv4 address
socket.inet_aton(address)
return True
except socket.error:
return False
def ns_query(address):
try:
import dns.resolver
except ImportError:
apt_install('python-dnspython')
import dns.resolver
if isinstance(address, dns.name.Name):
rtype = 'PTR'
elif isinstance(address, basestring):
rtype = 'A'
answers = dns.resolver.query(address, rtype)
if answers:
return str(answers[0])
return None
def get_host_ip(hostname):
"""
Resolves the IP for a given hostname, or returns
the input if it is already an IP.
"""
if is_ip(hostname):
return hostname
return ns_query(hostname)
def get_hostname(address, fqdn=True):
"""
Resolves hostname for given IP, or returns the input
if it is already a hostname.
"""
if is_ip(address):
try:
import dns.reversename
except ImportError:
apt_install('python-dnspython')
import dns.reversename
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
else:
result = address
if fqdn:
# strip trailing .
if result.endswith('.'):
return result[:-1]
else:
return result
else:
return result.split('.')[0]

hooks/charmhelpers/contrib/storage/linux/ceph.py Normal file

@@ -0,0 +1,387 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import os
import shutil
import json
import time
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
WARNING,
ERROR
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
service_running,
umount,
)
from charmhelpers.fetch import (
apt_install,
)
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
CEPH_CONF = """[global]
auth supported = {auth}
keyring = {keyring}
mon host = {mon_hosts}
log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
def install():
''' Basic Ceph client installation '''
ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
''' Check to see if a RADOS block device exists '''
try:
out = check_output(['rbd', 'list', '--id', service,
'--pool', pool])
except CalledProcessError:
return False
else:
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
''' Create a new RADOS block device '''
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
check_call(cmd)
def pool_exists(service, name):
''' Check to see if a RADOS pool already exists '''
try:
out = check_output(['rados', '--id', service, 'lspools'])
except CalledProcessError:
return False
else:
return name in out
def get_osds(service):
'''
Return a list of all Ceph Object Storage Daemons
currently in the cluster
'''
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls', '--format=json']))
else:
return None
def create_pool(service, name, replicas=2):
''' Create a new RADOS pool '''
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 / replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'create',
name, str(pgnum)
]
check_call(cmd)
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'set', name,
'size', str(replicas)
]
check_call(cmd)
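# Worked example: with 6 OSDs reported and replicas=2, the pool is created
# with (6 * 100 / 2) = 300 placement groups; when OSDs cannot be queried,
# the fixed default of 200 above is used instead.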
def delete_pool(service, name):
''' Delete a RADOS pool from ceph '''
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'delete',
name, '--yes-i-really-really-mean-it'
]
check_call(cmd)
def _keyfile_path(service):
return KEYFILE.format(service)
def _keyring_path(service):
return KEYRING.format(service)
def create_keyring(service, key):
''' Create a new Ceph keyring containing key'''
keyring = _keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
return
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.{}'.format(service),
'--add-key={}'.format(key)
]
check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
''' Create a file containing key '''
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
''' Query named relation 'ceph' to determine current nodes '''
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth, use_syslog):
''' Perform basic configuration of Ceph '''
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF.format(auth=auth,
keyring=_keyring_path(service),
mon_hosts=",".join(map(str, hosts)),
use_syslog=use_syslog))
modprobe('rbd')
def image_mapped(name):
''' Determine whether a RADOS block device is mapped locally '''
try:
out = check_output(['rbd', 'showmapped'])
except CalledProcessError:
return False
else:
return name in out
def map_block_storage(service, pool, image):
''' Map a RADOS block device for local use '''
cmd = [
'rbd',
'map',
'{}/{}'.format(pool, image),
'--user',
service,
'--secret',
_keyfile_path(service),
]
check_call(cmd)
def filesystem_mounted(fs):
''' Determine whether a filesystem is already mounted '''
return fs in [f for f, m in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
''' Make a new filesystem on the specified block device '''
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst):
''' Migrate data in data_src_dst to blk_device and then remount '''
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
copy_files(data_src_dst, '/mnt')
# umount block device
umount('/mnt')
# Grab user/group ID's from original source
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
# TODO: persist is currently a NO-OP in core.host
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
os.chown(data_src_dst, uid, gid)
# TODO: re-use
def modprobe(module):
''' Load a kernel module and configure for auto-load on reboot '''
log('ceph: Loading kernel module %s' % module, level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
# append a trailing newline so /etc/modules stays one module per line
modules.write(module + '\n')
def copy_files(src, dst, symlinks=False, ignore=None):
''' Copy files from src to dst '''
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
NOTE: This function must only be called from a single service unit for
the same rbd_img, otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being re-mounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool {}.'.format(pool))
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image ({}).'.format(rbd_img))
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if its executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if service_running(svc):
log('ceph: Stopping service {} prior to migrating data.'
.format(svc))
service_stop(svc)
place_data_on_block_device(blk_device, mount_point)
for svc in system_services:
log('ceph: Starting service {} after migrating data.'
.format(svc))
service_start(svc)
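# Illustrative call from a charm hook (all names below are hypothetical
# examples, not defaults of this module):
#
# ensure_ceph_storage(service='mysql', pool='mysql-pool',
#                     rbd_img='mysql-data', sizemb=10240,
#                     mount_point='/var/lib/mysql',
#                     blk_device='/dev/rbd1', fstype='ext4',
#                     system_services=['mysql'])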
def ensure_ceph_keyring(service, user=None, group=None):
'''
Ensures a ceph keyring is created for a named service
and optionally ensures user and group ownership.
Returns False if no ceph key is available in relation state.
'''
key = None
for rid in relation_ids('ceph'):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
return False
create_keyring(service=service, key=key)
keyring = _keyring_path(service)
if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring])
return True
def ceph_version():
''' Retrieve the local version of ceph '''
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None

View File

@ -0,0 +1,62 @@
import os
import re
from subprocess import (
check_call,
check_output,
)
##################################################
# loopback device helpers.
##################################################
def loopback_devices():
'''
Parse through 'losetup -a' output to determine currently mapped
loopback devices. Output is expected to look like:
/dev/loop0: [0807]:961814 (/tmp/my.img)
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
cmd = ['losetup', '-a']
devs = [d.strip().split(' ') for d in
check_output(cmd).splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
return loopbacks
def create_loopback(file_path):
'''
Create a loopback device for a given backing file.
:returns: str: Full path to new loopback device (eg, /dev/loop0)
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in loopback_devices().iteritems():
if f == file_path:
return d
def ensure_loopback_device(path, size):
'''
Ensure a loopback device exists for a given backing file path and size.
If a loopback device is not already mapped to the file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in loopback_devices().iteritems():
if f == path:
return d
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd)
return create_loopback(path)
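# Illustrative usage (path and size are hypothetical):
#
# dev = ensure_loopback_device('/srv/images/test.img', '5G')
# # dev now holds the mapped device, e.g. '/dev/loop0'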

View File

@ -0,0 +1,88 @@
from subprocess import (
CalledProcessError,
check_call,
check_output,
Popen,
PIPE,
)
##################################################
# LVM helpers.
##################################################
def deactivate_lvm_volume_group(block_device):
'''
Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume
'''
vg = list_lvm_volume_group(block_device)
if vg:
cmd = ['vgchange', '-an', vg]
check_call(cmd)
def is_lvm_physical_volume(block_device):
'''
Determine whether a block device is initialized as an LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: boolean: True if block device is a PV, False if not.
'''
try:
check_output(['pvdisplay', block_device])
return True
except CalledProcessError:
return False
def remove_lvm_physical_volume(block_device):
'''
Remove LVM PV signatures from a given block device.
:param block_device: str: Full path of block device to scrub.
'''
p = Popen(['pvremove', '-ff', block_device],
stdin=PIPE)
p.communicate(input='y\n')
def list_lvm_volume_group(block_device):
'''
List LVM volume group associated with a given block device.
Assumes block device is a valid LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: str: Name of volume group associated with block device or None
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
if l.strip().startswith('VG Name'):
vg = ' '.join(l.split()).split(' ').pop()
return vg
def create_lvm_physical_volume(block_device):
'''
Initialize a block device as an LVM physical volume.
:param block_device: str: Full path of block device to initialize.
'''
check_call(['pvcreate', block_device])
def create_lvm_volume_group(volume_group, block_device):
'''
Create an LVM volume group backed by a given block device.
Assumes block device has already been initialized as an LVM PV.
:param volume_group: str: Name of volume group to create.
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
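# Illustrative flow for (re)initialising a device as an LVM backend;
# the device and volume group names are hypothetical:
#
# if is_lvm_physical_volume('/dev/vdb'):
#     deactivate_lvm_volume_group('/dev/vdb')
#     remove_lvm_physical_volume('/dev/vdb')
# create_lvm_physical_volume('/dev/vdb')
# create_lvm_volume_group('cinder-volumes', '/dev/vdb')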

View File

@ -0,0 +1,26 @@
from os import stat
from stat import S_ISBLK
from subprocess import (
check_call
)
def is_block_device(path):
'''
Confirm device at path is a valid block device node.
:returns: boolean: True if path is a block device, False if not.
'''
return S_ISBLK(stat(path).st_mode)
def zap_disk(block_device):
'''
Clear a block device of its partition table. Relies on sgdisk, which is
installed as part of the 'gdisk' package in Ubuntu.
:param block_device: str: Full path of block device to clean.
'''
check_call(['sgdisk', '--zap-all', '--clear',
'--mbrtogpt', block_device])
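# Illustrative guard before wiping a disk (device path is hypothetical):
#
# if is_block_device('/dev/vdb'):
#     zap_disk('/dev/vdb')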

View File

@ -0,0 +1,257 @@
# Easy file synchronization among peer units using ssh + unison.
#
# From *both* peer relation -joined and -changed, add a call to
# ssh_authorized_peers() describing the peer relation and the desired
# user + group. After all peer relations have settled, all hosts should
# be able to connect to one another via key auth'd ssh as the specified user.
#
# Other hooks are then free to synchronize files and directories using
# sync_to_peers().
#
# For a peer relation named 'cluster', for example:
#
# cluster-relation-joined:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# cluster-relation-changed:
# ...
# ssh_authorized_peers(peer_interface='cluster',
# user='juju_ssh', group='juju_ssh',
# ensure_user=True)
# ...
#
# Hooks are now free to sync files as easily as:
#
# files = ['/etc/fstab', '/etc/apt.conf.d/']
# sync_to_peers(peer_interface='cluster',
# user='juju_ssh', paths=[files])
#
# It is assumed the charm itself has setup permissions on each unit
# such that 'juju_ssh' has read + write permissions. Also assumed
# that the calling charm takes care of leader delegation.
#
# Additionally, files can be synchronized to a specific unit only:
# sync_to_peer(slave_address, user='juju_ssh',
# paths=[files], verbose=False)
import os
import pwd
from copy import copy
from subprocess import check_call, check_output
from charmhelpers.core.host import (
adduser,
add_user_to_group,
)
from charmhelpers.core.hookenv import (
log,
hook_name,
relation_ids,
related_units,
relation_set,
relation_get,
unit_private_ip,
ERROR,
)
BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
'-fastcheck=true', '-group=false', '-owner=false',
'-prefer=newer', '-times=true']
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
log('Could not get homedir for user %s: does the user exist?' % user,
ERROR)
raise Exception('Could not get homedir for user %s' % user)
def create_private_key(user, priv_key_path):
if not os.path.isfile(priv_key_path):
log('Generating new SSH key for user %s.' % user)
cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
'-f', priv_key_path]
check_call(cmd)
else:
log('SSH key already exists at %s.' % priv_key_path)
check_call(['chown', user, priv_key_path])
check_call(['chmod', '0600', priv_key_path])
def create_public_key(user, priv_key_path, pub_key_path):
if not os.path.isfile(pub_key_path):
log('Generating missing ssh public key @ %s.' % pub_key_path)
cmd = ['ssh-keygen', '-y', '-f', priv_key_path]
p = check_output(cmd).strip()
with open(pub_key_path, 'wb') as out:
out.write(p)
check_call(['chown', user, pub_key_path])
def get_keypair(user):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
priv_key = os.path.join(ssh_dir, 'id_rsa')
pub_key = '%s.pub' % priv_key
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir)
check_call(['chown', '-R', user, ssh_dir])
create_private_key(user, priv_key)
create_public_key(user, priv_key, pub_key)
with open(priv_key, 'r') as p:
_priv = p.read().strip()
with open(pub_key, 'r') as p:
_pub = p.read().strip()
return (_priv, _pub)
def write_authorized_keys(user, keys):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
log('Syncing authorized_keys @ %s.' % auth_keys)
with open(auth_keys, 'wb') as out:
for k in keys:
out.write('%s\n' % k)
def write_known_hosts(user, hosts):
home_dir = get_homedir(user)
ssh_dir = os.path.join(home_dir, '.ssh')
known_hosts = os.path.join(ssh_dir, 'known_hosts')
khosts = []
for host in hosts:
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
remote_key = check_output(cmd).strip()
khosts.append(remote_key)
log('Syncing known_hosts @ %s.' % known_hosts)
with open(known_hosts, 'wb') as out:
for host in khosts:
out.write('%s\n' % host)
def ensure_user(user, group=None):
adduser(user)
if group:
add_user_to_group(user, group)
def ssh_authorized_peers(peer_interface, user, group=None,
ensure_local_user=False):
"""
Main setup function, should be called from both peer -changed and -joined
hooks with the same parameters.
"""
if ensure_local_user:
ensure_user(user, group)
priv_key, pub_key = get_keypair(user)
hook = hook_name()
if hook == '%s-relation-joined' % peer_interface:
relation_set(ssh_pub_key=pub_key)
elif hook == '%s-relation-changed' % peer_interface:
hosts = []
keys = []
for r_id in relation_ids(peer_interface):
for unit in related_units(r_id):
ssh_pub_key = relation_get('ssh_pub_key',
rid=r_id,
unit=unit)
priv_addr = relation_get('private-address',
rid=r_id,
unit=unit)
if ssh_pub_key:
keys.append(ssh_pub_key)
hosts.append(priv_addr)
else:
log('ssh_authorized_peers(): ssh_pub_key '
'missing for unit %s, skipping.' % unit)
write_authorized_keys(user, keys)
write_known_hosts(user, hosts)
authed_hosts = ':'.join(hosts)
relation_set(ssh_authorized_hosts=authed_hosts)
def _run_as_user(user):
try:
user = pwd.getpwnam(user)
except KeyError:
log('Invalid user: %s' % user)
raise Exception
uid, gid = user.pw_uid, user.pw_gid
os.environ['HOME'] = user.pw_dir
def _inner():
os.setgid(gid)
os.setuid(uid)
return _inner
def run_as_user(user, cmd):
return check_output(cmd, preexec_fn=_run_as_user(user), cwd='/')
def collect_authed_hosts(peer_interface):
'''Iterate through the units on peer interface to find all that
have the calling host in their authorized hosts list'''
hosts = []
for r_id in (relation_ids(peer_interface) or []):
for unit in related_units(r_id):
private_addr = relation_get('private-address',
rid=r_id, unit=unit)
authed_hosts = relation_get('ssh_authorized_hosts',
rid=r_id, unit=unit)
if not authed_hosts:
log('Peer %s has not authorized *any* hosts yet, skipping.' % unit)
continue
if unit_private_ip() in authed_hosts.split(':'):
hosts.append(private_addr)
else:
log('Peer %s has not authorized *this* host yet, skipping.' % unit)
return hosts
def sync_path_to_host(path, host, user, verbose=False):
cmd = copy(BASE_CMD)
if not verbose:
cmd.append('-silent')
# remove trailing slash from directory paths; unison
# doesn't like them.
if path.endswith('/'):
path = path[:(len(path) - 1)]
cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
try:
log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
run_as_user(user, cmd)
except Exception as e:
log('Error syncing remote files: %s' % e)
def sync_to_peer(host, user, paths=[], verbose=False):
'''Sync paths to a specific host'''
[sync_path_to_host(p, host, user, verbose) for p in paths]
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
'''Sync paths to all authorized hosts on the peer interface'''
for host in collect_authed_hosts(peer_interface):
sync_to_peer(host, user, paths, verbose)

View File

@ -8,6 +8,7 @@ import os
import json
import yaml
import subprocess
+import sys
import UserDict
from subprocess import CalledProcessError
@ -149,6 +150,11 @@ def service_name():
return local_unit().split('/')[0]
+def hook_name():
+"""The name of the currently executing hook"""
+return os.path.basename(sys.argv[0])
@cached
def config(scope=None):
"""Juju charm configuration"""

View File

@ -194,7 +194,7 @@ def file_hash(path):
return None
-def restart_on_change(restart_map):
+def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing
This function is used as a decorator, for example
@ -219,8 +219,14 @@ def restart_on_change(restart_map):
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
-for service_name in list(OrderedDict.fromkeys(restarts)):
-service('restart', service_name)
+services_list = list(OrderedDict.fromkeys(restarts))
+if not stopstart:
+for service_name in services_list:
+service('restart', service_name)
+else:
+for action in ['stop', 'start']:
+for service_name in services_list:
+service(action, service_name)
return wrapped_f
return wrap
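# Illustrative use of the new stopstart flag (the restart map values
# here are hypothetical): watched services are stopped and then
# started, rather than restarted, when any listed file changes.
#
# @restart_on_change({'/etc/keystone/keystone.conf': ['keystone']},
#                    stopstart=True)
# def config_changed():
#     ...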

View File

@ -0,0 +1,308 @@
import importlib
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
from urlparse import (
urlparse,
urlunparse,
)
import subprocess
from charmhelpers.core.hookenv import (
config,
log,
)
import apt_pkg
import os
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
CLOUD_ARCHIVE_POCKETS = {
# Folsom
'folsom': 'precise-updates/folsom',
'precise-folsom': 'precise-updates/folsom',
'precise-folsom/updates': 'precise-updates/folsom',
'precise-updates/folsom': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'precise-folsom/proposed': 'precise-proposed/folsom',
'precise-proposed/folsom': 'precise-proposed/folsom',
# Grizzly
'grizzly': 'precise-updates/grizzly',
'precise-grizzly': 'precise-updates/grizzly',
'precise-grizzly/updates': 'precise-updates/grizzly',
'precise-updates/grizzly': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'precise-grizzly/proposed': 'precise-proposed/grizzly',
'precise-proposed/grizzly': 'precise-proposed/grizzly',
# Havana
'havana': 'precise-updates/havana',
'precise-havana': 'precise-updates/havana',
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
}
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
apt_pkg.init()
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
try:
p = cache[package]
if not p.current_ver:
_pkgs.append(package)
except KeyError:
log('Package {} has no installation candidate.'.format(package),
level='WARNING')
_pkgs.append(package)
return _pkgs
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd, env=env)
def apt_upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
if dist:
cmd.append('dist-upgrade')
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd, env=env)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Purging {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def apt_hold(packages, fatal=False):
"""Hold one or more packages"""
cmd = ['apt-mark', 'hold']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def add_source(source, key=None):
if source is None:
log('Source is not present. Skipping')
return
if (source.startswith('ppa:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'keyserver.ubuntu.com', '--recv',
key])
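# Illustrative add_source() calls covering each branch above (values
# are examples only, not defaults):
#
# add_source('ppa:charmers/example')
# add_source('cloud:precise-updates/icehouse')
# add_source('deb http://example.com/repo precise main', key='a1b2c3d4')
# add_source('proposed')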
class SourceConfigError(Exception):
pass
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
"""
Configure multiple sources from charm configuration
Example config:
install_sources:
- "ppa:foo"
- "http://example.com/repo precise main"
install_keys:
- null
- "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted.
"""
sources = safe_load(config(sources_var))
keys = config(keys_var)
if keys is not None:
keys = safe_load(keys)
if isinstance(sources, basestring) and (
keys is None or isinstance(keys, basestring)):
add_source(sources, keys)
else:
if not len(sources) == len(keys):
msg = 'Install sources and keys lists are different lengths'
raise SourceConfigError(msg)
for src_num in range(len(sources)):
add_source(sources[src_num], keys[src_num])
if update:
apt_update(fatal=True)
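# In a charm's install hook this typically reduces to (sketch; the
# package name is an example):
#
# configure_sources(update=True)
# apt_install(filter_installed_packages(['keystone']), fatal=True)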
# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
)
class UnhandledSource(Exception):
pass
def install_remote(source):
"""
Install a file tree from a remote source
The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this module's submodules
Options supported are submodule-specific"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source)
except UnhandledSource:
pass
if not installed_to:
raise UnhandledSource("No handler found for source {}".format(source))
return installed_to
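# Illustrative call (URL is hypothetical):
#
# installed_to = install_remote('http://example.com/payload.tar.gz')
# # returns the directory the matching handler unpacked the source into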
def install_from_config(config_var_name):
charm_config = config()
source = charm_config[config_var_name]
return install_remote(source)
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
return "Wrong source type"
def install(self, source):
"""Try to download and unpack the source. Return the path to the
unpacked files or raise UnhandledSource."""
raise UnhandledSource("Wrong source type {}".format(source))
def parse_url(self, url):
return urlparse(url)
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
parts[4:] = ['' for i in parts[4:]]
return urlunparse(parts)
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
plugin_list = []
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
# Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list

View File

@ -0,0 +1,63 @@
import os
import urllib2
import urlparse
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""Handler for archives via generic URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
return "Wrong source type"
if get_archive_handler(self.base_url(source)):
return True
return False
def download(self, source, dest):
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
if proto in ('http', 'https'):
auth, barehost = urllib2.splituser(netloc)
if auth is not None:
source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
username, password = urllib2.splitpasswd(auth)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
# Realm is set to None in add_password to force the username and password
# to be used regardless of the realm
passman.add_password(None, source, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
response = urllib2.urlopen(source)
try:
with open(dest, 'w') as dest_file:
dest_file.write(response.read())
except Exception as e:
if os.path.isfile(dest):
os.unlink(dest)
raise e
def install(self, source):
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
try:
self.download(source, dld_file)
except urllib2.URLError as e:
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
return extract(dld_file)

View File

@ -0,0 +1,49 @@
import os
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.core.host import mkdir
try:
from bzrlib.branch import Branch
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-bzrlib")
from bzrlib.branch import Branch
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('bzr+ssh', 'lp'):
return False
else:
return True
def branch(self, source, dest):
url_parts = self.parse_url(source)
# If we use lp:branchname scheme we need to load plugins
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if url_parts.scheme == "lp":
from bzrlib.plugin import load_plugins
load_plugins()
try:
remote_branch = Branch.open(source)
remote_branch.bzrdir.sprout(dest).open_branch()
except Exception as e:
raise e
def install(self, source):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
try:
self.branch(source, dest_dir)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir

106
hooks/keystone_context.py Normal file
View File

@ -0,0 +1,106 @@
from charmhelpers.core.hookenv import (
config, unit_private_ip)
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
is_clustered,
)
from subprocess import (
check_call
)
import os
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
class ApacheSSLContext(context.ApacheSSLContext):
interfaces = ['https']
external_ports = []
service_namespace = 'keystone'
def __call__(self):
# late import to work around circular dependency
from keystone_utils import determine_ports
self.external_ports = determine_ports()
return super(ApacheSSLContext, self).__call__()
def configure_cert(self):
#import keystone_ssl as ssl
from keystone_utils import SSH_USER, get_ca
if not os.path.isdir('/etc/apache2/ssl'):
os.mkdir('/etc/apache2/ssl')
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
if not os.path.isdir(ssl_dir):
os.mkdir(ssl_dir)
if is_clustered():
https_cn = config('vip')
else:
https_cn = unit_private_ip()
ca = get_ca(user=SSH_USER)
cert, key = ca.get_cert_and_key(common_name=https_cn)
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
cert_out.write(cert)
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
key_out.write(key)
if ca:
with open(CA_CERT_PATH, 'w') as ca_out:
ca_out.write(ca.get_ca_bundle())
check_call(['update-ca-certificates'])
class HAProxyContext(context.HAProxyContext):
interfaces = []
def __call__(self):
'''
Extends the main charmhelpers HAProxyContext with a port mapping
specific to this charm.
Also used to extend the keystone.conf context with the correct
api_listening_ports.
'''
from keystone_utils import api_port
ctxt = super(HAProxyContext, self).__call__()
# determine which port api processes should bind to, depending
# on existence of haproxy + apache frontends
listen_ports = {}
listen_ports['admin_port'] = api_port('keystone-admin')
listen_ports['public_port'] = api_port('keystone-public')
# Apache ports
a_admin_port = determine_apache_port(api_port('keystone-admin'))
a_public_port = determine_apache_port(api_port('keystone-public'))
port_mapping = {
'admin-port': [
api_port('keystone-admin'), a_admin_port],
'public-port': [
api_port('keystone-public'), a_public_port],
}
# for haproxy.conf
ctxt['service_ports'] = port_mapping
# for keystone.conf
ctxt['listen_ports'] = listen_ports
return ctxt
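# Illustrative shape of the returned context, assuming the charm's
# default admin-port/service-port of 35357/5000 (the second value in
# each pair comes from determine_apache_port()):
#
# {'service_ports': {'admin-port': [35357, ...],
#                    'public-port': [5000, ...]},
#  'listen_ports': {'admin_port': 35357, 'public_port': 5000}}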
class KeystoneContext(context.OSContextGenerator):
interfaces = []
def __call__(self):
from keystone_utils import api_port, set_admin_token
ctxt = {}
ctxt['token'] = set_admin_token()
ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'))
ctxt['public_port'] = determine_api_port(api_port('keystone-public'))
ctxt['debug'] = config('debug') in ['yes', 'true', 'True']
ctxt['verbose'] = config('verbose') in ['yes', 'true', 'True']
if config('enable-pki') not in ['false', 'False', 'no', 'No']:
ctxt['signing'] = True
return ctxt
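# These context generators are consumed by the templating layer:
# register_configs() in keystone_utils.py registers each config file
# with its contexts via templating.OSConfigRenderer (see below).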

688
hooks/keystone_hooks.py Executable file → Normal file
View File

@ -1,577 +1,235 @@
#!/usr/bin/python
+import os
+import sys
import time
-import urlparse
-from base64 import b64encode
+from subprocess import check_call
+from charmhelpers.contrib import unison
+from charmhelpers.core.hookenv import (
+Hooks,
+UnregisteredHookError,
+config,
+log,
+relation_get,
+relation_ids,
+relation_set,
+unit_get,
+)
+from charmhelpers.core.host import (
+mkdir,
+restart_on_change,
+)
+from charmhelpers.fetch import (
+apt_install, apt_update,
+filter_installed_packages
+)
+from charmhelpers.contrib.openstack.utils import (
+configure_installation_source,
+openstack_upgrade_available,
+)
from keystone_utils import (
-config_dirty,
-config_get,
-execute,
-update_config_block,
-set_admin_token,
-ensure_initial_admin,
-create_service_entry,
-create_endpoint_template,
-create_role,
-get_admin_token,
-get_service_password,
-create_user,
-grant_role,
-get_ca,
-synchronize_service_credentials,
+add_service_to_keystone,
+determine_packages,
do_openstack_upgrade,
-configure_pki_tokens,
-SSH_USER,
-SSL_DIR,
+ensure_initial_admin,
+migrate_database,
+save_script_rc,
+synchronize_service_credentials,
+register_configs,
+relation_list,
+restart_map,
CLUSTER_RES,
-https
+KEYSTONE_CONF,
+SSH_USER,
)
-from lib.openstack_common import (
-get_os_codename_install_source,
-get_os_codename_package,
-get_os_version_codename,
-get_os_version_package,
-save_script_rc
-)
-import lib.unison as unison
-import lib.utils as utils
-import lib.cluster_utils as cluster
-import lib.haproxy_utils as haproxy
+from charmhelpers.contrib.hahelpers.cluster import (
+eligible_leader,
+get_hacluster_config,
+is_leader,
+)
from charmhelpers.payload.execd import execd_preinstall
-config = config_get()
-packages = [
-"keystone", "python-mysqldb", "pwgen",
-"haproxy", "python-jinja2", "openssl", "unison",
-"python-sqlalchemy"
-]
-service = "keystone"
-# used to verify joined services are valid openstack components.
-# this should reflect the current "core" components of openstack
-# and be expanded as we add support for them as a distro
-valid_services = {
-"nova": {
-"type": "compute",
-"desc": "Nova Compute Service"
-},
-"novav3": {
-"type": "computev3",
-"desc": "Nova Compute Service (v3 API)"
-},
-"nova-volume": {
-"type": "volume",
-"desc": "Nova Volume Service"
-},
-"cinder": {
-"type": "volume",
-"desc": "Cinder Volume Service"
-},
-"ec2": {
-"type": "ec2",
-"desc": "EC2 Compatibility Layer"
-},
-"glance": {
-"type": "image",
-"desc": "Glance Image Service"
-},
-"s3": {
-"type": "s3",
-"desc": "S3 Compatible object-store"
-},
-"swift": {
-"type": "object-store",
-"desc": "Swift Object Storage Service"
-},
-"quantum": {
-"type": "network",
-"desc": "Quantum Networking Service"
-},
-"neutron": {
-"type": "network",
-"desc": "Neutron Networking Service"
-},
-"oxygen": {
-"type": "oxygen",
-"desc": "Oxygen Cloud Image Service"
-},
-"ceilometer": {
-"type": "metering",
-"desc": "Ceilometer Metering Service"
-},
-"heat": {
-"type": "orchestration",
-"desc": "Heat Orchestration API"
-},
-"heat-cfn": {
-"type": "cloudformation",
-"desc": "Heat CloudFormation API"
-}
-}
+hooks = Hooks()
+CONFIGS = register_configs()
-def install_hook():
+@hooks.hook()
+def install():
execd_preinstall()
-utils.configure_source()
-utils.install(*packages)
-update_config_block('DEFAULT',
-public_port=cluster.determine_api_port(config["service-port"]))
-update_config_block('DEFAULT',
-admin_port=cluster.determine_api_port(config["admin-port"]))
-update_config_block('DEFAULT', use_syslog=config["use-syslog"])
-set_admin_token(config['admin-token'])
-# set all backends to use sql+sqlite, if they are not already by default
-update_config_block('sql',
-connection='sqlite:////var/lib/keystone/keystone.db')
-update_config_block('identity',
-driver='keystone.identity.backends.sql.Identity')
-update_config_block('catalog',
-driver='keystone.catalog.backends.sql.Catalog')
-update_config_block('token',
-driver='keystone.token.backends.sql.Token')
-update_config_block('ec2',
-driver='keystone.contrib.ec2.backends.sql.Ec2')
-utils.stop('keystone')
-execute("keystone-manage db_sync")
-utils.start('keystone')
+configure_installation_source(config('openstack-origin'))
+apt_update()
+apt_install(determine_packages(), fatal=True)
+@hooks.hook('config-changed')
+@restart_on_change(restart_map())
+def config_changed():
+# ensure user + permissions for peer relations that
+# may be syncing data there via SSH_USER.
unison.ensure_user(user=SSH_USER, group='keystone')
-execute("chmod -R g+wrx /var/lib/keystone/")
-time.sleep(5)
-ensure_initial_admin(config)
-def db_joined():
-relation_data = {
-"database": config["database"],
-"username": config["database-user"],
-"hostname": config["hostname"]
-}
-utils.relation_set(**relation_data)
-def db_changed():
-relation_data = utils.relation_get_dict()
-if ('password' not in relation_data or
-'db_host' not in relation_data):
-utils.juju_log('INFO',
-"db_host or password not set. Peer not ready, exit 0")
-return
-update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
-(config["database-user"],
-relation_data["password"],
-relation_data["db_host"],
-config["database"]))
-if cluster.eligible_leader(CLUSTER_RES):
-utils.juju_log('INFO',
-'Cluster leader, performing db-sync')
-execute("keystone-manage db_sync", echo=True)
-if config_dirty():
-utils.restart('keystone')
-time.sleep(5)
-if cluster.eligible_leader(CLUSTER_RES):
+homedir = unison.get_homedir(SSH_USER)
+if not os.path.isdir(homedir):
+mkdir(homedir, SSH_USER, 'keystone', 0o775)
+if openstack_upgrade_available('keystone'):
+do_openstack_upgrade(configs=CONFIGS)
+check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])
+save_script_rc()
+configure_https()
+CONFIGS.write_all()
+if eligible_leader(CLUSTER_RES):
+migrate_database()
ensure_initial_admin(config)
-# If the backend database has been switched to something new and there
-# are existing identity-service relations, service entries need to be
-# recreated in the new database. Re-executing identity-service-changed
-# will do this.
-for rid in utils.relation_ids('identity-service'):
-for unit in utils.relation_list(rid=rid):
-utils.juju_log('INFO',
-"Re-exec'ing identity-service-changed"
-" for: %s - %s" % (rid, unit))
-identity_changed(relation_id=rid, remote_unit=unit)
+log('Firing identity_changed hook for all related services.')
+# HTTPS may have been set - so fire all identity relations
+# again
+for r_id in relation_ids('identity-service'):
+for unit in relation_list(r_id):
+identity_changed(relation_id=r_id,
+remote_unit=unit)
-def ensure_valid_service(service):
-if service not in valid_services.keys():
-utils.juju_log('WARNING',
-"Invalid service requested: '%s'" % service)
-utils.relation_set(admin_token=-1)
-return
-def add_endpoint(region, service, publicurl, adminurl, internalurl):
-desc = valid_services[service]["desc"]
-service_type = valid_services[service]["type"]
-create_service_entry(service, service_type, desc)
-create_endpoint_template(region=region, service=service,
-publicurl=publicurl,
-adminurl=adminurl,
-internalurl=internalurl)
+@hooks.hook('shared-db-relation-joined')
+def db_joined():
+relation_set(database=config('database'),
+username=config('database-user'),
+hostname=unit_get('private-address'))
+@hooks.hook('shared-db-relation-changed')
+@restart_on_change(restart_map())
+def db_changed():
+if 'shared-db' not in CONFIGS.complete_contexts():
+log('shared-db relation incomplete. Peer not ready?')
+else:
+CONFIGS.write(KEYSTONE_CONF)
+if eligible_leader(CLUSTER_RES):
+migrate_database()
+ensure_initial_admin(config)
+@hooks.hook('identity-service-relation-joined')
def identity_joined():
""" Do nothing until we get information about requested service """
pass
-def get_requested_roles(settings):
-''' Retrieve any valid requested_roles from dict settings '''
-if ('requested_roles' in settings and
-settings['requested_roles'] not in ['None', None]):
-return settings['requested_roles'].split(',')
-else:
-return []
+@hooks.hook('identity-service-relation-changed')
def identity_changed(relation_id=None, remote_unit=None):
-""" A service has advertised its API endpoints, create an entry in the
-service catalog.
-Optionally allow this hook to be re-fired for an existing
-relation+unit, for context see db_changed().
-"""
-if not cluster.eligible_leader(CLUSTER_RES):
-utils.juju_log('INFO',
-'Deferring identity_changed() to service leader.')
-return
-settings = utils.relation_get_dict(relation_id=relation_id,
-remote_unit=remote_unit)
-# the minimum settings needed per endpoint
-single = set(['service', 'region', 'public_url', 'admin_url',
-'internal_url'])
-if single.issubset(settings):
-# other end of relation advertised only one endpoint
-if 'None' in [v for k, v in settings.iteritems()]:
-# Some backend services advertise no endpoint but require a
-# hook execution to update auth strategy.
-relation_data = {}
-# Check if clustered and use vip + haproxy ports if so
-if cluster.is_clustered():
-relation_data["auth_host"] = config['vip']
-relation_data["service_host"] = config['vip']
-else:
-relation_data["auth_host"] = config['hostname']
-relation_data["service_host"] = config['hostname']
-relation_data["auth_port"] = config['admin-port']
-relation_data["service_port"] = config['service-port']
-if config['https-service-endpoints'] in ['True', 'true']:
-# Pass CA cert as client will need it to
-# verify https connections
-ca = get_ca(user=SSH_USER)
-ca_bundle = ca.get_ca_bundle()
-relation_data['https_keystone'] = 'True'
-relation_data['ca_cert'] = b64encode(ca_bundle)
-if relation_id:
-relation_data['rid'] = relation_id
-# Allow the remote service to request creation of any additional
-# roles. Currently used by Horizon
-for role in get_requested_roles(settings):
-utils.juju_log('INFO',
-"Creating requested role: %s" % role)
-create_role(role)
-utils.relation_set(**relation_data)
-return
-else:
-ensure_valid_service(settings['service'])
-add_endpoint(region=settings['region'],
-service=settings['service'],
-publicurl=settings['public_url'],
-adminurl=settings['admin_url'],
-internalurl=settings['internal_url'])
-service_username = settings['service']
-https_cn = urlparse.urlparse(settings['internal_url'])
-https_cn = https_cn.hostname
-else:
-# assemble multiple endpoints from relation data. service name
-# should be prepended to setting name, ie:
-# relation-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
-# relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
-# Results in a dict that looks like:
-# { 'ec2': {
-# 'service': $foo
-# 'region': $foo
-# 'public_url': $foo
-# }
-# 'nova': {
-# 'service': $foo
-# 'region': $foo
-# 'public_url': $foo
-# }
-# }
-endpoints = {}
-for k, v in settings.iteritems():
-ep = k.split('_')[0]
-x = '_'.join(k.split('_')[1:])
-if ep not in endpoints:
-endpoints[ep] = {}
-endpoints[ep][x] = v
-services = []
-https_cn = None
-for ep in endpoints:
-# weed out any unrelated relation stuff Juju might have added
-# by ensuring each possible endpoint has appropriate fields
-# ['service', 'region', 'public_url', 'admin_url', 'internal_url']
-if single.issubset(endpoints[ep]):
-ep = endpoints[ep]
-ensure_valid_service(ep['service'])
-add_endpoint(region=ep['region'], service=ep['service'],
-publicurl=ep['public_url'],
-adminurl=ep['admin_url'],
-internalurl=ep['internal_url'])
-services.append(ep['service'])
-if not https_cn:
-https_cn = urlparse.urlparse(ep['internal_url'])
-https_cn = https_cn.hostname
-service_username = '_'.join(services)
-if 'None' in [v for k, v in settings.iteritems()]:
-return
-if not service_username:
-return
-token = get_admin_token()
-utils.juju_log('INFO',
-"Creating service credentials for '%s'" % service_username)
-service_password = get_service_password(service_username)
-create_user(service_username, service_password, config['service-tenant'])
-grant_role(service_username, config['admin-role'],
-config['service-tenant'])
-# Allow the remote service to request creation of any additional roles.
-# Currently used by Swift and Ceilometer.
-for role in get_requested_roles(settings):
-utils.juju_log('INFO',
-"Creating requested role: %s" % role)
-create_role(role, service_username,
-config['service-tenant'])
-# As of https://review.openstack.org/#change,4675, all nodes hosting
-# an endpoint(s) needs a service username and password assigned to
-# the service tenant and granted admin role.
-# note: config['service-tenant'] is created in utils.ensure_initial_admin()
-# we return a token, information about our API endpoints, and the generated
-# service credentials
-relation_data = {
-"admin_token": token,
-"service_host": config["hostname"],
-"service_port": config["service-port"],
-"auth_host": config["hostname"],
-"auth_port": config["admin-port"],
-"service_username": service_username,
-"service_password": service_password,
-"service_tenant": config['service-tenant'],
-"https_keystone": "False",
-"ssl_cert": "",
-"ssl_key": "",
-"ca_cert": ""
-}
-if relation_id:
-relation_data['rid'] = relation_id
-# Check if clustered and use vip + haproxy ports if so
-if cluster.is_clustered():
-relation_data["auth_host"] = config['vip']
-relation_data["service_host"] = config['vip']
-# generate or get a new cert/key for service if set to manage certs.
-if config['https-service-endpoints'] in ['True', 'true']:
-ca = get_ca(user=SSH_USER)
-cert, key = ca.get_cert_and_key(common_name=https_cn)
-ca_bundle = ca.get_ca_bundle()
-relation_data['ssl_cert'] = b64encode(cert)
-relation_data['ssl_key'] = b64encode(key)
-relation_data['ca_cert'] = b64encode(ca_bundle)
-relation_data['https_keystone'] = 'True'
-unison.sync_to_peers(peer_interface='cluster',
-paths=[SSL_DIR], user=SSH_USER, verbose=True)
-utils.relation_set(**relation_data)
-synchronize_service_credentials()
-def config_changed():
-unison.ensure_user(user=SSH_USER, group='keystone')
-execute("chmod -R g+wrx /var/lib/keystone/")
-# Determine whether or not we should do an upgrade, based on the
-# the version offered in keystone-release.
-available = get_os_codename_install_source(config['openstack-origin'])
-installed = get_os_codename_package('keystone')
-if (available and
-get_os_version_codename(available) > \
-get_os_version_codename(installed)):
-# TODO: fixup this call to work like utils.install()
-do_openstack_upgrade(config['openstack-origin'], ' '.join(packages))
-# Ensure keystone group permissions
-execute("chmod -R g+wrx /var/lib/keystone/")
-env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
-'OPENSTACK_PORT_ADMIN': cluster.determine_api_port(
-config['admin-port']),
-'OPENSTACK_PORT_PUBLIC': cluster.determine_api_port(
-config['service-port'])}
-save_script_rc(**env_vars)
-set_admin_token(config['admin-token'])
-if cluster.eligible_leader(CLUSTER_RES):
-utils.juju_log('INFO',
-'Cluster leader - ensuring endpoint configuration'
-' is up to date')
-ensure_initial_admin(config)
-update_config_block('logger_root', level=config['log-level'],
-file='/etc/keystone/logging.conf')
-update_config_block('DEFAULT', use_syslog=config["use-syslog"])
-if get_os_version_package('keystone') >= '2013.1':
-# PKI introduced in Grizzly
-configure_pki_tokens(config)
-if config_dirty():
-utils.restart('keystone')
-time.sleep(10)
-if cluster.eligible_leader(CLUSTER_RES):
-utils.juju_log('INFO',
-'Firing identity_changed hook'
-' for all related services.')
-# HTTPS may have been set - so fire all identity relations
-# again
-for r_id in utils.relation_ids('identity-service'):
-for unit in utils.relation_list(r_id):
-identity_changed(relation_id=r_id,
-remote_unit=unit)
-def upgrade_charm():
-# Ensure all required packages are installed
-utils.install(*packages)
-cluster_changed()
-if cluster.eligible_leader(CLUSTER_RES):
-utils.juju_log('INFO',
-'Cluster leader - ensuring endpoint configuration'
-' is up to date')
-ensure_initial_admin(config)
+if eligible_leader(CLUSTER_RES):
+add_service_to_keystone(relation_id, remote_unit)
+synchronize_service_credentials()
+else:
+log('Deferring identity_changed() to service leader.')
+@hooks.hook('cluster-relation-joined')
def cluster_joined():
unison.ssh_authorized_peers(user=SSH_USER,
-group='keystone',
+group='juju_keystone',
peer_interface='cluster',
ensure_local_user=True)
-update_config_block('DEFAULT',
-public_port=cluster.determine_api_port(config["service-port"]))
-update_config_block('DEFAULT',
-admin_port=cluster.determine_api_port(config["admin-port"]))
-if config_dirty():
-utils.restart('keystone')
-service_ports = {
-"keystone_admin": [
-cluster.determine_haproxy_port(config['admin-port']),
-cluster.determine_api_port(config["admin-port"])
-],
-"keystone_service": [
-cluster.determine_haproxy_port(config['service-port']),
-cluster.determine_api_port(config["service-port"])
-]
-}
-haproxy.configure_haproxy(service_ports)
+@hooks.hook('cluster-relation-changed',
+'cluster-relation-departed')
+@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
unison.ssh_authorized_peers(user=SSH_USER,
group='keystone',
peer_interface='cluster',
ensure_local_user=True)
synchronize_service_credentials()
-service_ports = {
-"keystone_admin": [
-cluster.determine_haproxy_port(config['admin-port']),
-cluster.determine_api_port(config["admin-port"])
-],
-"keystone_service": [
-cluster.determine_haproxy_port(config['service-port']),
-cluster.determine_api_port(config["service-port"])
-]
-}
-haproxy.configure_haproxy(service_ports)
-def ha_relation_changed():
-relation_data = utils.relation_get_dict()
-if ('clustered' in relation_data and
-cluster.is_leader(CLUSTER_RES)):
-utils.juju_log('INFO',
-'Cluster configured, notifying other services'
-' and updating keystone endpoint configuration')
-# Update keystone endpoint to point at VIP
-ensure_initial_admin(config)
-# Tell all related services to start using
-# the VIP and haproxy ports instead
-for r_id in utils.relation_ids('identity-service'):
-utils.relation_set(rid=r_id,
-auth_host=config['vip'],
-service_host=config['vip'])
-def ha_relation_joined():
-# Obtain the config values necessary for the cluster config. These
-# include multicast port and interface to bind to.
-corosync_bindiface = config['ha-bindiface']
-corosync_mcastport = config['ha-mcastport']
-vip = config['vip']
-vip_cidr = config['vip_cidr']
-vip_iface = config['vip_iface']
-# Obtain resources
+CONFIGS.write_all()
+@hooks.hook('ha-relation-joined')
+def ha_joined():
+config = get_hacluster_config()
resources = {
'res_ks_vip': 'ocf:heartbeat:IPaddr2',
-'res_ks_haproxy': 'lsb:haproxy'
+'res_ks_haproxy': 'lsb:haproxy',
}
+vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
+(config['vip'], config['vip_cidr'], config['vip_iface'])
resource_params = {
-'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
-(vip, vip_cidr, vip_iface),
+'res_ks_vip': vip_params,
'res_ks_haproxy': 'op monitor interval="5s"'
}
init_services = {
'res_ks_haproxy': 'haproxy'
}
clones = {
'cl_ks_haproxy': 'res_ks_haproxy'
}
-utils.relation_set(init_services=init_services,
-corosync_bindiface=corosync_bindiface,
-corosync_mcastport=corosync_mcastport,
-resources=resources,
-resource_params=resource_params,
-clones=clones)
-hooks = {
-"install": install_hook,
-"shared-db-relation-joined": db_joined,
-"shared-db-relation-changed": db_changed,
-"identity-service-relation-joined": identity_joined,
-"identity-service-relation-changed": identity_changed,
-"config-changed": config_changed,
-"cluster-relation-joined": cluster_joined,
-"cluster-relation-changed": cluster_changed,
-"cluster-relation-departed": cluster_changed,
-"ha-relation-joined": ha_relation_joined,
-"ha-relation-changed": ha_relation_changed,
-"upgrade-charm": upgrade_charm
-}
-utils.do_hooks(hooks)
+relation_set(init_services=init_services,
+corosync_bindiface=config['ha-bindiface'],
+corosync_mcastport=config['ha-mcastport'],
+resources=resources,
+resource_params=resource_params,
+clones=clones)
+@hooks.hook('ha-relation-changed')
+@restart_on_change(restart_map())
+def ha_changed():
+clustered = relation_get('clustered')
+CONFIGS.write_all()
+if (clustered is not None and
+is_leader(CLUSTER_RES)):
+ensure_initial_admin(config)
+log('Cluster configured, notifying other services and updating '
+'keystone endpoint configuration')
+for rid in relation_ids('identity-service'):
+relation_set(rid=rid,
+auth_host=config('vip'),
+service_host=config('vip'))
+def configure_https():
+'''
+Enables SSL API Apache config if appropriate and kicks identity-service
+with any required api updates.
+'''
+# need to write all to ensure changes to the entire request pipeline
+# propagate (c-api, haproxy, apache)
+CONFIGS.write_all()
+if 'https' in CONFIGS.complete_contexts():
+cmd = ['a2ensite', 'openstack_https_frontend']
+check_call(cmd)
+else:
+cmd = ['a2dissite', 'openstack_https_frontend']
+check_call(cmd)
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+def upgrade_charm():
+apt_install(filter_installed_packages(determine_packages()))
+cluster_changed()
+if eligible_leader(CLUSTER_RES):
+log('Cluster leader - ensuring endpoint configuration'
+' is up to date')
+time.sleep(10)
+ensure_initial_admin(config)
+CONFIGS.write_all()
+def main():
+try:
+hooks.execute(sys.argv)
+except UnregisteredHookError as e:
+log('Unknown hook {} - skipping.'.format(e))
+if __name__ == '__main__':
+main()

View File

@ -1,10 +1,12 @@
#!/usr/bin/python
+import base64
import os
import shutil
import subprocess
import tarfile
import tempfile
+import zipfile
CA_EXPIRY = '365'
ORG_NAME = 'Ubuntu'
@ -113,7 +115,7 @@ def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
if not os.path.exists(d):
print 'Creating %s.' % d
os.mkdir(d)
-os.chmod(os.path.join(ca_dir, 'private'), 0710)
+os.chmod(os.path.join(ca_dir, 'private'), 0o710)
if not os.path.isfile(os.path.join(ca_dir, 'serial')):
with open(os.path.join(ca_dir, 'serial'), 'wb') as out:
@ -161,7 +163,7 @@ def intermediate_ca_csr_key(ca_dir):
def sign_int_csr(ca_dir, csr, common_name):
print 'Signing certificate request %s.' % csr
crt = os.path.join(ca_dir, 'certs',
'%s.crt' % os.path.basename(csr).split('.')[0])
subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
cmd = ['openssl', 'ca', '-batch', '-config',
os.path.join(ca_dir, 'ca.cnf'),
@ -238,6 +240,7 @@ def tar_directory(path):
class JujuCA(object):
def __init__(self, name, ca_dir, root_ca_dir, user, group):
root_crt, root_key = init_root_ca(root_ca_dir,
'%s Certificate Authority' % name)
@ -288,7 +291,7 @@ class JujuCA(object):
key = open(key, 'r').read()
except:
print 'Could not load ssl private key for %s from %s' %\
(common_name, key)
exit(1)
return crt, key
crt, key = self._create_certificate(common_name, common_name)

789
hooks/keystone_utils.py Executable file → Normal file
View File

@ -1,172 +1,305 @@
#!/usr/bin/python #!/usr/bin/python
import ConfigParser
import sys
import json
import time
import subprocess import subprocess
import os import os
import urlparse
import time
from lib.openstack_common import( from base64 import b64encode
get_os_codename_install_source, from collections import OrderedDict
get_os_codename_package, from copy import deepcopy
from charmhelpers.contrib.hahelpers.cluster import(
eligible_leader,
determine_api_port,
https,
is_clustered
)
from charmhelpers.contrib.openstack import context, templating
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
error_out, error_out,
configure_installation_source get_os_codename_install_source,
) os_release,
save_script_rc as _save_script_rc)
import charmhelpers.contrib.unison as unison
from charmhelpers.core.hookenv import (
config,
log,
relation_get,
relation_set,
unit_private_ip,
INFO,
)
from charmhelpers.fetch import (
apt_install,
apt_update,
)
from charmhelpers.core.host import (
service_stop,
service_start,
)
import keystone_context
import keystone_ssl as ssl import keystone_ssl as ssl
import lib.unison as unison
import lib.utils as utils
import lib.cluster_utils as cluster
TEMPLATES = 'templates/'
keystone_conf = "/etc/keystone/keystone.conf" # removed from original: charm-helper-sh
stored_passwd = "/var/lib/keystone/keystone.passwd" BASE_PACKAGES = [
stored_token = "/var/lib/keystone/keystone.token" 'apache2',
'haproxy',
'openssl',
'python-keystoneclient',
'python-mysqldb',
'pwgen',
'unison',
'uuid',
]
BASE_SERVICES = [
'keystone',
]
API_PORTS = {
'keystone-admin': config('admin-port'),
'keystone-public': config('service-port')
}
KEYSTONE_CONF = "/etc/keystone/keystone.conf"
KEYSTONE_CONF_DIR = os.path.dirname(KEYSTONE_CONF)
STORED_PASSWD = "/var/lib/keystone/keystone.passwd"
STORED_TOKEN = "/var/lib/keystone/keystone.token"
SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd' SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd'
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
SSL_DIR = '/var/lib/keystone/juju_ssl/' SSL_DIR = '/var/lib/keystone/juju_ssl/'
SSL_CA_NAME = 'Ubuntu Cloud' SSL_CA_NAME = 'Ubuntu Cloud'
CLUSTER_RES = 'res_ks_vip' CLUSTER_RES = 'res_ks_vip'
SSH_USER = 'juju_keystone' SSH_USER = 'juju_keystone'
BASE_RESOURCE_MAP = OrderedDict([
(KEYSTONE_CONF, {
'services': BASE_SERVICES,
'contexts': [keystone_context.KeystoneContext(),
context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
context.SyslogContext(),
keystone_context.HAProxyContext()],
}),
(HAPROXY_CONF, {
'contexts': [context.HAProxyContext(),
keystone_context.HAProxyContext()],
'services': ['haproxy'],
}),
(APACHE_CONF, {
'contexts': [keystone_context.ApacheSSLContext()],
'services': ['apache2'],
}),
(APACHE_24_CONF, {
'contexts': [keystone_context.ApacheSSLContext()],
'services': ['apache2'],
}),
])
-def execute(cmd, die=False, echo=False):
-    """ Executes a command
-
-    if die=True, script will exit(1) if command does not return 0
-    if echo=True, output of command will be printed to stdout
-
-    returns a tuple: (stdout, stderr, return code)
-    """
-    p = subprocess.Popen(cmd.split(" "),
-                         stdout=subprocess.PIPE,
-                         stdin=subprocess.PIPE,
-                         stderr=subprocess.PIPE)
-    stdout = ""
-    stderr = ""
-
-    def print_line(l):
-        if echo:
-            print l.strip('\n')
-            sys.stdout.flush()
-
-    for l in iter(p.stdout.readline, ''):
-        print_line(l)
-        stdout += l
-    for l in iter(p.stderr.readline, ''):
-        print_line(l)
-        stderr += l
-
-    p.communicate()
-    rc = p.returncode
-
-    if die and rc != 0:
-        error_out("ERROR: command %s return non-zero.\n" % cmd)
-    return (stdout, stderr, rc)
-
-def config_get():
-    """ Obtain the units config via 'config-get'
-    Returns a dict representing current config.
-    private-address and IP of the unit are also tacked on for
-    convenience
-    """
-    output = execute("config-get --format json")[0]
-    config = json.loads(output)
-    # make sure no config element is blank after config-get
-    for c in config.keys():
-        if config[c] is None:
-            error_out("ERROR: Config option has no parameter: %s" % c)
-    # tack on our private address and ip
-    config["hostname"] = utils.unit_get('private-address')
-    return config
-
-@utils.cached
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'

valid_services = {
    "nova": {
        "type": "compute",
        "desc": "Nova Compute Service"
    },
    "nova-volume": {
        "type": "volume",
        "desc": "Nova Volume Service"
    },
    "cinder": {
        "type": "volume",
        "desc": "Cinder Volume Service"
    },
    "ec2": {
        "type": "ec2",
        "desc": "EC2 Compatibility Layer"
    },
    "glance": {
        "type": "image",
        "desc": "Glance Image Service"
    },
    "s3": {
        "type": "s3",
        "desc": "S3 Compatible object-store"
    },
    "swift": {
        "type": "object-store",
        "desc": "Swift Object Storage Service"
    },
    "quantum": {
        "type": "network",
        "desc": "Quantum Networking Service"
    },
    "oxygen": {
        "type": "oxygen",
        "desc": "Oxygen Cloud Image Service"
    },
    "ceilometer": {
        "type": "metering",
        "desc": "Ceilometer Metering Service"
    },
    "heat": {
        "type": "orchestration",
        "desc": "Heat Orchestration API"
    },
    "heat-cfn": {
        "type": "cloudformation",
        "desc": "Heat CloudFormation API"
    }
}

def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    resource_map = deepcopy(BASE_RESOURCE_MAP)

    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)
    return resource_map

def register_configs():
    release = os_release('keystone')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    for cfg, rscs in resource_map().iteritems():
        configs.register(cfg, rscs['contexts'])
    return configs
def restart_map():
return OrderedDict([(cfg, v['services'])
for cfg, v in resource_map().iteritems()
if v['services']])
def determine_ports():
'''Assemble a list of API ports for services we are managing'''
ports = [config('admin-port'), config('service-port')]
return list(set(ports))
def api_port(service):
return API_PORTS[service]
def determine_packages():
# currently all packages match service names
packages = [] + BASE_PACKAGES
for k, v in resource_map().iteritems():
packages.extend(v['services'])
return list(set(packages))
def save_script_rc():
env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
'OPENSTACK_PORT_ADMIN': determine_api_port(
api_port('keystone-admin')),
'OPENSTACK_PORT_PUBLIC': determine_api_port(
api_port('keystone-public'))}
_save_script_rc(**env_vars)
def do_openstack_upgrade(configs):
new_src = config('openstack-origin')
new_os_rel = get_os_codename_install_source(new_src)
log('Performing OpenStack upgrade to %s.' % (new_os_rel))
configure_installation_source(new_src)
apt_update()
dpkg_opts = [
'--option', 'Dpkg::Options::=--force-confnew',
'--option', 'Dpkg::Options::=--force-confdef',
]
apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True)
# set CONFIGS to load templates from new release and regenerate config
configs.set_release(openstack_release=new_os_rel)
configs.write_all()
if eligible_leader(CLUSTER_RES):
migrate_database()
def migrate_database():
'''Runs keystone-manage to initialize a new database or migrate existing'''
log('Migrating the keystone database.', level=INFO)
service_stop('keystone')
cmd = ['keystone-manage', 'db_sync']
subprocess.check_output(cmd)
service_start('keystone')
time.sleep(10)
def get_local_endpoint():
    """ Returns the URL for the local end-point bypassing haproxy/ssl """
    local_endpoint = 'http://localhost:{}/v2.0/'.format(
-        cluster.determine_api_port(utils.config_get('admin-port'))
        determine_api_port(api_port('keystone-admin'))
    )
    return local_endpoint

-def set_admin_token(admin_token):
def set_admin_token(admin_token='None'):
    """Set admin token according to deployment config or use a randomly
    generated token if none is specified (default).
    """
    if admin_token != 'None':
-        utils.juju_log('INFO',
-                       'Configuring Keystone to use'
-                       ' a pre-configured admin token.')
        log('Configuring Keystone to use a pre-configured admin token.')
        token = admin_token
    else:
-        utils.juju_log('INFO',
-                       'Configuring Keystone to use a random admin token.')
        log('Configuring Keystone to use a random admin token.')
-        if os.path.isfile(stored_token):
        if os.path.isfile(STORED_TOKEN):
            msg = 'Loading a previously generated' \
-                  ' admin token from %s' % stored_token
                  ' admin token from %s' % STORED_TOKEN
-            utils.juju_log('INFO', msg)
            log(msg)
-            f = open(stored_token, 'r')
            f = open(STORED_TOKEN, 'r')
            token = f.read().strip()
            f.close()
        else:
-            token = execute('pwgen -c 32 1', die=True)[0].strip()
-            out = open(stored_token, 'w')
            cmd = ['pwgen', '-c', '32', '1']
            token = str(subprocess.check_output(cmd)).strip()
            out = open(STORED_TOKEN, 'w')
            out.write('%s\n' % token)
            out.close()
-    update_config_block('DEFAULT', admin_token=token)
    return(token)

def get_admin_token():
    """Temporary utility to grab the admin token as configured in
    keystone.conf
    """
-    with open(keystone_conf, 'r') as f:
    with open(KEYSTONE_CONF, 'r') as f:
        for l in f.readlines():
            if l.split(' ')[0] == 'admin_token':
                try:
                    return l.split('=')[1].strip()
                except:
                    error_out('Could not parse admin_token line from %s' %
-                              keystone_conf)
                              KEYSTONE_CONF)
-    error_out('Could not find admin_token line in %s' % keystone_conf)
    error_out('Could not find admin_token line in %s' % KEYSTONE_CONF)
-# Track all updated config settings.
-_config_dirty = [False]
-
-def config_dirty():
-    return True in _config_dirty
-
-def update_config_block(section, **kwargs):
-    """ Updates keystone.conf blocks given kwargs.
-    Update a config setting in a specific setting of a config
-    file (/etc/keystone/keystone.conf, by default)
-    """
-    if 'file' in kwargs:
-        conf_file = kwargs['file']
-        del kwargs['file']
-    else:
-        conf_file = keystone_conf
-    config = ConfigParser.RawConfigParser()
-    config.read(conf_file)
-
-    if section != 'DEFAULT' and not config.has_section(section):
-        config.add_section(section)
-        _config_dirty[0] = True
-
-    for k, v in kwargs.iteritems():
-        try:
-            cur = config.get(section, k)
-            if cur != v:
-                _config_dirty[0] = True
-        except (ConfigParser.NoSectionError,
-                ConfigParser.NoOptionError):
-            _config_dirty[0] = True
-        config.set(section, k, v)
-
-    with open(conf_file, 'wb') as out:
-        config.write(out)
def create_service_entry(service_name, service_type, service_desc, owner=None):
@ -176,17 +309,15 @@ def create_service_entry(service_name, service_type, service_desc, owner=None):
                                      token=get_admin_token())
    for service in [s._info for s in manager.api.services.list()]:
        if service['name'] == service_name:
-            utils.juju_log('INFO',
-                           "Service entry for '%s' already exists." % \
-                           service_name)
            log("Service entry for '%s' already exists." % service_name)
            return
    manager.api.services.create(name=service_name,
                                service_type=service_type,
                                description=service_desc)
-    utils.juju_log('INFO', "Created new service entry '%s'" % service_name)
    log("Created new service entry '%s'" % service_name)
def create_endpoint_template(region, service, publicurl, adminurl,
                             internalurl):
    """ Create a new endpoint template for service if one does not already
        exist matching name *and* region """
@ -196,9 +327,8 @@ def create_endpoint_template(region, service, publicurl, adminurl,
    service_id = manager.resolve_service_id(service)
    for ep in [e._info for e in manager.api.endpoints.list()]:
        if ep['service_id'] == service_id and ep['region'] == region:
-            utils.juju_log('INFO',
-                           "Endpoint template already exists for '%s' in '%s'"
-                           % (service, region))
            log("Endpoint template already exists for '%s' in '%s'"
                % (service, region))
            up_to_date = True
            for k in ['publicurl', 'adminurl', 'internalurl']:
@ -209,9 +339,7 @@ def create_endpoint_template(region, service, publicurl, adminurl,
                return
            else:
                # delete endpoint and recreate if endpoint urls need updating.
-                utils.juju_log('INFO',
-                               "Updating endpoint template with"
-                               " new endpoint urls.")
                log("Updating endpoint template with new endpoint urls.")
                manager.api.endpoints.delete(ep['id'])
    manager.api.endpoints.create(region=region,
@ -219,8 +347,7 @@ def create_endpoint_template(region, service, publicurl, adminurl,
                                 publicurl=publicurl,
                                 adminurl=adminurl,
                                 internalurl=internalurl)
-    utils.juju_log('INFO', "Created new endpoint template for '%s' in '%s'" %
-                   (region, service))
    log("Created new endpoint template for '%s' in '%s'" % (region, service))
def create_tenant(name):
@ -232,9 +359,9 @@ def create_tenant(name):
    if not tenants or name not in [t['name'] for t in tenants]:
        manager.api.tenants.create(tenant_name=name,
                                   description='Created by Juju')
-        utils.juju_log('INFO', "Created new tenant: %s" % name)
        log("Created new tenant: %s" % name)
        return
-    utils.juju_log('INFO', "Tenant '%s' already exists." % name)
    log("Tenant '%s' already exists." % name)
def create_user(name, password, tenant):
@ -251,10 +378,9 @@ def create_user(name, password, tenant):
                                 password=password,
                                 email='juju@localhost',
                                 tenant_id=tenant_id)
-        utils.juju_log('INFO', "Created new user '%s' tenant: %s" % \
-                       (name, tenant_id))
        log("Created new user '%s' tenant: %s" % (name, tenant_id))
        return
-    utils.juju_log('INFO', "A user named '%s' already exists" % name)
    log("A user named '%s' already exists" % name)
def create_role(name, user=None, tenant=None):
@ -265,9 +391,9 @@ def create_role(name, user=None, tenant=None):
    roles = [r._info for r in manager.api.roles.list()]
    if not roles or name not in [r['name'] for r in roles]:
        manager.api.roles.create(name=name)
-        utils.juju_log('INFO', "Created new role '%s'" % name)
        log("Created new role '%s'" % name)
    else:
-        utils.juju_log('INFO', "A role named '%s' already exists" % name)
        log("A role named '%s' already exists" % name)

    if not user and not tenant:
        return
@ -279,7 +405,7 @@ def create_role(name, user=None, tenant=None):
    if None in [user_id, role_id, tenant_id]:
        error_out("Could not resolve [%s, %s, %s]" %
                  (user_id, role_id, tenant_id))
    grant_role(user, name, tenant)
@ -289,8 +415,8 @@ def grant_role(user, role, tenant):
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
-    utils.juju_log('INFO', "Granting user '%s' role '%s' on tenant '%s'" % \
-                   (user, role, tenant))
    log("Granting user '%s' role '%s' on tenant '%s'" %
        (user, role, tenant))
    user_id = manager.resolve_user_id(user)
    role_id = manager.resolve_role_id(role)
    tenant_id = manager.resolve_tenant_id(tenant)
@ -300,28 +426,11 @@ def grant_role(user, role, tenant):
        manager.api.roles.add_user_role(user=user_id,
                                        role=role_id,
                                        tenant=tenant_id)
-        utils.juju_log('INFO', "Granted user '%s' role '%s' on tenant '%s'" % \
-                       (user, role, tenant))
        log("Granted user '%s' role '%s' on tenant '%s'" %
            (user, role, tenant))
    else:
-        utils.juju_log('INFO',
-                       "User '%s' already has role '%s' on tenant '%s'" % \
-                       (user, role, tenant))
        log("User '%s' already has role '%s' on tenant '%s'" %
            (user, role, tenant))
-def generate_admin_token(config):
-    """ generate and add an admin token """
-    import manager
-    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
-                                      token='ADMIN')
-    if config["admin-token"] == "None":
-        import random
-        token = random.randrange(1000000000000, 9999999999999)
-    else:
-        return config["admin-token"]
-    manager.api.add_token(token, config["admin-user"],
-                          "admin", config["token-expiry"])
-    utils.juju_log('INFO', "Generated and added new random admin token.")
-    return token
def ensure_initial_admin(config):
@ -335,48 +444,53 @@ def ensure_initial_admin(config):
       changes?
    """
    create_tenant("admin")
-    create_tenant(config["service-tenant"])
    create_tenant(config("service-tenant"))

    passwd = ""
-    if config["admin-password"] != "None":
-        passwd = config["admin-password"]
    if config("admin-password") != "None":
        passwd = config("admin-password")
-    elif os.path.isfile(stored_passwd):
-        utils.juju_log('INFO', "Loading stored passwd from %s" % stored_passwd)
-        passwd = open(stored_passwd, 'r').readline().strip('\n')
    elif os.path.isfile(STORED_PASSWD):
        log("Loading stored passwd from %s" % STORED_PASSWD)
        passwd = open(STORED_PASSWD, 'r').readline().strip('\n')
    if passwd == "":
-        utils.juju_log('INFO', "Generating new passwd for user: %s" % \
-                       config["admin-user"])
-        passwd = execute("pwgen -c 16 1", die=True)[0]
-        open(stored_passwd, 'w+').writelines("%s\n" % passwd)
        log("Generating new passwd for user: %s" %
            config("admin-user"))
        cmd = ['pwgen', '-c', '16', '1']
        passwd = str(subprocess.check_output(cmd)).strip()
        open(STORED_PASSWD, 'w+').writelines("%s\n" % passwd)

-    create_user(config['admin-user'], passwd, tenant='admin')
-    update_user_password(config['admin-user'], passwd)
-    create_role(config['admin-role'], config['admin-user'], 'admin')
    create_user(config('admin-user'), passwd, tenant='admin')
    update_user_password(config('admin-user'), passwd)
    create_role(config('admin-role'), config('admin-user'), 'admin')
    # TODO(adam_g): The following roles are likely not needed since redux merge
-    create_role("KeystoneAdmin", config["admin-user"], 'admin')
-    create_role("KeystoneServiceAdmin", config["admin-user"], 'admin')
    create_role("KeystoneAdmin", config("admin-user"), 'admin')
    create_role("KeystoneServiceAdmin", config("admin-user"), 'admin')
    create_service_entry("keystone", "identity", "Keystone Identity Service")

-    if cluster.is_clustered():
-        utils.juju_log('INFO', "Creating endpoint for clustered configuration")
-        service_host = auth_host = config["vip"]
    if is_clustered():
        log("Creating endpoint for clustered configuration")
        service_host = auth_host = config("vip")
    else:
-        utils.juju_log('INFO', "Creating standard endpoint")
-        service_host = auth_host = config["hostname"]
        log("Creating standard endpoint")
        service_host = auth_host = unit_private_ip()

-    for region in config['region'].split():
    for region in config('region').split():
        create_keystone_endpoint(service_host=service_host,
-                                 service_port=config["service-port"],
                                 service_port=config("service-port"),
                                 auth_host=auth_host,
-                                 auth_port=config["admin-port"],
                                 auth_port=config("admin-port"),
                                 region=region)

def create_keystone_endpoint(service_host, service_port,
                             auth_host, auth_port, region):
-    public_url = "http://%s:%s/v2.0" % (service_host, service_port)
-    admin_url = "http://%s:%s/v2.0" % (auth_host, auth_port)
-    internal_url = "http://%s:%s/v2.0" % (service_host, service_port)
    proto = 'http'
    if https():
        log("Setting https keystone endpoint")
        proto = 'https'
    public_url = "%s://%s:%s/v2.0" % (proto, service_host, service_port)
    admin_url = "%s://%s:%s/v2.0" % (proto, auth_host, auth_port)
    internal_url = "%s://%s:%s/v2.0" % (proto, service_host, service_port)
    create_endpoint_template(region, "keystone", public_url,
                             admin_url, internal_url)
@ -385,15 +499,15 @@ def update_user_password(username, password):
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
-    utils.juju_log('INFO', "Updating password for user '%s'" % username)
    log("Updating password for user '%s'" % username)

    user_id = manager.resolve_user_id(username)
    if user_id is None:
        error_out("Could not resolve user id for '%s'" % username)

    manager.api.users.update_password(user=user_id, password=password)
-    utils.juju_log('INFO', "Successfully updated password for user '%s'" % \
-                   username)
    log("Successfully updated password for user '%s'" %
        username)
def load_stored_passwords(path=SERVICE_PASSWD_PATH):
@ -425,91 +539,22 @@ def get_service_password(service_username):
    return passwd
-def configure_pki_tokens(config):
-    '''Configure PKI token signing, if enabled.'''
-    if config['enable-pki'] not in ['True', 'true']:
-        update_config_block('signing', token_format='UUID')
-    else:
-        utils.juju_log('INFO', 'TODO: PKI Support, setting to UUID for now.')
-        update_config_block('signing', token_format='UUID')
-
-def do_openstack_upgrade(install_src, packages):
-    '''Upgrade packages from a given install src.'''
-    config = config_get()
-    old_vers = get_os_codename_package('keystone')
-    new_vers = get_os_codename_install_source(install_src)
-    utils.juju_log('INFO',
-                   "Beginning Keystone upgrade: %s -> %s" % \
-                   (old_vers, new_vers))
-
-    # Backup previous config.
-    utils.juju_log('INFO', "Backing up contents of /etc/keystone.")
-    stamp = time.strftime('%Y%m%d%H%M')
-    cmd = 'tar -pcf /var/lib/juju/keystone-backup-%s.tar /etc/keystone' % stamp
-    execute(cmd, die=True, echo=True)
-
-    configure_installation_source(install_src)
-    execute('apt-get update', die=True, echo=True)
-    os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
-    cmd = 'apt-get --option Dpkg::Options::=--force-confnew -y '\
-          'dist-upgrade'
-    execute(cmd, echo=True, die=True)
-
-    # we have new, fresh config files that need updating.
-    # set the admin token, which is still stored in config.
-    set_admin_token(config['admin-token'])
-
-    # set the sql connection string if a shared-db relation is found.
-    ids = utils.relation_ids('shared-db')
-    if ids:
-        for rid in ids:
-            for unit in utils.relation_list(rid):
-                utils.juju_log('INFO',
-                               'Configuring new keystone.conf for '
-                               'database access on existing database'
-                               ' relation to %s' % unit)
-                relation_data = utils.relation_get_dict(relation_id=rid,
-                                                        remote_unit=unit)
-                update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
-                                    (config["database-user"],
-                                     relation_data["password"],
-                                     relation_data["private-address"],
-                                     config["database"]))
-
-    utils.stop('keystone')
-    if (cluster.eligible_leader(CLUSTER_RES)):
-        utils.juju_log('INFO',
-                       'Running database migrations for %s' % new_vers)
-        execute('keystone-manage db_sync', echo=True, die=True)
-    else:
-        utils.juju_log('INFO',
-                       'Not cluster leader; snoozing whilst'
-                       ' leader upgrades DB')
-        time.sleep(10)
-
-    utils.start('keystone')
-    time.sleep(5)
-    utils.juju_log('INFO',
-                   'Completed Keystone upgrade: '
-                   '%s -> %s' % (old_vers, new_vers))
def synchronize_service_credentials():
    '''
    Broadcast service credentials to peers or consume those that have been
    broadcasted by peer, depending on hook context.
    '''
-    if (not cluster.eligible_leader(CLUSTER_RES) or
    if (not eligible_leader(CLUSTER_RES) or
            not os.path.isfile(SERVICE_PASSWD_PATH)):
        return
-    utils.juju_log('INFO', 'Synchronizing service passwords to all peers.')
    log('Synchronizing service passwords to all peers.')
-    unison.sync_to_peers(peer_interface='cluster',
-                         paths=[SERVICE_PASSWD_PATH], user=SSH_USER,
-                         verbose=True)
    if is_clustered():
        unison.sync_to_peers(peer_interface='cluster',
                             paths=[SERVICE_PASSWD_PATH], user=SSH_USER,
                             verbose=True)
    if config('https-service-endpoints') in ['True', 'true']:
        unison.sync_to_peers(peer_interface='cluster',
                             paths=[SSL_DIR], user=SSH_USER, verbose=True)

CA = []
@ -527,18 +572,208 @@ def get_ca(user='keystone', group='keystone'):
                    ca_dir=os.path.join(SSL_DIR,
                                        '%s_intermediate_ca' % d_name),
                    root_ca_dir=os.path.join(SSL_DIR,
                                             '%s_root_ca' % d_name))
        # SSL_DIR is synchronized via all peers over unison+ssh, need
        # to ensure permissions.
-        execute('chown -R %s.%s %s' % (user, group, SSL_DIR))
-        execute('chmod -R g+rwx %s' % SSL_DIR)
        subprocess.check_output(['chown', '-R', '%s.%s' % (user, group),
                                 '%s' % SSL_DIR])
        subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR])
        CA.append(ca)
    return CA[0]

-def https():
-    if (utils.config_get('https-service-endpoints') in ["yes", "true", "True"]
-            or cluster.https()):
-        return True
-    else:
-        return False
def relation_list(rid):
    cmd = [
        'relation-list',
        '-r', rid,
    ]
    result = str(subprocess.check_output(cmd)).split()
    if result == "":
        return None
    else:
        return result
def add_service_to_keystone(relation_id=None, remote_unit=None):
settings = relation_get(rid=relation_id, unit=remote_unit)
# the minimum settings needed per endpoint
single = set(['service', 'region', 'public_url', 'admin_url',
'internal_url'])
if single.issubset(settings):
# other end of relation advertised only one endpoint
if 'None' in [v for k, v in settings.iteritems()]:
# Some backend services advertise no endpoint but require a
# hook execution to update auth strategy.
relation_data = {}
# Check if clustered and use vip + haproxy ports if so
if is_clustered():
relation_data["auth_host"] = config('vip')
relation_data["service_host"] = config('vip')
else:
relation_data["auth_host"] = unit_private_ip()
relation_data["service_host"] = unit_private_ip()
if https():
relation_data["auth_protocol"] = "https"
relation_data["service_protocol"] = "https"
else:
relation_data["auth_protocol"] = "http"
relation_data["service_protocol"] = "http"
relation_data["auth_port"] = config('admin-port')
relation_data["service_port"] = config('service-port')
if config('https-service-endpoints') in ['True', 'true']:
# Pass CA cert as client will need it to
# verify https connections
ca = get_ca(user=SSH_USER)
ca_bundle = ca.get_ca_bundle()
relation_data['https_keystone'] = 'True'
relation_data['ca_cert'] = b64encode(ca_bundle)
# Allow the remote service to request creation of any additional
# roles. Currently used by Horizon
for role in get_requested_roles(settings):
log("Creating requested role: %s" % role)
create_role(role)
relation_set(relation_id=relation_id,
**relation_data)
return
else:
ensure_valid_service(settings['service'])
add_endpoint(region=settings['region'],
service=settings['service'],
publicurl=settings['public_url'],
adminurl=settings['admin_url'],
internalurl=settings['internal_url'])
service_username = settings['service']
https_cn = urlparse.urlparse(settings['internal_url'])
https_cn = https_cn.hostname
else:
# assemble multiple endpoints from relation data. service name
        # should be prepended to setting name, i.e.:
        #   relation-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
# relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
# Results in a dict that looks like:
# { 'ec2': {
# 'service': $foo
# 'region': $foo
# 'public_url': $foo
# }
# 'nova': {
# 'service': $foo
# 'region': $foo
# 'public_url': $foo
# }
# }
endpoints = {}
for k, v in settings.iteritems():
ep = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if ep not in endpoints:
endpoints[ep] = {}
endpoints[ep][x] = v
services = []
https_cn = None
for ep in endpoints:
# weed out any unrelated relation stuff Juju might have added
            # by ensuring each possible endpoint has appropriate fields
# ['service', 'region', 'public_url', 'admin_url', 'internal_url']
if single.issubset(endpoints[ep]):
ep = endpoints[ep]
ensure_valid_service(ep['service'])
add_endpoint(region=ep['region'], service=ep['service'],
publicurl=ep['public_url'],
adminurl=ep['admin_url'],
internalurl=ep['internal_url'])
services.append(ep['service'])
if not https_cn:
https_cn = urlparse.urlparse(ep['internal_url'])
https_cn = https_cn.hostname
service_username = '_'.join(services)
if 'None' in [v for k, v in settings.iteritems()]:
return
if not service_username:
return
token = get_admin_token()
log("Creating service credentials for '%s'" % service_username)
service_password = get_service_password(service_username)
create_user(service_username, service_password, config('service-tenant'))
grant_role(service_username, config('admin-role'),
config('service-tenant'))
# Allow the remote service to request creation of any additional roles.
# Currently used by Swift and Ceilometer.
for role in get_requested_roles(settings):
log("Creating requested role: %s" % role)
create_role(role, service_username,
config('service-tenant'))
# As of https://review.openstack.org/#change,4675, all nodes hosting
    # an endpoint(s) need a service username and password assigned to
# the service tenant and granted admin role.
# note: config('service-tenant') is created in utils.ensure_initial_admin()
# we return a token, information about our API endpoints, and the generated
# service credentials
relation_data = {
"admin_token": token,
"service_host": unit_private_ip(),
"service_port": config("service-port"),
"auth_host": unit_private_ip(),
"auth_port": config("admin-port"),
"service_username": service_username,
"service_password": service_password,
"service_tenant": config('service-tenant'),
"https_keystone": "False",
"ssl_cert": "",
"ssl_key": "",
"ca_cert": ""
}
# Check if clustered and use vip + haproxy ports if so
if is_clustered():
relation_data["auth_host"] = config('vip')
relation_data["service_host"] = config('vip')
if https():
relation_data["auth_protocol"] = "https"
relation_data["service_protocol"] = "https"
else:
relation_data["auth_protocol"] = "http"
relation_data["service_protocol"] = "http"
# generate or get a new cert/key for service if set to manage certs.
if config('https-service-endpoints') in ['True', 'true']:
ca = get_ca(user=SSH_USER)
cert, key = ca.get_cert_and_key(common_name=https_cn)
ca_bundle = ca.get_ca_bundle()
relation_data['ssl_cert'] = b64encode(cert)
relation_data['ssl_key'] = b64encode(key)
relation_data['ca_cert'] = b64encode(ca_bundle)
relation_data['https_keystone'] = 'True'
relation_set(relation_id=relation_id,
**relation_data)
def ensure_valid_service(service):
if service not in valid_services.keys():
log("Invalid service requested: '%s'" % service)
relation_set(admin_token=-1)
return
def add_endpoint(region, service, publicurl, adminurl, internalurl):
desc = valid_services[service]["desc"]
service_type = valid_services[service]["type"]
create_service_entry(service, service_type, desc)
create_endpoint_template(region=region, service=service,
publicurl=publicurl,
adminurl=adminurl,
internalurl=internalurl)
def get_requested_roles(settings):
''' Retrieve any valid requested_roles from dict settings '''
if ('requested_roles' in settings and
settings['requested_roles'] not in ['None', None]):
return settings['requested_roles'].split(',')
else:
return []
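
For illustration only, and not part of the commit: the flattened relation settings that add_service_to_keystone() regroups can be reproduced with plain dicts. The service names and URLs below are invented.

    # Hypothetical flat settings, as a remote service would relation-set them.
    settings = {
        'ec2_service': 'ec2',
        'ec2_region': 'RegionOne',
        'ec2_public_url': 'http://10.0.0.1:8773/services/Cloud',
        'ec2_admin_url': 'http://10.0.0.1:8773/services/Admin',
        'ec2_internal_url': 'http://10.0.0.1:8773/services/Cloud',
    }

    endpoints = {}
    for k, v in settings.iteritems():
        ep = k.split('_')[0]              # service prefix, e.g. 'ec2'
        x = '_'.join(k.split('_')[1:])    # remaining key, e.g. 'public_url'
        endpoints.setdefault(ep, {})[x] = v

    # endpoints == {'ec2': {'service': 'ec2', 'region': 'RegionOne', ...}}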
hooks/manager.py
@ -3,6 +3,7 @@ from keystoneclient.v2_0 import client
class KeystoneManager(object):
    def __init__(self, endpoint, token):
        self.api = client.Client(endpoint=endpoint, token=token)
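
As a rough usage sketch (the endpoint and token values here are hypothetical), keystone_utils.py drives this thin wrapper against the local admin API:

    import manager

    mgr = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
                                  token='example-admin-token')
    # The charm code iterates services through the wrapped client:
    print [s._info['name'] for s in mgr.api.services.list()]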
templates/essex/keystone.conf
@ -0,0 +1,93 @@
# essex
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[token]
driver = keystone.token.backends.sql.Token
expiration = 86400
[policy]
driver = keystone.policy.backends.rules.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
[pipeline:admin_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension crud_extension admin_service
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = xml_body public_version_service
[pipeline:admin_version_api]
pipeline = xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/ = admin_version_api
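
A minimal sketch (assuming the helpers defined in hooks/keystone_utils.py above) of how one of these per-release templates is selected and rendered:

    # register_configs() builds an OSConfigRenderer that loads
    # templates/<release>/keystone.conf based on os_release('keystone').
    configs = register_configs()
    configs.write_all()   # render keystone.conf, haproxy.cfg, apache frontend
    restart_map()         # rendered file -> services needing a restart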
templates/folsom/keystone.conf
@ -0,0 +1,118 @@
# folsom
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[token]
driver = keystone.token.backends.sql.Token
expiration = 86400
[policy]
driver = keystone.policy.backends.rules.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
{% if signing -%}
[signing]
token_format = UUID
certfile = /etc/keystone/ssl/certs/signing_cert.pem
keyfile = /etc/keystone/ssl/private/signing_key.pem
ca_certs = /etc/keystone/ssl/certs/ca.pem
key_size = 1024
valid_days = 3650
ca_password = None
{% endif -%}
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = stats_monitoring url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = stats_monitoring url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/ = admin_version_api
templates/grizzly/keystone.conf
@ -0,0 +1,140 @@
# grizzly
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[trust]
driver = keystone.trust.backends.sql.Trust
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[token]
driver = keystone.token.backends.sql.Token
expiration = 86400
[policy]
driver = keystone.policy.backends.rules.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
{% if signing -%}
[signing]
token_format = UUID
certfile = /etc/keystone/ssl/certs/signing_cert.pem
keyfile = /etc/keystone/ssl/private/signing_key.pem
ca_certs = /etc/keystone/ssl/certs/ca.pem
key_size = 1024
valid_days = 3650
ca_password = None
{% endif -%}
[auth]
methods = password,token
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
[pipeline:api_v3]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
templates/haproxy.cfg
@ -8,8 +8,8 @@ global
defaults
    log global
-    mode http
    mode tcp
-    option httplog
    option tcplog
    option dontlognull
    retries 3
    timeout queue 1000
@ -19,6 +19,7 @@ defaults
listen stats :8888
    mode http
    option httplog
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
@ -28,7 +29,6 @@ listen stats :8888
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
    balance roundrobin
-    option tcplog
    {% for unit, address in units.iteritems() -%}
    server {{ unit }} {{ address }}:{{ ports[1] }} check
    {% endfor %}
templates/havana/keystone.conf
@ -0,0 +1,73 @@
# havana
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
admin_token = {{ token }}
admin_port = {{ admin_port }}
public_port = {{ public_port }}
use_syslog = {{ use_syslog }}
log_config = /etc/keystone/logging.conf
debug = {{ debug }}
verbose = {{ verbose }}
[sql]
{% if database_host -%}
connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
{% else -%}
connection = sqlite:////var/lib/keystone/keystone.db
{% endif -%}
idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[credential]
driver = keystone.credential.backends.sql.Credential
[trust]
driver = keystone.trust.backends.sql.Trust
[os_inherit]
[catalog]
driver = keystone.catalog.backends.sql.Catalog
[endpoint_filter]
[token]
driver = keystone.token.backends.sql.Token
expiration = 86400
[cache]
[policy]
driver = keystone.policy.backends.rules.Policy
[ec2]
driver = keystone.contrib.ec2.backends.kvs.Ec2
[assignment]
[oauth1]
{% if signing -%}
[signing]
token_format = UUID
certfile = /etc/keystone/ssl/certs/signing_cert.pem
keyfile = /etc/keystone/ssl/private/signing_key.pem
ca_certs = /etc/keystone/ssl/certs/ca.pem
key_size = 1024
valid_days = 3650
ca_password = None
{% endif -%}
[auth]
methods = external,password,token,oauth1
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
oauth1 = keystone.auth.plugins.oauth1.OAuth
[paste_deploy]
config_file = keystone-paste.ini