From 6a5278dacd55d497486c6e3010fed97837a88d36 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 27 Sep 2013 13:02:37 +0100 Subject: [PATCH] Redux --- Makefile | 14 + charm-helpers.yaml | 9 + hooks/{lib => charmhelpers}/__init__.py | 0 hooks/charmhelpers/contrib/__init__.py | 0 .../contrib/hahelpers/__init__.py | 0 .../charmhelpers/contrib/hahelpers/apache.py | 58 +++ .../contrib/hahelpers/cluster.py} | 87 +++- .../contrib/openstack/__init__.py | 0 .../charmhelpers/contrib/openstack/context.py | 433 ++++++++++++++++++ .../charmhelpers/contrib/openstack/neutron.py | 108 +++++ .../contrib/openstack/templates/__init__.py | 2 + .../contrib/openstack/templates/ceph.conf | 11 + .../contrib/openstack/templates}/haproxy.cfg | 4 +- .../templates/openstack_https_frontend | 4 + .../templates/openstack_https_frontend.conf | 23 + .../contrib/openstack/templating.py | 280 +++++++++++ hooks/charmhelpers/contrib/openstack/utils.py | 361 +++++++++++++++ hooks/charmhelpers/core/__init__.py | 0 hooks/charmhelpers/core/hookenv.py | 340 ++++++++++++++ hooks/charmhelpers/core/host.py | 241 ++++++++++ hooks/charmhelpers/fetch/__init__.py | 209 +++++++++ hooks/charmhelpers/fetch/archiveurl.py | 48 ++ hooks/charmhelpers/fetch/bzrurl.py | 49 ++ hooks/lib/apache_utils.py | 207 --------- hooks/lib/haproxy_utils.py | 52 --- hooks/lib/openstack_common.py | 233 ---------- hooks/lib/utils.py | 332 -------------- hooks/start | 1 + hooks/stop | 1 + hooks/swift_context.py | 223 +++++++++ hooks/swift_hooks.py | 319 +++++++------ hooks/swift_utils.py | 363 +++++---------- revision | 2 +- templates/folsom/memcached.conf | 1 - templates/folsom/proxy-server.conf | 1 - templates/folsom/swift-rings | 1 - templates/folsom/swift.conf | 1 - templates/grizzly/memcached.conf | 1 - templates/grizzly/proxy-server.conf | 1 - templates/grizzly/swift-rings | 1 - templates/grizzly/swift.conf | 1 - templates/havana/memcached.conf | 1 - templates/havana/swift-rings | 1 - templates/havana/swift.conf | 1 - 
templates/{essex => }/memcached.conf | 0 templates/{essex => }/swift-rings | 0 templates/swift-rings.conf | 1 + templates/{essex => }/swift.conf | 0 48 files changed, 2792 insertions(+), 1234 deletions(-) create mode 100644 Makefile create mode 100644 charm-helpers.yaml rename hooks/{lib => charmhelpers}/__init__.py (100%) create mode 100644 hooks/charmhelpers/contrib/__init__.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/__init__.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/apache.py rename hooks/{lib/cluster_utils.py => charmhelpers/contrib/hahelpers/cluster.py} (55%) create mode 100644 hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/context.py create mode 100644 hooks/charmhelpers/contrib/openstack/neutron.py create mode 100644 hooks/charmhelpers/contrib/openstack/templates/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/templates/ceph.conf rename {templates => hooks/charmhelpers/contrib/openstack/templates}/haproxy.cfg (94%) rename templates/apache2_site.tmpl => hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (86%) create mode 100644 hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf create mode 100644 hooks/charmhelpers/contrib/openstack/templating.py create mode 100644 hooks/charmhelpers/contrib/openstack/utils.py create mode 100644 hooks/charmhelpers/core/__init__.py create mode 100644 hooks/charmhelpers/core/hookenv.py create mode 100644 hooks/charmhelpers/core/host.py create mode 100644 hooks/charmhelpers/fetch/__init__.py create mode 100644 hooks/charmhelpers/fetch/archiveurl.py create mode 100644 hooks/charmhelpers/fetch/bzrurl.py delete mode 100644 hooks/lib/apache_utils.py delete mode 100644 hooks/lib/haproxy_utils.py delete mode 100644 hooks/lib/openstack_common.py delete mode 100644 hooks/lib/utils.py create mode 120000 hooks/start create mode 120000 hooks/stop create mode 100644 
hooks/swift_context.py delete mode 120000 templates/folsom/memcached.conf delete mode 120000 templates/folsom/proxy-server.conf delete mode 120000 templates/folsom/swift-rings delete mode 120000 templates/folsom/swift.conf delete mode 120000 templates/grizzly/memcached.conf delete mode 120000 templates/grizzly/swift-rings delete mode 120000 templates/grizzly/swift.conf delete mode 120000 templates/havana/memcached.conf delete mode 120000 templates/havana/swift-rings delete mode 120000 templates/havana/swift.conf rename templates/{essex => }/memcached.conf (100%) rename templates/{essex => }/swift-rings (100%) create mode 120000 templates/swift-rings.conf rename templates/{essex => }/swift.conf (100%) diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..2b96e5f --- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +#!/usr/bin/make +PYTHON := /usr/bin/env python + +lint: + @flake8 --exclude hooks/charmhelpers --ignore=E125 hooks + #@flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests + @charm proof + +test: + @echo Starting tests... 
+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests + +sync: + @charm-helper-sync -c charm-helpers.yaml diff --git a/charm-helpers.yaml b/charm-helpers.yaml new file mode 100644 index 0000000..e5941b6 --- /dev/null +++ b/charm-helpers.yaml @@ -0,0 +1,9 @@ +branch: lp:~openstack-charmers/charm-helpers/to_upstream +destination: hooks/charmhelpers +include: + - core + - fetch + - contrib.openstack|inc=* + - contrib.hahelpers: + - apache + - cluster diff --git a/hooks/lib/__init__.py b/hooks/charmhelpers/__init__.py similarity index 100% rename from hooks/lib/__init__.py rename to hooks/charmhelpers/__init__.py diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 0000000..3208a85 --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,58 @@ +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(): + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get('ssl_cert', + rid=r_id, unit=unit) + if not key: + key = relation_get('ssl_key', + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = None + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not ca_cert: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def install_ca_cert(ca_cert): + if ca_cert: + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', + 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/hooks/lib/cluster_utils.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py similarity index 55% rename from hooks/lib/cluster_utils.py rename to hooks/charmhelpers/contrib/hahelpers/cluster.py index b7d00f8..074855f 100644 --- a/hooks/lib/cluster_utils.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,24 +1,31 @@ # # Copyright 2012 Canonical Ltd. 
# -# This file is sourced from lp:openstack-charm-helpers -# # Authors: # James Page # Adam Gandelman # -from lib.utils import ( - juju_log, - relation_ids, - relation_list, - relation_get, - get_unit_hostname, - config_get - ) import subprocess import os +from socket import gethostname as get_unit_hostname + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + ERROR, + unit_get, +) + + +class HAIncompleteConfig(Exception): + pass + def is_clustered(): for r_id in (relation_ids('ha') or []): @@ -35,7 +42,7 @@ def is_leader(resource): cmd = [ "crm", "resource", "show", resource - ] + ] try: status = subprocess.check_output(cmd) except subprocess.CalledProcessError: @@ -67,12 +74,12 @@ def oldest_peer(peers): def eligible_leader(resource): if is_clustered(): if not is_leader(resource): - juju_log('INFO', 'Deferring action to CRM leader.') + log('Deferring action to CRM leader.', level=INFO) return False else: peers = peer_units() if peers and not oldest_peer(peers): - juju_log('INFO', 'Deferring action to oldest service unit.') + log('Deferring action to oldest service unit.', level=INFO) return False return True @@ -90,10 +97,14 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): - if (relation_get('https_keystone', rid=r_id, unit=unit) and - relation_get('ssl_cert', rid=r_id, unit=unit) and - relation_get('ssl_key', rid=r_id, unit=unit) and - relation_get('ca_cert', rid=r_id, unit=unit)): + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ssl_cert', rid=r_id, unit=unit), + relation_get('ssl_key', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): return True return False @@ -128,3 +139,45 @@ def determine_haproxy_port(public_port): if https(): i += 1 return 
public_port - (i * 10) + + +def get_hacluster_config(): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr + + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing. + ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] + conf = {} + for setting in settings: + conf[setting] = config_get(setting) + missing = [] + [missing.append(s) for s, v in conf.iteritems() if v is None] + if missing: + log('Insufficient config data to configure hacluster.', level=ERROR) + raise HAIncompleteConfig + return conf + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + :vip_setting: str: Setting in charm config that specifies + VIP address. 
+ ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = unit_get('private-address') + return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py new file mode 100644 index 0000000..92924e3 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,433 @@ +import os + +from base64 import b64decode + +from subprocess import ( + check_call +) + + +from charmhelpers.fetch import ( + apt_install, + filter_installed_packages, +) + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_get, + relation_ids, + related_units, + unit_get, + unit_private_ip, + WARNING, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + determine_api_port, + determine_haproxy_port, + https, + is_clustered, + peer_units, +) + +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, +) + +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, +) + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' + + +class OSContextError(Exception): + pass + + +def ensure_packages(packages): + '''Install but do not upgrade required plugin packages''' + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + +def context_complete(ctxt): + _missing = [] + for k, v in ctxt.iteritems(): + if v is None or v == '': + _missing.append(k) + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level='INFO') + return False + return True + + +class OSContextGenerator(object): + interfaces = [] + + def __call__(self): + raise NotImplementedError + + +class SharedDBContext(OSContextGenerator): 
+ interfaces = ['shared-db'] + + def __init__(self, database=None, user=None, relation_prefix=None): + ''' + Allows inspecting relation for settings prefixed with relation_prefix. + This is useful for parsing access for multiple databases returned via + the shared-db interface (eg, nova_password, quantum_password) + ''' + self.relation_prefix = relation_prefix + self.database = database + self.user = user + + def __call__(self): + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: + log('Could not generate shared_db context. ' + 'Missing required charm config options. ' + '(database name and user)') + raise OSContextError + ctxt = {} + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + + for rid in relation_ids('shared-db'): + for unit in related_units(rid): + passwd = relation_get(password_setting, rid=rid, unit=unit) + ctxt = { + 'database_host': relation_get('db_host', rid=rid, + unit=unit), + 'database': self.database, + 'database_user': self.user, + 'database_password': passwd, + } + if context_complete(ctxt): + return ctxt + return {} + + +class IdentityServiceContext(OSContextGenerator): + interfaces = ['identity-service'] + + def __call__(self): + log('Generating template context for identity-service') + ctxt = {} + + for rid in relation_ids('identity-service'): + for unit in related_units(rid): + ctxt = { + 'service_port': relation_get('service_port', rid=rid, + unit=unit), + 'service_host': relation_get('service_host', rid=rid, + unit=unit), + 'auth_host': relation_get('auth_host', rid=rid, unit=unit), + 'auth_port': relation_get('auth_port', rid=rid, unit=unit), + 'admin_tenant_name': relation_get('service_tenant', + rid=rid, unit=unit), + 'admin_user': relation_get('service_username', rid=rid, + unit=unit), + 'admin_password': relation_get('service_password', rid=rid, + unit=unit), + # XXX: 
Hard-coded http. + 'service_protocol': 'http', + 'auth_protocol': 'http', + } + if context_complete(ctxt): + return ctxt + return {} + + +class AMQPContext(OSContextGenerator): + interfaces = ['amqp'] + + def __call__(self): + log('Generating template context for amqp') + conf = config() + try: + username = conf['rabbit-user'] + vhost = conf['rabbit-vhost'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' % e) + raise OSContextError + + ctxt = {} + for rid in relation_ids('amqp'): + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, + unit=unit) + else: + ctxt['rabbitmq_host'] = relation_get('private-address', + rid=rid, unit=unit) + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + if context_complete(ctxt): + # Sufficient information found = break out! 
+ break + # Used for active/active rabbitmq >= grizzly + ctxt['rabbitmq_hosts'] = [] + for unit in related_units(rid): + ctxt['rabbitmq_hosts'].append(relation_get('private-address', + rid=rid, unit=unit)) + if not context_complete(ctxt): + return {} + else: + return ctxt + + +class CephContext(OSContextGenerator): + interfaces = ['ceph'] + + def __call__(self): + '''This generates context for /etc/ceph/ceph.conf templates''' + if not relation_ids('ceph'): + return {} + log('Generating template context for ceph') + mon_hosts = [] + auth = None + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + mon_hosts.append(relation_get('private-address', rid=rid, + unit=unit)) + auth = relation_get('auth', rid=rid, unit=unit) + key = relation_get('key', rid=rid, unit=unit) + + ctxt = { + 'mon_hosts': ' '.join(mon_hosts), + 'auth': auth, + 'key': key, + } + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + + return ctxt + + +class HAProxyContext(OSContextGenerator): + interfaces = ['cluster'] + + def __call__(self): + ''' + Builds half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + ''' + if not relation_ids('cluster'): + return {} + + cluster_hosts = {} + l_unit = local_unit().replace('/', '-') + cluster_hosts[l_unit] = unit_get('private-address') + + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + addr = relation_get('private-address', rid=rid, unit=unit) + cluster_hosts[_unit] = addr + + ctxt = { + 'units': cluster_hosts, + } + if len(cluster_hosts.keys()) > 1: + # Enable haproxy when we have enough peers. 
+ log('Ensuring haproxy enabled in /etc/default/haproxy.') + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + return ctxt + log('HAProxy context is incomplete, this unit has no peers.') + return {} + + +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + ''' + Obtains the glance API server from the image-service relation. Useful + in nova and cinder (currently). + ''' + log('Generating template context for image-service.') + rids = relation_ids('image-service') + if not rids: + return {} + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + log('ImageService context is incomplete. ' + 'Missing required relation data.') + return {} + + +class ApacheSSLContext(OSContextGenerator): + """ + Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like: + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. 
+ external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self): + if not os.path.isdir('/etc/apache2/ssl'): + os.mkdir('/etc/apache2/ssl') + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + if not os.path.isdir(ssl_dir): + os.mkdir(ssl_dir) + cert, key = get_cert() + with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: + cert_out.write(b64decode(cert)) + with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: + key_out.write(b64decode(key)) + ca_cert = get_ca_cert() + if ca_cert: + with open(CA_CERT_PATH, 'w') as ca_out: + ca_out.write(b64decode(ca_cert)) + check_call(['update-ca-certificates']) + + def __call__(self): + if isinstance(self.external_ports, basestring): + self.external_ports = [self.external_ports] + if (not self.external_ports or not https()): + return {} + + self.configure_cert() + self.enable_modules() + + ctxt = { + 'namespace': self.service_namespace, + 'private_address': unit_get('private-address'), + 'endpoints': [] + } + for ext_port in self.external_ports: + if peer_units() or is_clustered(): + int_port = determine_haproxy_port(ext_port) + else: + int_port = determine_api_port(ext_port) + portmap = (int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + return ctxt + + +class NeutronContext(object): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute( + self.plugin, 'packages', self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + ensure_packages(self.packages) + + def _save_flag_file(self): + if self.network_manager == 'quantum': + _file = '/etc/nova/quantum_plugin.conf' + else: + _file = '/etc/nova/neutron_plugin.conf' + with open(_file, 'wb') as out: + out.write(self.plugin + '\n') + 
+ def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + + ovs_ctxt = { + 'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + } + + return ovs_ctxt + + def __call__(self): + self._ensure_packages() + + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = {'network_manager': self.network_manager} + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + + self._save_flag_file() + return ctxt + + +class OSConfigFlagContext(OSContextGenerator): + ''' + Responsible adding user-defined config-flags in charm config to a + to a template context. + ''' + def __call__(self): + config_flags = config('config-flags') + if not config_flags or config_flags in ['None', '']: + return {} + config_flags = config_flags.split(',') + flags = {} + for flag in config_flags: + if '=' not in flag: + log('Improperly formatted config-flag, expected k=v ' + 'got %s' % flag, level=WARNING) + continue + k, v = flag.split('=') + flags[k.strip()] = v + ctxt = {'user_config_flags': flags} + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py new file mode 100644 index 0000000..37b5a7b --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -0,0 +1,108 @@ +# Various utilies for dealing with Neutron and the renaming from Quantum. + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import os_release + + +# legacy +def quantum_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 
+ 'OVSQuantumPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron')], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': ['quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'services': [], + 'packages': ['quantum-plugin-nicira'], + } + } + + +def neutron_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron')], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': ['neutron-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
+ 'NeutronPlugin.NvpPluginV2', + 'services': [], + 'packages': ['neutron-plugin-nicira'], + } + } + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log('Error: Network manager does not support plugins.') + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatability (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 0000000..0b49ad2 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,2 @@ +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf new file mode 100644 index 0000000..49d07c8 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -0,0 +1,11 @@ +############################################################################### +# [ WARNING ] 
+# cinder configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### +{% if auth -%} +[global] + auth_supported = {{ auth }} + keyring = /etc/ceph/$cluster.$name.keyring + mon host = {{ mon_hosts }} +{% endif -%} diff --git a/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg similarity index 94% rename from templates/haproxy.cfg rename to hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 7404000..a1694e4 100644 --- a/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -25,6 +25,7 @@ listen stats :8888 stats uri / stats auth admin:password +{% if units -%} {% for service, ports in service_ports.iteritems() -%} listen {{ service }} 0.0.0.0:{{ ports[0] }} balance roundrobin @@ -32,4 +33,5 @@ listen {{ service }} 0.0.0.0:{{ ports[0] }} {% for unit, address in units.iteritems() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} -{% endfor %} +{% endfor -%} +{% endif -%} diff --git a/templates/apache2_site.tmpl b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend similarity index 86% rename from templates/apache2_site.tmpl rename to hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index a605f07..e02dc75 100644 --- a/templates/apache2_site.tmpl +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -1,3 +1,5 @@ +{% if endpoints -%} +{% for ext, int in endpoints -%} Listen {{ ext }} NameVirtualHost *:{{ ext }} @@ -17,3 +19,5 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf new file mode 100644 index 0000000..e02dc75 --- /dev/null +++ 
b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -0,0 +1,23 @@ +{% if endpoints -%} +{% for ext, int in endpoints -%} +Listen {{ ext }} +NameVirtualHost *:{{ ext }} + + ServerName {{ private_address }} + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + + + Order deny,allow + Allow from all + + + Order allow,deny + Allow from all + +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 0000000..4595778 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,280 @@ +import os + +from charmhelpers.fetch import apt_install + +from charmhelpers.core.hookenv import ( + log, + ERROR, + INFO +) + +from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES + +try: + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions +except ImportError: + # python-jinja2 may not be installed yet, or we're running unittests. + FileSystemLoader = ChoiceLoader = Environment = exceptions = None + + +class OSConfigException(Exception): + pass + + +def get_loader(templates_dir, os_release): + """ + Create a jinja2.ChoiceLoader containing template dirs up to + and including os_release. If directory template directory + is missing at templates_dir, it will be omitted from the loader. + templates_dir is added to the bottom of the search list as a base + loading dir. + + A charm may also ship a templates dir with this module + and it will be appended to the bottom of the search list, eg: + hooks/charmhelpers/contrib/openstack/templates. + + :param templates_dir: str: Base template directory containing release + sub-directories. 
+ :param os_release : str: OpenStack release codename to construct template + loader. + + :returns : jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. + """ + tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) + for rel in OPENSTACK_CODENAMES.itervalues()] + + if not os.path.isdir(templates_dir): + log('Templates directory not found @ %s.' % templates_dir, + level=ERROR) + raise OSConfigException + + # the bottom contains tempaltes_dir and possibly a common templates dir + # shipped with the helper. + loaders = [FileSystemLoader(templates_dir)] + helper_templates = os.path.join(os.path.dirname(__file__), 'templates') + if os.path.isdir(helper_templates): + loaders.append(FileSystemLoader(helper_templates)) + + for rel, tmpl_dir in tmpl_dirs: + if os.path.isdir(tmpl_dir): + loaders.insert(0, FileSystemLoader(tmpl_dir)) + if rel == os_release: + break + log('Creating choice loader with dirs: %s' % + [l.searchpath for l in loaders], level=INFO) + return ChoiceLoader(loaders) + + +class OSConfigTemplate(object): + """ + Associates a config file template with a list of context generators. + Responsible for constructing a template context based on those generators. + """ + def __init__(self, config_file, contexts): + self.config_file = config_file + + if hasattr(contexts, '__call__'): + self.contexts = [contexts] + else: + self.contexts = contexts + + self._complete_contexts = [] + + def context(self): + ctxt = {} + for context in self.contexts: + _ctxt = context() + if _ctxt: + ctxt.update(_ctxt) + # track interfaces for every complete context. + [self._complete_contexts.append(interface) + for interface in context.interfaces + if interface not in self._complete_contexts] + return ctxt + + def complete_contexts(self): + ''' + Return a list of interfaces that have atisfied contexts. 
+ ''' + if self._complete_contexts: + return self._complete_contexts + self.context() + return self._complete_contexts + + +class OSConfigRenderer(object): + """ + This class provides a common templating system to be used by OpenStack + charms. It is intended to help charms share common code and templates, + and ease the burden of managing config templates across multiple OpenStack + releases. + + Basic usage: + # import some common context generates from charmhelpers + from charmhelpers.contrib.openstack import context + + # Create a renderer object for a specific OS release. + configs = OSConfigRenderer(templates_dir='/tmp/templates', + openstack_release='folsom') + # register some config files with context generators. + configs.register(config_file='/etc/nova/nova.conf', + contexts=[context.SharedDBContext(), + context.AMQPContext()]) + configs.register(config_file='/etc/nova/api-paste.ini', + contexts=[context.IdentityServiceContext()]) + configs.register(config_file='/etc/haproxy/haproxy.conf', + contexts=[context.HAProxyContext()]) + # write out a single config + configs.write('/etc/nova/nova.conf') + # write out all registered configs + configs.write_all() + + Details: + + OpenStack Releases and template loading + --------------------------------------- + When the object is instantiated, it is associated with a specific OS + release. This dictates how the template loader will be constructed. + + The constructed loader attempts to load the template from several places + in the following order: + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. 
+ + + For the example above, '/tmp/templates' contains the following structure: + /tmp/templates/nova.conf + /tmp/templates/api-paste.ini + /tmp/templates/grizzly/api-paste.ini + /tmp/templates/havana/api-paste.ini + + Since it was registered with the grizzly release, it first seraches + the grizzly directory for nova.conf, then the templates dir. + + When writing api-paste.ini, it will find the template in the grizzly + directory. + + If the object were created with folsom, it would fall back to the + base templates dir for its api-paste.ini template. + + This system should help manage changes in config files through + openstack releases, allowing charms to fall back to the most recently + updated config template for a given release + + The haproxy.conf, since it is not shipped in the templates dir, will + be loaded from the module directory's template directory, eg + $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows + us to ship common templates (haproxy, apache) with the helpers. + + Context generators + --------------------------------------- + Context generators are used to generate template contexts during hook + execution. Doing so may require inspecting service relations, charm + config, etc. When registered, a config file is associated with a list + of generators. When a template is rendered and written, all context + generates are called in a chain to generate the context dictionary + passed to the jinja2 template. See context.py for more info. + """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. 
+ # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + apt_install('python-jinja2') + + def register(self, config_file, contexts): + """ + Register a config file with a list of context generators to be called + during rendering. + """ + self.templates[config_file] = OSConfigTemplate(config_file=config_file, + contexts=contexts) + log('Registered config file: %s' % config_file, level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from %s' % template.filename, level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + ctxt = self.templates[config_file].context() + + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking for it + # using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from %s by %s or %s.' % + (self.templates_dir, os.path.basename(config_file), _tmpl), + level=ERROR) + raise e + + log('Rendering from template: %s' % _tmpl, level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. 
+ """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + + _out = self.render(config_file) + + with open(config_file, 'wb') as out: + out.write(_out) + + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in self.templates.iterkeys()] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in self.templates.itervalues()] + return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 0000000..39f627d --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,361 @@ +#!/usr/bin/python + +# Common python helper functions used for OpenStack charms. 
+from collections import OrderedDict + +import apt_pkg as apt +import subprocess +import os +import socket +import sys + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, + charm_dir, +) + +from charmhelpers.core.host import ( + lsb_release, +) + +from charmhelpers.fetch import ( + apt_install, +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), +]) + +# The ugly duckling +SWIFT_CODENAMES = { + '1.4.3': 'diablo', + '1.4.8': 'essex', + '1.7.4': 'folsom', + '1.7.6': 'grizzly', + '1.7.7': 'grizzly', + '1.8.0': 'grizzly', + '1.9.0': 'havana', + '1.9.1': 'havana', +} + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src == 'distro': + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + 
'''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + apt.init() + cache = apt.Cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + vers = vers[:5] + return SWIFT_CODENAMES[vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in vers_map.iteritems(): + if cname == codename: + return version + #e = "Could not determine OpenStack version for package: %s" % pkg + #error_out(e) + + +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. 
+ If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. + ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + +def import_key(keyid): + cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. 
+ pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in env_vars.iteritems() if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. 
+ + """ + + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, basestring): + rtype = 'A' + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + return ns_query(hostname) + + +def get_hostname(address): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if not is_ip(address): + return address + + try: + import dns.reversename + except ImportError: + apt_install('python-dnspython') + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + + # strip trailing . + if result.endswith('.'): + return result[:-1] + return result diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 0000000..2b06706 --- /dev/null +++ b/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,340 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. 
+# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return json.loads(subprocess.check_output(config_cmd_line)) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not 
None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + 
charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def 
charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py new file mode 100644 index 0000000..1a63bf8 --- /dev/null +++ b/hooks/charmhelpers/core/host.py @@ -0,0 +1,241 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import pwd +import grp +import random +import string +import subprocess +import hashlib + +from collections import OrderedDict + +from hookenv import log + + +def service_start(service_name): + return service('start', service_name) + + +def service_stop(service_name): + return service('stop', service_name) + + +def service_restart(service_name): + return service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add 
a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, content, owner='root', group='root', perms=0444): + """Create or overwrite a file with the contents of a string""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, 
e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. 
+ ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(OrderedDict.fromkeys(restarts)): + service('restart', service_name) + return wrapped_f + return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def pwgen(length=None): + '''Generate a random pasword.''' + if length is None: + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + random_chars = [ + random.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 0000000..b2f9646 --- /dev/null +++ b/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,209 @@ +import importlib +from yaml import safe_load +from charmhelpers.core.host import ( + lsb_release +) +from urlparse import ( + urlparse, + urlunparse, +) +import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) +import apt_pkg + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) 
+ return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '-y', 'purge'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def add_source(source, key=None): + if ((source.startswith('ppa:') or + source.startswith('http:'))): + subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + if key: + subprocess.check_call(['apt-key', 'import', key]) + + +class SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. 
None) should not be quoted. + """ + sources = safe_load(config(sources_var)) + keys = safe_load(config(keys_var)) + if isinstance(sources, basestring) and isinstance(keys, basestring): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this modules submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr(importlib.import_module(package), classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be omitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format(handler_name)) + return plugin_list diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 0000000..e35b8f1 --- /dev/null +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,48 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propagate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + def install(self, 
source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + raise UnhandledSource(e.reason) + except OSError as e: + raise UnhandledSource(e.strerror) + return extract(dld_file) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py new file mode 100644 index 0000000..c348b4b --- /dev/null +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -0,0 +1,49 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from bzrlib.branch import Branch +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + if url_parts.scheme == "lp": + from bzrlib.plugin import load_plugins + load_plugins() + try: + remote_branch = Branch.open(source) + remote_branch.bzrdir.sprout(dest).open_branch() + except Exception as e: + raise e + + def install(self, source): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise 
UnhandledSource(e.strerror) + return dest_dir + diff --git a/hooks/lib/apache_utils.py b/hooks/lib/apache_utils.py deleted file mode 100644 index 38eb6c7..0000000 --- a/hooks/lib/apache_utils.py +++ /dev/null @@ -1,207 +0,0 @@ -# -# Copyright 2012 Canonical Ltd. -# -# Authors: -# James Page -# - -from lib.utils import ( - relation_ids, - relation_list, - relation_get, - render_template, - juju_log, - config_get, - install, - get_host_ip, - restart - ) -from lib.cluster_utils import https - -import os -import subprocess -from base64 import b64decode - -APACHE_SITE_DIR = "/etc/apache2/sites-available" -SITE_TEMPLATE = "apache2_site.tmpl" -RELOAD_CHECK = "To activate the new configuration" - - -def is_apache24(): - try: - version = subprocess.check_output(['a2query', '-v']) - return version.startswith('2.4') - except (subprocess.CalledProcessError, OSError): - # Catch OSError just in case apache2 not yet installed - return False - - -def get_cert(): - cert = config_get('ssl_cert') - key = config_get('ssl_key') - if not (cert and key): - juju_log('INFO', - "Inspecting identity-service relations for SSL certificate.") - cert = key = None - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - if not cert: - cert = relation_get('ssl_cert', - rid=r_id, unit=unit) - if not key: - key = relation_get('ssl_key', - rid=r_id, unit=unit) - return (cert, key) - - -def get_ca_cert(): - ca_cert = None - juju_log('INFO', - "Inspecting identity-service relations for CA SSL certificate.") - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - if not ca_cert: - ca_cert = relation_get('ca_cert', - rid=r_id, unit=unit) - return ca_cert - - -def install_ca_cert(ca_cert): - if ca_cert: - with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', - 'w') as crt: - crt.write(ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) - - -def enable_https(port_maps, namespace, cert, key, ca_cert=None): - ''' - 
For a given number of port mappings, configures apache2 - HTTPs local reverse proxying using certficates and keys provided in - either configuration data (preferred) or relation data. Assumes ports - are not in use (calling charm should ensure that). - - port_maps: dict: external to internal port mappings - namespace: str: name of charm - ''' - def _write_if_changed(path, new_content): - content = None - if os.path.exists(path): - with open(path, 'r') as f: - content = f.read().strip() - if content != new_content: - with open(path, 'w') as f: - f.write(new_content) - return True - else: - return False - - juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps)) - http_restart = False - - if cert: - cert = b64decode(cert) - if key: - key = b64decode(key) - if ca_cert: - ca_cert = b64decode(ca_cert) - - if not cert and not key: - juju_log('ERROR', - "Expected but could not find SSL certificate data, not " - "configuring HTTPS!") - return False - - install('apache2') - if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl', - 'proxy', 'proxy_http']): - http_restart = True - - ssl_dir = os.path.join('/etc/apache2/ssl', namespace) - if not os.path.exists(ssl_dir): - os.makedirs(ssl_dir) - - if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)): - http_restart = True - if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)): - http_restart = True - os.chmod(os.path.join(ssl_dir, 'key'), 0600) - - install_ca_cert(ca_cert) - - sites_dir = '/etc/apache2/sites-available' - for ext_port, int_port in port_maps.items(): - juju_log('INFO', - 'Creating apache2 reverse proxy vhost' - ' for {}:{}'.format(ext_port, - int_port)) - site = "{}_{}".format(namespace, ext_port) - if is_apache24(): - site = "{}.conf".format(site) - site_path = os.path.join(sites_dir, site) - with open(site_path, 'w') as fsite: - context = { - "ext": ext_port, - "int": int_port, - "namespace": namespace, - "private_address": get_host_ip() - } - 
fsite.write(render_template(SITE_TEMPLATE, - context)) - - if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]): - http_restart = True - - if http_restart: - restart('apache2') - - return True - - -def disable_https(port_maps, namespace): - ''' - Ensure HTTPS reverse proxying is disables for given port mappings - - port_maps: dict: of ext -> int port mappings - namespace: str: name of chamr - ''' - juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps)) - - if (not os.path.exists('/etc/apache2') or - not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))): - return - - http_restart = False - for ext_port in port_maps.keys(): - site_path = os.path.join(APACHE_SITE_DIR, - "{}_{}".format(namespace, - ext_port)) - if is_apache24(): - site_path = "{}.conf".format(site_path) - if os.path.exists(site_path): - juju_log('INFO', - "Disabling HTTPS reverse proxy" - " for {} {}.".format(namespace, - ext_port)) - if (RELOAD_CHECK in - subprocess.check_output(['a2dissite', - '{}_{}'.format(namespace, - ext_port)])): - http_restart = True - - if http_restart: - restart(['apache2']) - - -def setup_https(port_maps, namespace, cert, key, ca_cert=None): - ''' - Ensures HTTPS is either enabled or disabled for given port - mapping. - - port_maps: dict: of ext -> int port mappings - namespace: str: name of charm - ''' - if not https: - disable_https(port_maps, namespace) - else: - enable_https(port_maps, namespace, cert, key, ca_cert) diff --git a/hooks/lib/haproxy_utils.py b/hooks/lib/haproxy_utils.py deleted file mode 100644 index 3236628..0000000 --- a/hooks/lib/haproxy_utils.py +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright 2012 Canonical Ltd. 
-# -# Authors: -# James Page -# - -from lib.utils import ( - relation_ids, - relation_list, - relation_get, - unit_get, - reload, - render_template - ) -import os - -HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' -HAPROXY_DEFAULT = '/etc/default/haproxy' - - -def configure_haproxy(service_ports): - ''' - Configure HAProxy based on the current peers in the service - cluster using the provided port map: - - "swift": [ 8080, 8070 ] - - HAproxy will also be reloaded/started if required - - service_ports: dict: dict of lists of [ frontend, backend ] - ''' - cluster_hosts = {} - cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \ - unit_get('private-address') - for r_id in relation_ids('cluster'): - for unit in relation_list(r_id): - cluster_hosts[unit.replace('/', '-')] = \ - relation_get(attribute='private-address', - rid=r_id, - unit=unit) - context = { - 'units': cluster_hosts, - 'service_ports': service_ports - } - with open(HAPROXY_CONF, 'w') as f: - f.write(render_template(os.path.basename(HAPROXY_CONF), - context)) - with open(HAPROXY_DEFAULT, 'w') as f: - f.write('ENABLED=1') - - reload('haproxy') diff --git a/hooks/lib/openstack_common.py b/hooks/lib/openstack_common.py deleted file mode 100644 index 0a38a6d..0000000 --- a/hooks/lib/openstack_common.py +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/python - -# Common python helper functions used for OpenStack charms. 
- -import apt_pkg as apt -import subprocess -import os - -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' - -ubuntu_openstack_release = { - 'oneiric': 'diablo', - 'precise': 'essex', - 'quantal': 'folsom', - 'raring': 'grizzly', - 'saucy': 'havana', -} - - -openstack_codenames = { - '2011.2': 'diablo', - '2012.1': 'essex', - '2012.2': 'folsom', - '2013.1': 'grizzly', - '2013.2': 'havana', -} - -# The ugly duckling -swift_codenames = { - '1.4.3': 'diablo', - '1.4.8': 'essex', - '1.7.4': 'folsom', - '1.7.6': 'grizzly', - '1.7.7': 'grizzly', - '1.8.0': 'grizzly', - '1.9.1': 'havana', -} - - -def juju_log(msg): - subprocess.check_call(['juju-log', msg]) - - -def error_out(msg): - juju_log("FATAL ERROR: %s" % msg) - exit(1) - - -def lsb_release(): - '''Return /etc/lsb-release in a dict''' - lsb = open('/etc/lsb-release', 'r') - d = {} - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def get_os_codename_install_source(src): - '''Derive OpenStack release codename from a given installation source.''' - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - - rel = '' - if src == 'distro': - try: - rel = ubuntu_openstack_release[ubuntu_rel] - except KeyError: - e = 'Code not derive openstack release for '\ - 'this Ubuntu release: %s' % rel - error_out(e) - return rel - - if src.startswith('cloud:'): - ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] - return ca_rel - - # Best guess match based on deb string provided - if src.startswith('deb') or src.startswith('ppa'): - for k, v in openstack_codenames.iteritems(): - if v in src: - return v - - -def get_os_codename_version(vers): - '''Determine OpenStack codename from version number.''' - try: - return openstack_codenames[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_codename(codename): - '''Determine OpenStack 
version number from codename.''' - for k, v in openstack_codenames.iteritems(): - if v == codename: - return k - e = 'Code not derive OpenStack version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_os_codename_package(pkg): - '''Derive OpenStack release codename from an installed package.''' - apt.init() - cache = apt.Cache() - try: - pkg = cache[pkg] - except: - e = 'Could not determine version of installed package: %s' % pkg - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - - try: - if 'swift' in pkg.name: - vers = vers[:5] - return swift_codenames[vers] - else: - vers = vers[:6] - return openstack_codenames[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_package(pkg): - '''Derive OpenStack version number from an installed package.''' - codename = get_os_codename_package(pkg) - - if 'swift' in pkg: - vers_map = swift_codenames - else: - vers_map = openstack_codenames - - for version, cname in vers_map.iteritems(): - if cname == codename: - return version - e = "Could not determine OpenStack version for package: %s" % pkg - error_out(e) - - -def configure_installation_source(rel): - '''Configure apt installation source.''' - - def _import_key(keyid): - cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ - "--recv-keys %s" % keyid - try: - subprocess.check_call(cmd.split(' ')) - except subprocess.CalledProcessError: - error_out("Error importing repo key %s" % keyid) - - if rel == 'distro': - return - elif rel[:4] == "ppa:": - src = rel - subprocess.check_call(["add-apt-repository", "-y", src]) - elif rel[:3] == "deb": - l = len(rel.split('|')) - if l == 2: - src, key = rel.split('|') - juju_log("Importing PPA key from keyserver for %s" % src) - _import_key(key) - elif l == 1: - src = rel - else: - error_out("Invalid openstack-release: %s" % rel) - - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(src) - elif 
rel[:6] == 'cloud:': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = rel.split(':')[1] - u_rel = rel.split('-')[0] - ca_rel = rel.split('-')[1] - - if u_rel != ubuntu_rel: - e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ - 'version (%s)' % (ca_rel, ubuntu_rel) - error_out(e) - - if 'staging' in ca_rel: - # staging is just a regular PPA. - os_rel = ca_rel.split('/')[0] - ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel - cmd = 'add-apt-repository -y %s' % ppa - subprocess.check_call(cmd.split(' ')) - return - - # map charm config options to actual archive pockets. - pockets = { - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly' - } - - try: - pocket = pockets[ca_rel] - except KeyError: - e = 'Invalid Cloud Archive release specified: %s' % rel - error_out(e) - - src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) - cmd = ['apt-get', '-y', 'install', 'ubuntu-cloud-keyring'] - subprocess.check_call(cmd) - - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: - f.write(src) - else: - error_out("Invalid openstack-release specified: %s" % rel) - - -def save_script_rc(script_path="scripts/scriptrc", **env_vars): - """ - Write an rc file in the charm-delivered directory containing - exported environment variables provided by env_vars. Any charm scripts run - outside the juju hook environment can source this scriptrc to obtain - updated config information necessary to perform health checks or - service changes. 
- """ - charm_dir = os.getenv('CHARM_DIR') - juju_rc_path = "%s/%s" % (charm_dir, script_path) - with open(juju_rc_path, 'wb') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in env_vars.iteritems() if u != "script_path"] diff --git a/hooks/lib/utils.py b/hooks/lib/utils.py deleted file mode 100644 index 1033a58..0000000 --- a/hooks/lib/utils.py +++ /dev/null @@ -1,332 +0,0 @@ -# -# Copyright 2012 Canonical Ltd. -# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Paul Collins -# Adam Gandelman -# - -import json -import os -import subprocess -import socket -import sys - - -def do_hooks(hooks): - hook = os.path.basename(sys.argv[0]) - - try: - hook_func = hooks[hook] - except KeyError: - juju_log('INFO', - "This charm doesn't know how to handle '{}'.".format(hook)) - else: - hook_func() - - -def install(*pkgs): - cmd = [ - 'apt-get', - '-y', - 'install' - ] - for pkg in pkgs: - cmd.append(pkg) - subprocess.check_call(cmd) - -TEMPLATES_DIR = 'templates' - -try: - import jinja2 -except ImportError: - install('python-jinja2') - import jinja2 - -try: - import dns.resolver -except ImportError: - install('python-dnspython') - import dns.resolver - - -def render_template(template_name, context, template_dir=TEMPLATES_DIR): - templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir) - ) - template = templates.get_template(template_name) - return template.render(context) - -CLOUD_ARCHIVE = \ -""" # Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" - -CLOUD_ARCHIVE_POCKETS = { - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly' - } - - -def configure_source(): - source = str(config_get('openstack-origin')) - 
if not source: - return - if source.startswith('ppa:'): - cmd = [ - 'add-apt-repository', - source - ] - subprocess.check_call(cmd) - if source.startswith('cloud:'): - # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg: - # cloud:precise-folsom/updates or cloud:precise-folsom/proposed - install('ubuntu-cloud-keyring') - pocket = source.split(':')[1] - pocket = pocket.split('-')[1] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])) - if source.startswith('deb'): - l = len(source.split('|')) - if l == 2: - (apt_line, key) = source.split('|') - cmd = [ - 'apt-key', - 'adv', '--keyserver keyserver.ubuntu.com', - '--recv-keys', key - ] - subprocess.check_call(cmd) - elif l == 1: - apt_line = source - - with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: - apt.write(apt_line + "\n") - cmd = [ - 'apt-get', - 'update' - ] - subprocess.check_call(cmd) - -# Protocols -TCP = 'TCP' -UDP = 'UDP' - - -def expose(port, protocol='TCP'): - cmd = [ - 'open-port', - '{}/{}'.format(port, protocol) - ] - subprocess.check_call(cmd) - - -def juju_log(severity, message): - cmd = [ - 'juju-log', - '--log-level', severity, - message - ] - subprocess.check_call(cmd) - - -cache = {} - - -def cached(func): - def wrapper(*args, **kwargs): - global cache - key = str((func, args, kwargs)) - try: - return cache[key] - except KeyError: - res = func(*args, **kwargs) - cache[key] = res - return res - return wrapper - - -@cached -def relation_ids(relation): - cmd = [ - 'relation-ids', - relation - ] - result = str(subprocess.check_output(cmd)).split() - if result == "": - return None - else: - return result - - -@cached -def relation_list(rid): - cmd = [ - 'relation-list', - '-r', rid, - ] - result = str(subprocess.check_output(cmd)).split() - if result == "": - return None - else: - return result - - -@cached -def relation_get(attribute, unit=None, rid=None): - cmd = [ - 'relation-get', - 
] - if rid: - cmd.append('-r') - cmd.append(rid) - cmd.append(attribute) - if unit: - cmd.append(unit) - value = subprocess.check_output(cmd).strip() # IGNORE:E1103 - if value == "": - return None - else: - return value - - -@cached -def relation_get_dict(relation_id=None, remote_unit=None): - """Obtain all relation data as dict by way of JSON""" - cmd = [ - 'relation-get', '--format=json' - ] - if relation_id: - cmd.append('-r') - cmd.append(relation_id) - if remote_unit: - remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None) - os.environ['JUJU_REMOTE_UNIT'] = remote_unit - j = subprocess.check_output(cmd) - if remote_unit and remote_unit_orig: - os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig - d = json.loads(j) - settings = {} - # convert unicode to strings - for k, v in d.iteritems(): - settings[str(k)] = str(v) - return settings - - -def relation_set(**kwargs): - cmd = [ - 'relation-set' - ] - args = [] - for k, v in kwargs.items(): - if k == 'rid': - if v: - cmd.append('-r') - cmd.append(v) - else: - args.append('{}={}'.format(k, v)) - cmd += args - subprocess.check_call(cmd) - - -@cached -def unit_get(attribute): - cmd = [ - 'unit-get', - attribute - ] - value = subprocess.check_output(cmd).strip() # IGNORE:E1103 - if value == "": - return None - else: - return value - - -@cached -def config_get(attribute): - cmd = [ - 'config-get', - '--format', - 'json', - ] - out = subprocess.check_output(cmd).strip() # IGNORE:E1103 - cfg = json.loads(out) - - try: - return cfg[attribute] - except KeyError: - return None - - -@cached -def get_unit_hostname(): - return socket.gethostname() - - -@cached -def get_host_ip(hostname=unit_get('private-address')): - try: - # Test to see if already an IPv4 address - socket.inet_aton(hostname) - return hostname - except socket.error: - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address - return None - - -def _svc_control(service, action): - subprocess.check_call(['service', service, action]) - 
- -def restart(*services): - for service in services: - _svc_control(service, 'restart') - - -def stop(*services): - for service in services: - _svc_control(service, 'stop') - - -def start(*services): - for service in services: - _svc_control(service, 'start') - - -def reload(*services): - for service in services: - try: - _svc_control(service, 'reload') - except subprocess.CalledProcessError: - # Reload failed - either service does not support reload - # or it was not running - restart will fixup most things - _svc_control(service, 'restart') - - -def running(service): - try: - output = subprocess.check_output(['service', service, 'status']) - except subprocess.CalledProcessError: - return False - else: - if ("start/running" in output or - "is running" in output): - return True - else: - return False - - -def is_relation_made(relation, key='private-address'): - for r_id in (relation_ids(relation) or []): - for unit in (relation_list(r_id) or []): - if relation_get(key, rid=r_id, unit=unit): - return True - return False diff --git a/hooks/start b/hooks/start new file mode 120000 index 0000000..8623fba --- /dev/null +++ b/hooks/start @@ -0,0 +1 @@ +swift_hooks.py \ No newline at end of file diff --git a/hooks/stop b/hooks/stop new file mode 120000 index 0000000..8623fba --- /dev/null +++ b/hooks/stop @@ -0,0 +1 @@ +swift_hooks.py \ No newline at end of file diff --git a/hooks/swift_context.py b/hooks/swift_context.py new file mode 100644 index 0000000..196af46 --- /dev/null +++ b/hooks/swift_context.py @@ -0,0 +1,223 @@ +from charmhelpers.core.hookenv import ( + config, + log, + relation_ids, + related_units, + relation_get, + unit_get +) + +from charmhelpers.contrib.openstack.context import ( + OSContextGenerator, + ApacheSSLContext as SSLContext, + context_complete, + CA_CERT_PATH +) + +from charmhelpers.contrib.hahelpers.cluster import ( + determine_api_port, + determine_haproxy_port, +) + +from charmhelpers.contrib.openstack.utils import get_host_ip +import 
subprocess +import os + + +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, +) + +from base64 import b64decode, b64encode + + +class HAProxyContext(OSContextGenerator): + interfaces = ['cluster'] + + def __call__(self): + ''' + Extends the main charmhelpers HAProxyContext with a port mapping + specific to this charm. + Also used to extend cinder.conf context with correct api_listening_port + ''' + haproxy_port = determine_haproxy_port(config('bind-port')) + api_port = determine_api_port(config('bind-port')) + + ctxt = { + 'service_ports': {'swift_api': [haproxy_port, api_port]}, + } + return ctxt + + +WWW_DIR = '/var/www/swift-rings' + + +def generate_cert(): + ''' + Generates a self signed certificate and key using the + provided charm configuration data. + + returns: tuple of (cert, key) + ''' + CERT = '/etc/swift/ssl.cert' + KEY = '/etc/swift/ssl.key' + if not os.path.exists(CERT) and not os.path.exists(KEY): + subj = '/C=%s/ST=%s/L=%s/CN=%s' %\ + (config('country'), config('state'), + config('locale'), config('common-name')) + cmd = ['openssl', 'req', '-new', '-x509', '-nodes', + '-out', CERT, '-keyout', KEY, + '-subj', subj] + subprocess.check_call(cmd) + os.chmod(KEY, 0600) + # Slurp as base64 encoded - makes handling easier up the stack + with open(CERT, 'r') as cfile: + ssl_cert = b64encode(cfile.read()) + with open(KEY, 'r') as kfile: + ssl_key = b64encode(kfile.read()) + return (ssl_cert, ssl_key) + + +class ApacheSSLContext(SSLContext): + interfaces = ['https'] + external_ports = [config('bind-port')] + service_namespace = 'swift' + + def configure_cert(self): + if not os.path.isdir('/etc/apache2/ssl'): + os.mkdir('/etc/apache2/ssl') + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + if not os.path.isdir(ssl_dir): + os.mkdir(ssl_dir) + cert, key = get_cert() + # Swift specific - generate a cert by default if not using + # a) user supplied cert or b) keystone signed cert + if None in [cert, key]: + cert, 
key = generate_cert() + with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: + cert_out.write(b64decode(cert)) + with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: + key_out.write(b64decode(key)) + ca_cert = get_ca_cert() + if ca_cert: + with open(CA_CERT_PATH, 'w') as ca_out: + ca_out.write(b64decode(ca_cert)) + subprocess.check_call(['update-ca-certificates']) + + def __call__(self): + return super(ApacheSSLContext, self).__call__() + + +class SwiftRingContext(OSContextGenerator): + def __call__(self): + allowed_hosts = [] + for relid in relation_ids('swift-storage'): + for unit in related_units(relid): + host = relation_get('private-address', unit, relid) + allowed_hosts.append(get_host_ip(host)) + + ctxt = { + 'www_dir': WWW_DIR, + 'allowed_hosts': allowed_hosts + } + return ctxt + + +class SwiftIdentityContext(OSContextGenerator): + interfaces = ['identity-service'] + + def __call__(self): + bind_port = config('bind-port') + workers = config('workers') + if workers == '0': + import multiprocessing + workers = multiprocessing.cpu_count() + ctxt = { + 'proxy_ip': get_host_ip(unit_get('private-address')), + 'bind_port': determine_api_port(bind_port), + 'workers': workers, + 'operator_roles': config('operator-roles'), + 'delay_auth_decision': config('delay-auth-decision') + } + + ctxt['ssl'] = False + + auth_type = config('auth-type') + auth_host = config('keystone-auth-host') + admin_user = config('keystone-admin-user') + admin_password = config('keystone-admin-password') + if (auth_type == 'keystone' and auth_host + and admin_user and admin_password): + log('Using user-specified Keystone configuration.') + ks_auth = { + 'auth_type': 'keystone', + 'auth_protocol': config('keystone-auth-protocol'), + 'keystone_host': auth_host, + 'auth_port': config('keystone-auth-port'), + 'service_user': admin_user, + 'service_password': admin_password, + 'service_tenant': config('keystone-admin-tenant-name') + } + ctxt.update(ks_auth) + + for relid in 
relation_ids('identity-service'): + log('Using Keystone configuration from identity-service.') + for unit in related_units(relid): + ks_auth = { + 'auth_type': 'keystone', + 'auth_protocol': 'http', # TODO: http hardcode + 'keystone_host': relation_get('auth_host', + unit, relid), + 'auth_port': relation_get('auth_port', + unit, relid), + 'service_user': relation_get('service_username', + unit, relid), + 'service_password': relation_get('service_password', + unit, relid), + 'service_tenant': relation_get('service_tenant', + unit, relid), + 'service_port': relation_get('service_port', + unit, relid), + 'admin_token': relation_get('admin_token', + unit, relid), + } + if context_complete(ks_auth): + ctxt.update(ks_auth) + return ctxt + + +class MemcachedContext(OSContextGenerator): + def __call__(self): + ctxt = { + 'proxy_ip': get_host_ip(unit_get('private-address')) + } + return ctxt + +SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf' + + +def get_swift_hash(): + if os.path.isfile(SWIFT_HASH_FILE): + with open(SWIFT_HASH_FILE, 'r') as hashfile: + swift_hash = hashfile.read().strip() + elif config('swift-hash'): + swift_hash = config('swift-hash') + with open(SWIFT_HASH_FILE, 'w') as hashfile: + hashfile.write(swift_hash) + else: + cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n'] + rand = open('/dev/random', 'r') + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=rand) + swift_hash = p.communicate()[0].strip() + with open(SWIFT_HASH_FILE, 'w') as hashfile: + hashfile.write(swift_hash) + return swift_hash + + +class SwiftHashContext(OSContextGenerator): + def __call__(self): + ctxt = { + 'swift_hash': get_swift_hash() + } + return ctxt diff --git a/hooks/swift_hooks.py b/hooks/swift_hooks.py index 543e757..add48a9 100755 --- a/hooks/swift_hooks.py +++ b/hooks/swift_hooks.py @@ -4,254 +4,289 @@ import os import sys import shutil import uuid -from subprocess import check_call +import subprocess -import lib.openstack_common as openstack -import lib.utils as 
utils -import lib.cluster_utils as cluster -import swift_utils as swift +import charmhelpers.contrib.openstack.utils as openstack +import charmhelpers.contrib.hahelpers.cluster as cluster +from swift_utils import ( + register_configs, + restart_map, + determine_packages, + ensure_swift_dir, + SWIFT_RINGS, WWW_DIR, + initialize_ring, + swift_user, + SWIFT_HA_RES, + balance_ring, + SWIFT_CONF_DIR, + get_zone, + exists_in_ring, + add_to_ring, + should_balance, + do_openstack_upgrade +) +from swift_context import get_swift_hash + +from charmhelpers.core.hookenv import ( + config, + unit_get, + relation_set, + relation_ids, + relation_get, + log, ERROR, + Hooks, UnregisteredHookError, + open_port +) +from charmhelpers.core.host import ( + service_restart, + restart_on_change +) +from charmhelpers.fetch import ( + apt_install, + apt_update +) extra_pkgs = [ "haproxy", "python-jinja2" - ] +] +hooks = Hooks() + +CONFIGS = register_configs() + + +@hooks.hook('install') def install(): - src = utils.config_get('openstack-origin') + src = config('openstack-origin') if src != 'distro': openstack.configure_installation_source(src) - check_call(['apt-get', 'update']) + apt_update(fatal=True) rel = openstack.get_os_codename_install_source(src) - pkgs = swift.determine_packages(rel) - utils.install(*pkgs) - utils.install(*extra_pkgs) - - swift.ensure_swift_dir() - - # initialize swift configs. 
- # swift.conf hash - ctxt = { - 'swift_hash': swift.get_swift_hash() - } - with open(swift.SWIFT_CONF, 'w') as conf: - conf.write(swift.render_config(swift.SWIFT_CONF, ctxt)) - - # swift-proxy.conf - swift.write_proxy_config() - - # memcached.conf - ctxt = {'proxy_ip': utils.get_host_ip()} - with open(swift.MEMCACHED_CONF, 'w') as conf: - conf.write(swift.render_config(swift.MEMCACHED_CONF, ctxt)) - check_call(['service', 'memcached', 'restart']) + pkgs = determine_packages(rel) + apt_install(pkgs, fatal=True) + apt_install(extra_pkgs, fatal=True) + ensure_swift_dir() # initialize new storage rings. - for ring in swift.SWIFT_RINGS.iteritems(): - swift.initialize_ring(ring[1], - utils.config_get('partition-power'), - utils.config_get('replicas'), - utils.config_get('min-hours')) + for ring in SWIFT_RINGS.iteritems(): + initialize_ring(ring[1], + config('partition-power'), + config('replicas'), + config('min-hours')) # configure a directory on webserver for distributing rings. - if not os.path.isdir(swift.WWW_DIR): - os.mkdir(swift.WWW_DIR, 0755) - uid, gid = swift.swift_user() - os.chown(swift.WWW_DIR, uid, gid) - swift.write_apache_config() - swift.configure_https() + if not os.path.isdir(WWW_DIR): + os.mkdir(WWW_DIR, 0755) + uid, gid = swift_user() + os.chown(WWW_DIR, uid, gid) +@hooks.hook('identity-service-relation-joined') def keystone_joined(relid=None): - if not cluster.eligible_leader(swift.SWIFT_HA_RES): + if not cluster.eligible_leader(SWIFT_HA_RES): return if cluster.is_clustered(): - hostname = utils.config_get('vip') + hostname = config('vip') else: - hostname = utils.unit_get('private-address') - port = utils.config_get('bind-port') + hostname = unit_get('private-address') + port = config('bind-port') if cluster.https(): proto = 'https' else: proto = 'http' admin_url = '%s://%s:%s' % (proto, hostname, port) internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url - utils.relation_set(service='swift', - region=utils.config_get('region'), - 
public_url=public_url, internal_url=internal_url, - admin_url=admin_url, - requested_roles=utils.config_get('operator-roles'), - rid=relid) + relation_set(service='swift', + region=config('region'), + public_url=public_url, internal_url=internal_url, + admin_url=admin_url, + requested_roles=config('operator-roles'), + relation_id=relid) +@hooks.hook('identity-service-relation-changed') +@restart_on_change(restart_map()) def keystone_changed(): - swift.write_proxy_config() - swift.configure_https() - # Re-fire keystone hooks to ripple back the HTTPS service entry - for relid in utils.relation_ids('identity-service'): - keystone_joined(relid=relid) + configure_https() def balance_rings(): '''handle doing ring balancing and distribution.''' new_ring = False - for ring in swift.SWIFT_RINGS.itervalues(): - if swift.balance_ring(ring): - utils.juju_log('INFO', 'Balanced ring %s' % ring) + for ring in SWIFT_RINGS.itervalues(): + if balance_ring(ring): + log('Balanced ring %s' % ring) new_ring = True if not new_ring: return - for ring in swift.SWIFT_RINGS.keys(): + for ring in SWIFT_RINGS.keys(): f = '%s.ring.gz' % ring - shutil.copyfile(os.path.join(swift.SWIFT_CONF_DIR, f), - os.path.join(swift.WWW_DIR, f)) + shutil.copyfile(os.path.join(SWIFT_CONF_DIR, f), + os.path.join(WWW_DIR, f)) - if cluster.eligible_leader(swift.SWIFT_HA_RES): + if cluster.eligible_leader(SWIFT_HA_RES): msg = 'Broadcasting notification to all storage nodes that new '\ 'ring is ready for consumption.' - utils.juju_log('INFO', msg) - path = swift.WWW_DIR.split('/var/www/')[1] + log(msg) + path = WWW_DIR.split('/var/www/')[1] trigger = uuid.uuid4() - swift_hash = swift.get_swift_hash() if cluster.is_clustered(): - hostname = utils.config_get('vip') + hostname = config('vip') else: - hostname = utils.unit_get('private-address') + hostname = unit_get('private-address') rings_url = 'http://%s/%s' % (hostname, path) # notify storage nodes that there is a new ring to fetch. 
- for relid in utils.relation_ids('swift-storage'): - utils.relation_set(rid=relid, swift_hash=swift_hash, - rings_url=rings_url, trigger=trigger) + for relid in relation_ids('swift-storage'): + relation_set(relation_id=relid, swift_hash=get_swift_hash(), + rings_url=rings_url, trigger=trigger) - swift.proxy_control('restart') + service_restart('swift-proxy') +@hooks.hook('swift-storage-relation-changed') +@restart_on_change(restart_map()) def storage_changed(): - zone = swift.get_zone(utils.config_get('zone-assignment')) + zone = get_zone(config('zone-assignment')) node_settings = { - 'ip': utils.get_host_ip(utils.relation_get('private-address')), + 'ip': openstack.get_host_ip(relation_get('private-address')), 'zone': zone, - 'account_port': utils.relation_get('account_port'), - 'object_port': utils.relation_get('object_port'), - 'container_port': utils.relation_get('container_port'), + 'account_port': relation_get('account_port'), + 'object_port': relation_get('object_port'), + 'container_port': relation_get('container_port'), } if None in node_settings.itervalues(): - utils.juju_log('INFO', 'storage_changed: Relation not ready.') + log('storage_changed: Relation not ready.') return None for k in ['zone', 'account_port', 'object_port', 'container_port']: node_settings[k] = int(node_settings[k]) - # Grant new node access to rings via apache. 
- swift.write_apache_config() + CONFIGS.write_all() # allow for multiple devs per unit, passed along as a : separated list - devs = utils.relation_get('device').split(':') + devs = relation_get('device').split(':') for dev in devs: node_settings['device'] = dev - for ring in swift.SWIFT_RINGS.itervalues(): - if not swift.exists_in_ring(ring, node_settings): - swift.add_to_ring(ring, node_settings) + for ring in SWIFT_RINGS.itervalues(): + if not exists_in_ring(ring, node_settings): + add_to_ring(ring, node_settings) - if swift.should_balance([r for r in swift.SWIFT_RINGS.itervalues()]): + if should_balance([r for r in SWIFT_RINGS.itervalues()]): balance_rings() +@hooks.hook('swift-storage-relation-broken') +@restart_on_change(restart_map()) def storage_broken(): - swift.write_apache_config() + CONFIGS.write_all() +@hooks.hook('config-changed') +@restart_on_change(restart_map()) def config_changed(): + configure_https() + open_port(config('bind-port')) # Determine whether or not we should do an upgrade, based on the # the version offered in keyston-release. 
- src = utils.config_get('openstack-origin') + src = config('openstack-origin') available = openstack.get_os_codename_install_source(src) installed = openstack.get_os_codename_package('python-swift') if (available and - openstack.get_os_version_codename(available) > \ + openstack.get_os_version_codename(available) > openstack.get_os_version_codename(installed)): - pkgs = swift.determine_packages(available) - swift.do_openstack_upgrade(src, pkgs) - - relids = utils.relation_ids('identity-service') - if relids: - for relid in relids: - keystone_joined(relid) - swift.write_proxy_config() - swift.configure_https() + pkgs = determine_packages(available) + do_openstack_upgrade(src, pkgs) +@hooks.hook('cluster-relation-changed', + 'cluster-relation-joined') +@restart_on_change(restart_map()) def cluster_changed(): - swift.configure_haproxy() + CONFIGS.write_all() +@hooks.hook('ha-relation-changed') def ha_relation_changed(): - clustered = utils.relation_get('clustered') - if clustered and cluster.is_leader(swift.SWIFT_HA_RES): - utils.juju_log('INFO', - 'Cluster configured, notifying other services and' - 'updating keystone endpoint configuration') + clustered = relation_get('clustered') + if clustered and cluster.is_leader(SWIFT_HA_RES): + log('Cluster configured, notifying other services and' + 'updating keystone endpoint configuration') # Tell all related services to start using # the VIP instead - for r_id in utils.relation_ids('identity-service'): + for r_id in relation_ids('identity-service'): keystone_joined(relid=r_id) +@hooks.hook('ha-relation-joined') def ha_relation_joined(): # Obtain the config values necessary for the cluster config. These # include multicast port and interface to bind to. 
- corosync_bindiface = utils.config_get('ha-bindiface') - corosync_mcastport = utils.config_get('ha-mcastport') - vip = utils.config_get('vip') - vip_cidr = utils.config_get('vip_cidr') - vip_iface = utils.config_get('vip_iface') + corosync_bindiface = config('ha-bindiface') + corosync_mcastport = config('ha-mcastport') + vip = config('vip') + vip_cidr = config('vip_cidr') + vip_iface = config('vip_iface') if not vip: - utils.juju_log('ERROR', - 'Unable to configure hacluster as vip not provided') + log('Unable to configure hacluster as vip not provided', + level=ERROR) sys.exit(1) # Obtain resources resources = { - 'res_swift_vip': 'ocf:heartbeat:IPaddr2', - 'res_swift_haproxy': 'lsb:haproxy' - } + 'res_swift_vip': 'ocf:heartbeat:IPaddr2', + 'res_swift_haproxy': 'lsb:haproxy' + } resource_params = { - 'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \ - (vip, vip_cidr, vip_iface), - 'res_swift_haproxy': 'op monitor interval="5s"' - } + 'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % + (vip, vip_cidr, vip_iface), + 'res_swift_haproxy': 'op monitor interval="5s"' + } init_services = { - 'res_swift_haproxy': 'haproxy' - } + 'res_swift_haproxy': 'haproxy' + } clones = { - 'cl_swift_haproxy': 'res_swift_haproxy' - } + 'cl_swift_haproxy': 'res_swift_haproxy' + } - utils.relation_set(init_services=init_services, - corosync_bindiface=corosync_bindiface, - corosync_mcastport=corosync_mcastport, - resources=resources, - resource_params=resource_params, - clones=clones) + relation_set(init_services=init_services, + corosync_bindiface=corosync_bindiface, + corosync_mcastport=corosync_mcastport, + resources=resources, + resource_params=resource_params, + clones=clones) -hooks = { - 'install': install, - 'config-changed': config_changed, - 'identity-service-relation-joined': keystone_joined, - 'identity-service-relation-changed': keystone_changed, - 'swift-storage-relation-changed': storage_changed, - 'swift-storage-relation-broken': storage_broken, 
- "cluster-relation-joined": cluster_changed, - "cluster-relation-changed": cluster_changed, - "ha-relation-joined": ha_relation_joined, - "ha-relation-changed": ha_relation_changed -} +def configure_https(): + ''' + Enables SSL API Apache config if appropriate and kicks identity-service + with any required api updates. + ''' + # need to write all to ensure changes to the entire request pipeline + # propagate (c-api, haprxy, apache) + CONFIGS.write_all() + if 'https' in CONFIGS.complete_contexts(): + cmd = ['a2ensite', 'openstack_https_frontend'] + subprocess.check_call(cmd) + else: + cmd = ['a2dissite', 'openstack_https_frontend'] + subprocess.check_call(cmd) -utils.do_hooks(hooks) + for rid in relation_ids('identity-service'): + keystone_joined(relid=rid) -sys.exit(0) + +def main(): + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) + + +if __name__ == '__main__': + main() diff --git a/hooks/swift_utils.py b/hooks/swift_utils.py index 95ed39e..6dde51a 100644 --- a/hooks/swift_utils.py +++ b/hooks/swift_utils.py @@ -1,23 +1,37 @@ import os import pwd import subprocess -import lib.openstack_common as openstack -import lib.utils as utils -import lib.haproxy_utils as haproxy -import lib.apache_utils as apache -import lib.cluster_utils as cluster +import charmhelpers.contrib.openstack.utils as openstack import sys from base64 import b64encode +from collections import OrderedDict + +from charmhelpers.core.hookenv import ( + log, ERROR, + config, + relation_get, +) +from charmhelpers.fetch import ( + apt_update, + apt_install +) + +import charmhelpers.contrib.openstack.context as context +import charmhelpers.contrib.openstack.templating as templating +import swift_context # Various config files that are managed via templating. 
-SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf' SWIFT_CONF = '/etc/swift/swift.conf' SWIFT_PROXY_CONF = '/etc/swift/proxy-server.conf' SWIFT_CONF_DIR = os.path.dirname(SWIFT_CONF) MEMCACHED_CONF = '/etc/memcached.conf' -APACHE_CONF = '/etc/apache2/conf.d/swift-rings' -APACHE_24_CONF = '/etc/apache2/conf-available/swift-rings.conf' +SWIFT_RINGS_CONF = '/etc/apache2/conf.d/swift-rings' +SWIFT_RINGS_24_CONF = '/etc/apache2/conf-available/swift-rings.conf' +HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' +APACHE_SITE_CONF = '/etc/apache2/sites-available/openstack_https_frontend' +APACHE_SITE_24_CONF = '/etc/apache2/sites-available/' \ + 'openstack_https_frontend.conf' WWW_DIR = '/var/www/swift-rings' @@ -38,47 +52,103 @@ BASE_PACKAGES = [ 'apache2', 'python-keystone', ] +# > Folsom specific packages +FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3'] SWIFT_HA_RES = 'res_swift_vip' -# Folsom-specific packages -FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3'] +TEMPLATES = 'templates/' + +# Map config files to hook contexts and services that will be associated +# with file in restart_on_changes()'s service map. 
+CONFIG_FILES = OrderedDict([ + (SWIFT_CONF, { + 'hook_contexts': [swift_context.SwiftHashContext()], + 'services': ['swift-proxy'], + }), + (SWIFT_PROXY_CONF, { + 'hook_contexts': [swift_context.SwiftIdentityContext()], + 'services': ['swift-proxy'], + }), + (HAPROXY_CONF, { + 'hook_contexts': [context.HAProxyContext(), + swift_context.HAProxyContext()], + 'services': ['haproxy'], + }), + (SWIFT_RINGS_CONF, { + 'hook_contexts': [swift_context.SwiftRingContext()], + 'services': ['apache2'], + }), + (SWIFT_RINGS_24_CONF, { + 'hook_contexts': [swift_context.SwiftRingContext()], + 'services': ['apache2'], + }), + (APACHE_SITE_CONF, { + 'hook_contexts': [swift_context.ApacheSSLContext()], + 'services': ['apache2'], + }), + (APACHE_SITE_24_CONF, { + 'hook_contexts': [swift_context.ApacheSSLContext()], + 'services': ['apache2'], + }), + (MEMCACHED_CONF, { + 'hook_contexts': [swift_context.MemcachedContext()], + 'services': ['memcached'], + }), +]) -def proxy_control(action): - '''utility to work around swift-init's bad RCs.''' - def _cmd(action): - return ['swift-init', 'proxy-server', action] +def register_configs(): + """ + Register config files with their respective contexts. + Regstration of some configs may not be required depending on + existing of certain relations. + """ + # if called without anything installed (eg during install hook) + # just default to earliest supported release. configs dont get touched + # till post-install, anyway. 
+ release = openstack.get_os_codename_package('swift-proxy', fatal=False) \ + or 'essex' + configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, + openstack_release=release) - p = subprocess.Popen(_cmd('status'), stdout=subprocess.PIPE) - p.communicate() - status = p.returncode - if action == 'stop': - if status == 1: - return - elif status == 0: - return subprocess.check_call(_cmd('stop')) + confs = [SWIFT_CONF, + SWIFT_PROXY_CONF, + HAPROXY_CONF, + MEMCACHED_CONF] - # the proxy will not start unless there are balanced rings - # gzip'd in /etc/swift - missing = False - for k in SWIFT_RINGS.keys(): - if not os.path.exists(os.path.join(SWIFT_CONF_DIR, '%s.ring.gz' % k)): - missing = True - if missing: - utils.juju_log('INFO', 'Rings not balanced, skipping %s.' % action) - return + for conf in confs: + configs.register(conf, CONFIG_FILES[conf]['hook_contexts']) - if action == 'start': - if status == 0: - return - elif status == 1: - return subprocess.check_call(_cmd('start')) - elif action == 'restart': - if status == 0: - return subprocess.check_call(_cmd('restart')) - elif status == 1: - return subprocess.check_call(_cmd('start')) + if os.path.exists('/etc/apache2/conf-available'): + configs.register(SWIFT_RINGS_24_CONF, + CONFIG_FILES[SWIFT_RINGS_24_CONF]['hook_contexts']) + configs.register(APACHE_SITE_24_CONF, + CONFIG_FILES[APACHE_SITE_24_CONF]['hook_contexts']) + else: + configs.register(SWIFT_RINGS_CONF, + CONFIG_FILES[SWIFT_RINGS_CONF]['hook_contexts']) + configs.register(APACHE_SITE_CONF, + CONFIG_FILES[APACHE_SITE_CONF]['hook_contexts']) + return configs + + +def restart_map(): + ''' + Determine the correct resource map to be passed to + charmhelpers.core.restart_on_change() based on the services configured. + + :returns: dict: A dictionary mapping config file to lists of services + that should be restarted when file changes. 
+ ''' + _map = [] + for f, ctxt in CONFIG_FILES.iteritems(): + svcs = [] + for svc in ctxt['services']: + svcs.append(svc) + if svcs: + _map.append((f, svcs)) + return OrderedDict(_map) def swift_user(username='swift'): @@ -105,118 +175,12 @@ def determine_packages(release): return FOLSOM_PACKAGES -def render_config(config_file, context): - '''write out config using templates for a specific openstack release.''' - os_release = openstack.get_os_codename_package('python-swift') - # load os release-specific templates. - cfile = os.path.basename(config_file) - templates_dir = os.path.join(utils.TEMPLATES_DIR, os_release) - context['os_release'] = os_release - return utils.render_template(cfile, context, templates_dir) - - -def get_swift_hash(): - if os.path.isfile(SWIFT_HASH_FILE): - with open(SWIFT_HASH_FILE, 'r') as hashfile: - swift_hash = hashfile.read().strip() - elif utils.config_get('swift-hash'): - swift_hash = utils.config_get('swift-hash') - with open(SWIFT_HASH_FILE, 'w') as hashfile: - hashfile.write(swift_hash) - else: - cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n'] - rand = open('/dev/random', 'r') - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=rand) - swift_hash = p.communicate()[0].strip() - with open(SWIFT_HASH_FILE, 'w') as hashfile: - hashfile.write(swift_hash) - return swift_hash - - -def get_keystone_auth(): - '''return standard keystone auth credentials, either from config or the - identity-service relation. user-specified config is given priority - over an existing relation. 
- ''' - auth_type = utils.config_get('auth-type') - auth_host = utils.config_get('keystone-auth-host') - admin_user = utils.config_get('keystone-admin-user') - admin_password = utils.config_get('keystone-admin-user') - if (auth_type == 'keystone' and auth_host - and admin_user and admin_password): - utils.juju_log('INFO', 'Using user-specified Keystone configuration.') - ks_auth = { - 'auth_type': 'keystone', - 'auth_protocol': utils.config_get('keystone-auth-protocol'), - 'keystone_host': auth_host, - 'auth_port': utils.config_get('keystone-auth-port'), - 'service_user': admin_user, - 'service_password': admin_password, - 'service_tenant': utils.config_get('keystone-admin-tenant-name') - } - return ks_auth - - for relid in utils.relation_ids('identity-service'): - utils.juju_log('INFO', - 'Using Keystone configuration from identity-service.') - for unit in utils.relation_list(relid): - ks_auth = { - 'auth_type': 'keystone', - 'auth_protocol': 'http', - 'keystone_host': utils.relation_get('auth_host', - unit, relid), - 'auth_port': utils.relation_get('auth_port', unit, relid), - 'service_user': utils.relation_get('service_username', - unit, relid), - 'service_password': utils.relation_get('service_password', - unit, relid), - 'service_tenant': utils.relation_get('service_tenant', - unit, relid), - 'service_port': utils.relation_get('service_port', - unit, relid), - 'admin_token': utils.relation_get('admin_token', - unit, relid), - } - if None not in ks_auth.itervalues(): - return ks_auth - return None - - def write_proxy_config(): - - bind_port = utils.config_get('bind-port') - workers = utils.config_get('workers') - if workers == '0': - import multiprocessing - workers = multiprocessing.cpu_count() - env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server', - 'OPENSTACK_PORT_API': bind_port, + 'OPENSTACK_PORT_API': config('bind-port'), 'OPENSTACK_PORT_MEMCACHED': 11211} openstack.save_script_rc(**env_vars) - ctxt = { - 'proxy_ip': utils.get_host_ip(), - 'bind_port': 
cluster.determine_api_port(bind_port), - 'workers': workers, - 'operator_roles': utils.config_get('operator-roles'), - 'delay_auth_decision': utils.config_get('delay-auth-decision') - } - - ctxt['ssl'] = False - - ks_auth = get_keystone_auth() - if ks_auth: - utils.juju_log('INFO', 'Enabling Keystone authentication.') - for k, v in ks_auth.iteritems(): - ctxt[k] = v - - with open(SWIFT_PROXY_CONF, 'w') as conf: - conf.write(render_config(SWIFT_PROXY_CONF, ctxt)) - - proxy_control('restart') - subprocess.check_call(['open-port', str(bind_port)]) - def _load_builder(path): # lifted straight from /usr/bin/swift-ring-builder @@ -266,7 +230,7 @@ def exists_in_ring(ring_path, node): if sorted(d) == sorted(n): msg = 'Node already exists in ring (%s).' % ring_path - utils.juju_log('INFO', msg) + log(msg) return True return False @@ -293,9 +257,9 @@ def add_to_ring(ring_path, node): ring.add_dev(new_dev) _write_ring(ring, ring_path) msg = 'Added new device to ring %s: %s' %\ - (ring_path, - [k for k in new_dev.iteritems()]) - utils.juju_log('INFO', msg) + (ring_path, + [k for k in new_dev.iteritems()]) + log(msg) def _get_zone(ring_builder): @@ -331,7 +295,7 @@ def get_zone(assignment_policy): being assigned to a different zone. 
''' if assignment_policy == 'manual': - return utils.relation_get('zone') + return relation_get('zone') elif assignment_policy == 'auto': potential_zones = [] for ring in SWIFT_RINGS.itervalues(): @@ -339,8 +303,8 @@ def get_zone(assignment_policy): potential_zones.append(_get_zone(builder)) return set(potential_zones).pop() else: - utils.juju_log('ERROR', 'Invalid zone assignment policy: %s' %\ - assignment_policy) + log('Invalid zone assignment policy: %s' % assignment_policy, + level=ERROR) sys.exit(1) @@ -358,7 +322,7 @@ def balance_ring(ring_path): # swift-ring-builder returns 1 on WARNING (ring didn't require balance) return False else: - utils.juju_log('ERROR', 'balance_ring: %s returned %s' % (cmd, rc)) + log('balance_ring: %s returned %s' % (cmd, rc), level=ERROR) sys.exit(1) @@ -379,91 +343,14 @@ def should_balance(rings): def write_apache_config(): '''write out /etc/apache2/conf.d/swift-rings with a list of authenticated hosts''' - apache_conf = APACHE_CONF - apache24 = os.path.exists(os.path.dirname(APACHE_24_CONF)) - if apache24: - apache_conf = APACHE_24_CONF - utils.juju_log('INFO', 'Updating %s.' % apache_conf) - - allowed_hosts = [] - for relid in utils.relation_ids('swift-storage'): - for unit in utils.relation_list(relid): - host = utils.relation_get('private-address', unit, relid) - allowed_hosts.append(utils.get_host_ip(host)) - - ctxt = { - 'www_dir': WWW_DIR, - 'allowed_hosts': allowed_hosts - } - with open(apache_conf, 'w') as conf: - conf.write(render_config(APACHE_CONF, ctxt)) + apache24 = os.path.exists(os.path.dirname(MEMCACHED_CONF)) if apache24: subprocess.check_call(['a2enconf', 'swift-rings']) - utils.reload('apache2') - - -def generate_cert(): - ''' - Generates a self signed certificate and key using the - provided charm configuration data. 
- - returns: tuple of (cert, key) - ''' - CERT = '/etc/swift/ssl.cert' - KEY = '/etc/swift/ssl.key' - if (not os.path.exists(CERT) and - not os.path.exists(KEY)): - subj = '/C=%s/ST=%s/L=%s/CN=%s' %\ - (utils.config_get('country'), utils.config_get('state'), - utils.config_get('locale'), utils.config_get('common-name')) - cmd = ['openssl', 'req', '-new', '-x509', '-nodes', - '-out', CERT, '-keyout', KEY, - '-subj', subj] - subprocess.check_call(cmd) - os.chmod(KEY, 0600) - # Slurp as base64 encoded - makes handling easier up the stack - with open(CERT, 'r') as cfile: - ssl_cert = b64encode(cfile.read()) - with open(KEY, 'r') as kfile: - ssl_key = b64encode(kfile.read()) - return (ssl_cert, ssl_key) - - -def configure_haproxy(): - api_port = utils.config_get('bind-port') - service_ports = { - "swift": [ - cluster.determine_haproxy_port(api_port), - cluster.determine_api_port(api_port) - ] - } - write_proxy_config() - haproxy.configure_haproxy(service_ports) - - -def configure_https(): - if cluster.https(): - api_port = utils.config_get('bind-port') - if (len(cluster.peer_units()) > 0 or - cluster.is_clustered()): - target_port = cluster.determine_haproxy_port(api_port) - configure_haproxy() - else: - target_port = cluster.determine_api_port(api_port) - write_proxy_config() - cert, key = apache.get_cert() - if None in (cert, key): - cert, key = generate_cert() - ca_cert = apache.get_ca_cert() - apache.setup_https(namespace="swift", - port_maps={api_port: target_port}, - cert=cert, key=key, ca_cert=ca_cert) def do_openstack_upgrade(source, packages): openstack.configure_installation_source(source) - os.environ['DEBIAN_FRONTEND'] = 'noninteractive' - subprocess.check_call(['apt-get', 'update']) - cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confnew', '-y', - 'install'] + packages - subprocess.check_call(cmd) + apt_update(fatal=True) + apt_install(options=['--option', 'Dpkg::Options::=--force-confnew'], + packages=packages, + fatal=True) diff --git a/revision 
b/revision index 6a4573e..878d5a0 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -133 +146 diff --git a/templates/folsom/memcached.conf b/templates/folsom/memcached.conf deleted file mode 120000 index 55feb6b..0000000 --- a/templates/folsom/memcached.conf +++ /dev/null @@ -1 +0,0 @@ -../essex/memcached.conf \ No newline at end of file diff --git a/templates/folsom/proxy-server.conf b/templates/folsom/proxy-server.conf deleted file mode 120000 index 695f6f6..0000000 --- a/templates/folsom/proxy-server.conf +++ /dev/null @@ -1 +0,0 @@ -../essex/proxy-server.conf \ No newline at end of file diff --git a/templates/folsom/swift-rings b/templates/folsom/swift-rings deleted file mode 120000 index 4a86480..0000000 --- a/templates/folsom/swift-rings +++ /dev/null @@ -1 +0,0 @@ -../essex/swift-rings \ No newline at end of file diff --git a/templates/folsom/swift.conf b/templates/folsom/swift.conf deleted file mode 120000 index 9f252b6..0000000 --- a/templates/folsom/swift.conf +++ /dev/null @@ -1 +0,0 @@ -../essex/swift.conf \ No newline at end of file diff --git a/templates/grizzly/memcached.conf b/templates/grizzly/memcached.conf deleted file mode 120000 index 55feb6b..0000000 --- a/templates/grizzly/memcached.conf +++ /dev/null @@ -1 +0,0 @@ -../essex/memcached.conf \ No newline at end of file diff --git a/templates/grizzly/proxy-server.conf b/templates/grizzly/proxy-server.conf index 56262c3..adb7bcd 100644 --- a/templates/grizzly/proxy-server.conf +++ b/templates/grizzly/proxy-server.conf @@ -48,7 +48,6 @@ admin_password = {{ service_password }} delay_auth_decision = {{ delay_auth_decision|lower }} {% if os_release != 'essex' %}signing_dir = /etc/swift{% endif %} - [filter:s3token] paste.filter_factory = keystone.middleware.s3_token:filter_factory service_host = {{ keystone_host }} diff --git a/templates/grizzly/swift-rings b/templates/grizzly/swift-rings deleted file mode 120000 index 4a86480..0000000 --- a/templates/grizzly/swift-rings +++ /dev/null @@ -1 +0,0 @@ 
-../essex/swift-rings \ No newline at end of file diff --git a/templates/grizzly/swift.conf b/templates/grizzly/swift.conf deleted file mode 120000 index 9f252b6..0000000 --- a/templates/grizzly/swift.conf +++ /dev/null @@ -1 +0,0 @@ -../essex/swift.conf \ No newline at end of file diff --git a/templates/havana/memcached.conf b/templates/havana/memcached.conf deleted file mode 120000 index 55feb6b..0000000 --- a/templates/havana/memcached.conf +++ /dev/null @@ -1 +0,0 @@ -../essex/memcached.conf \ No newline at end of file diff --git a/templates/havana/swift-rings b/templates/havana/swift-rings deleted file mode 120000 index 4a86480..0000000 --- a/templates/havana/swift-rings +++ /dev/null @@ -1 +0,0 @@ -../essex/swift-rings \ No newline at end of file diff --git a/templates/havana/swift.conf b/templates/havana/swift.conf deleted file mode 120000 index 9f252b6..0000000 --- a/templates/havana/swift.conf +++ /dev/null @@ -1 +0,0 @@ -../essex/swift.conf \ No newline at end of file diff --git a/templates/essex/memcached.conf b/templates/memcached.conf similarity index 100% rename from templates/essex/memcached.conf rename to templates/memcached.conf diff --git a/templates/essex/swift-rings b/templates/swift-rings similarity index 100% rename from templates/essex/swift-rings rename to templates/swift-rings diff --git a/templates/swift-rings.conf b/templates/swift-rings.conf new file mode 120000 index 0000000..a6311b1 --- /dev/null +++ b/templates/swift-rings.conf @@ -0,0 +1 @@ +swift-rings \ No newline at end of file diff --git a/templates/essex/swift.conf b/templates/swift.conf similarity index 100% rename from templates/essex/swift.conf rename to templates/swift.conf