diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..71dfd409 --- /dev/null +++ b/Makefile @@ -0,0 +1,8 @@ +#!/usr/bin/make + +lint: + @flake8 --exclude hooks/charmhelpers hooks + @charm proof + +sync: + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml new file mode 100644 index 00000000..908ac6f5 --- /dev/null +++ b/charm-helpers-sync.yaml @@ -0,0 +1,6 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core + - contrib.openstack + - contrib.hahelpers diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 00000000..3208a85c --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,58 @@ +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(): + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get('ssl_cert', + rid=r_id, unit=unit) + if not key: + key = relation_get('ssl_key', + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = None + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not ca_cert: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def install_ca_cert(ca_cert): + if ca_cert: + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', + 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/hooks/charmhelpers/contrib/hahelpers/ceph.py b/hooks/charmhelpers/contrib/hahelpers/ceph.py new file mode 100644 index 00000000..fb1b8b9b --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/ceph.py @@ -0,0 +1,278 @@ +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import commands +import os +import shutil + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, +) + +from charmhelpers.core.host import ( + apt_install, + mount, + mounts, + service_start, + service_stop, + umount, +) + +KEYRING = '/etc/ceph/ceph.client.%s.keyring' +KEYFILE = '/etc/ceph/ceph.client.%s.key' + +CEPH_CONF = """[global] + auth supported = %(auth)s + keyring = %(keyring)s + mon host = %(mon_hosts)s +""" + + +def running(service): + # this local util can be dropped as soon the following branch lands + # in lp:charm-helpers + # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/ + try: + output = check_output(['service', service, 'status']) + except CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def install(): + ceph_dir = "/etc/ceph" + if not os.path.isdir(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' % + (service, pool)) + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) + return name in out + + +def create_pool(service, name): + cmd = [ + 'rados', + '--id', + service, + 'mkpool', + name + ] + check_call(cmd) + + +def keyfile_path(service): + return KEYFILE % service + + +def keyring_path(service): + return KEYRING % service + + +def create_keyring(service, key): + keyring = 
keyring_path(service) + if os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=INFO) + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.%s' % service, + '--add-key=%s' % key + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + # create a file containing the key + keyfile = keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=INFO) + fd = open(keyfile, 'w') + fd.write(key) + fd.close() + log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + +def get_ceph_nodes(): + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth): + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + mon_hosts = ",".join(map(str, hosts)) + keyring = keyring_path(service) + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF % locals()) + modprobe_kernel_module('rbd') + + +def image_mapped(image_name): + (rc, out) = commands.getstatusoutput('rbd showmapped') + return image_name in out + + +def map_block_storage(service, pool, image): + cmd = [ + 'rbd', + 'map', + '%s/%s' % (pool, image), + '--user', + service, + '--secret', + keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + return fs in [f for m, f in mounts()] + + +def make_filesystem(blk_device, fstype='ext4'): + log('ceph: Formatting block device %s as filesystem %s.' 
% + (blk_device, fstype), level=INFO) + cmd = ['mkfs', '-t', fstype, blk_device] + check_call(cmd) + + +def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): + # mount block device into /mnt + mount(blk_device, '/mnt') + + # copy data to /mnt + try: + copy_files(data_src_dst, '/mnt') + except: + pass + + # umount block device + umount('/mnt') + + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + + # re-mount where the data should originally be + mount(blk_device, data_src_dst, persist=True) + + # ensure original ownership of new mount. + cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] + check_call(cmd) + + +# TODO: re-use +def modprobe_kernel_module(module): + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + cmd = 'echo %s >> /etc/modules' % module + check_call(cmd, shell=True) + + +def copy_files(src, dst, symlinks=False, ignore=None): + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[]): + """ + To be called from the current cluster leader. + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being remounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool %s.' % pool, level=INFO) + create_pool(service, pool) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image (%s).' 
% rbd_img, level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image as a Block Device.', level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if running(svc): + log('Stopping services %s prior to migrating data.' % svc, + level=INFO) + service_stop(svc) + + place_data_on_ceph(service, blk_device, mount_point, fstype) + + for svc in system_services: + service_start(svc) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 00000000..dde6c9bb --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,180 @@ +# +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess +import os + +from socket import gethostname as get_unit_hostname + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + ERROR, +) + + +class HAIncompleteConfig(Exception): + pass + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_leader(resource): + cmd = [ + "crm", "resource", + "show", resource + ] + try: + status = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + return False + else: + if get_unit_hostname() in status: + return True + else: + return False + + +def peer_units(): + peers = [] + for r_id in (relation_ids('cluster') or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def oldest_peer(peers): + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + if is_clustered(): + if not is_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . 
+ returns: boolean + ''' + if config_get('use-https') == "yes": + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if None not in [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ssl_cert', rid=r_id, unit=unit), + relation_get('ssl_key', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ]: + return True + return False + + +def determine_api_port(public_port): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + returns: int: the correct listening port for the API service + ''' + i = 0 + if len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_haproxy_port(public_port): + ''' + Description: Determine correct proxy listening port based on public IP + + existence of HTTPS reverse proxy. + + public_port: int: standard public port for given service + + returns: int: the correct listening port for the HAProxy service + ''' + i = 0 + if https(): + i += 1 + return public_port - (i * 10) + + +def get_hacluster_config(): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr + + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing. 
+ ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] + conf = {} + for setting in settings: + conf[setting] = config_get(setting) + missing = [] + [missing.append(s) for s, v in conf.iteritems() if v is None] + if missing: + log('Insufficient config data to configure hacluster.', level=ERROR) + raise HAIncompleteConfig + return conf + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + :vip_setting: str: Setting in charm config that specifies + VIP address. + ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = get_unit_hostname() + return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py new file mode 100644 index 00000000..f146e0bc --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,271 @@ +import os + +from base64 import b64decode + +from subprocess import ( + check_call +) + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_get, + relation_ids, + related_units, + unit_get, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + determine_api_port, + determine_haproxy_port, + https, + is_clustered, + peer_units, +) + +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, +) + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' + + +class OSContextError(Exception): + pass + + +def context_complete(ctxt): + _missing = [] + for k, v in ctxt.iteritems(): + if v is None or 
v == '': + _missing.append(k) + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level='INFO') + return False + return True + + +class OSContextGenerator(object): + interfaces = [] + + def __call__(self): + raise NotImplementedError + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __call__(self): + log('Generating template context for shared-db') + conf = config() + try: + database = conf['database'] + username = conf['database-user'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' % e) + raise OSContextError + ctxt = {} + for rid in relation_ids('shared-db'): + for unit in related_units(rid): + ctxt = { + 'database_host': relation_get('db_host', rid=rid, + unit=unit), + 'database': database, + 'database_user': username, + 'database_password': relation_get('password', rid=rid, + unit=unit) + } + if not context_complete(ctxt): + return {} + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + interfaces = ['identity-service'] + + def __call__(self): + log('Generating template context for identity-service') + ctxt = {} + + for rid in relation_ids('identity-service'): + for unit in related_units(rid): + ctxt = { + 'service_port': relation_get('service_port', rid=rid, + unit=unit), + 'service_host': relation_get('service_host', rid=rid, + unit=unit), + 'auth_host': relation_get('auth_host', rid=rid, unit=unit), + 'auth_port': relation_get('auth_port', rid=rid, unit=unit), + 'admin_tenant_name': relation_get('service_tenant', + rid=rid, unit=unit), + 'admin_user': relation_get('service_username', rid=rid, + unit=unit), + 'admin_password': relation_get('service_password', rid=rid, + unit=unit), + # XXX: Hard-coded http. 
+ 'service_protocol': 'http', + 'auth_protocol': 'http', + } + if not context_complete(ctxt): + return {} + return ctxt + + +class AMQPContext(OSContextGenerator): + interfaces = ['amqp'] + + def __call__(self): + log('Generating template context for amqp') + conf = config() + try: + username = conf['rabbit-user'] + vhost = conf['rabbit-vhost'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' % e) + raise OSContextError + + ctxt = {} + for rid in relation_ids('amqp'): + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + rabbitmq_host = relation_get('vip', rid=rid, unit=unit) + else: + rabbitmq_host = relation_get('private-address', + rid=rid, unit=unit) + ctxt = { + 'rabbitmq_host': rabbitmq_host, + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + } + if not context_complete(ctxt): + return {} + return ctxt + + +class CephContext(OSContextGenerator): + interfaces = ['ceph'] + + def __call__(self): + '''This generates context for /etc/ceph/ceph.conf templates''' + log('Generating tmeplate context for ceph') + mon_hosts = [] + auth = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + mon_hosts.append(relation_get('private-address', rid=rid, + unit=unit)) + auth = relation_get('auth', rid=rid, unit=unit) + + ctxt = { + 'mon_hosts': ' '.join(mon_hosts), + 'auth': auth, + } + if not context_complete(ctxt): + return {} + return ctxt + + +class HAProxyContext(OSContextGenerator): + interfaces = ['cluster'] + + def __call__(self): + ''' + Builds half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. 
+ ''' + if not relation_ids('cluster'): + return {} + + cluster_hosts = {} + l_unit = local_unit().replace('/', '-') + cluster_hosts[l_unit] = unit_get('private-address') + + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + addr = relation_get('private-address', rid=rid, unit=unit) + cluster_hosts[_unit] = addr + + ctxt = { + 'units': cluster_hosts, + } + if len(cluster_hosts.keys()) > 1: + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.') + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + return ctxt + log('HAProxy context is incomplete, this unit has no peers.') + return {} + + +class ApacheSSLContext(OSContextGenerator): + """ + Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like: + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. 
+ external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self): + if not os.path.isdir('/etc/apache2/ssl'): + os.mkdir('/etc/apache2/ssl') + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + if not os.path.isdir(ssl_dir): + os.mkdir(ssl_dir) + cert, key = get_cert() + with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: + cert_out.write(b64decode(cert)) + with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: + key_out.write(b64decode(key)) + ca_cert = get_ca_cert() + if ca_cert: + with open(CA_CERT_PATH, 'w') as ca_out: + ca_out.write(b64decode(ca_cert)) + + def __call__(self): + if isinstance(self.external_ports, basestring): + self.external_ports = [self.external_ports] + if (not self.external_ports or not https()): + return {} + + self.configure_cert() + self.enable_modules() + + ctxt = { + 'namespace': self.service_namespace, + 'private_address': unit_get('private-address'), + 'endpoints': [] + } + for ext_port in self.external_ports: + if peer_units() or is_clustered(): + int_port = determine_haproxy_port(ext_port) + else: + int_port = determine_api_port(ext_port) + portmap = (int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 00000000..0b49ad28 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,2 @@ +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 00000000..c555cc6e --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,261 @@ +import os + +from charmhelpers.core.host import apt_install + 
+from charmhelpers.core.hookenv import ( + log, + ERROR, + INFO +) + +from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES + +try: + from jinja2 import FileSystemLoader, ChoiceLoader, Environment +except ImportError: + # python-jinja2 may not be installed yet, or we're running unittests. + FileSystemLoader = ChoiceLoader = Environment = None + + +class OSConfigException(Exception): + pass + + +def get_loader(templates_dir, os_release): + """ + Create a jinja2.ChoiceLoader containing template dirs up to + and including os_release. If directory template directory + is missing at templates_dir, it will be omitted from the loader. + templates_dir is added to the bottom of the search list as a base + loading dir. + + A charm may also ship a templates dir with this module + and it will be appended to the bottom of the search list, eg: + hooks/charmhelpers/contrib/openstack/templates. + + :param templates_dir: str: Base template directory containing release + sub-directories. + :param os_release : str: OpenStack release codename to construct template + loader. + + :returns : jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. + """ + tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) + for rel in OPENSTACK_CODENAMES.itervalues()] + + if not os.path.isdir(templates_dir): + log('Templates directory not found @ %s.' % templates_dir, + level=ERROR) + raise OSConfigException + + # the bottom contains tempaltes_dir and possibly a common templates dir + # shipped with the helper. 
+ loaders = [FileSystemLoader(templates_dir)] + helper_templates = os.path.join(os.path.dirname(__file__), 'templates') + if os.path.isdir(helper_templates): + loaders.append(FileSystemLoader(helper_templates)) + + for rel, tmpl_dir in tmpl_dirs: + if os.path.isdir(tmpl_dir): + loaders.insert(0, FileSystemLoader(tmpl_dir)) + if rel == os_release: + break + log('Creating choice loader with dirs: %s' % + [l.searchpath for l in loaders], level=INFO) + return ChoiceLoader(loaders) + + +class OSConfigTemplate(object): + """ + Associates a config file template with a list of context generators. + Responsible for constructing a template context based on those generators. + """ + def __init__(self, config_file, contexts): + self.config_file = config_file + + if hasattr(contexts, '__call__'): + self.contexts = [contexts] + else: + self.contexts = contexts + + self._complete_contexts = [] + + def context(self): + ctxt = {} + for context in self.contexts: + _ctxt = context() + if _ctxt: + ctxt.update(_ctxt) + # track interfaces for every complete context. + [self._complete_contexts.append(interface) + for interface in context.interfaces + if interface not in self._complete_contexts] + return ctxt + + def complete_contexts(self): + ''' + Return a list of interfaces that have atisfied contexts. + ''' + if self._complete_contexts: + return self._complete_contexts + self.context() + return self._complete_contexts + + +class OSConfigRenderer(object): + """ + This class provides a common templating system to be used by OpenStack + charms. It is intended to help charms share common code and templates, + and ease the burden of managing config templates across multiple OpenStack + releases. + + Basic usage: + # import some common context generates from charmhelpers + from charmhelpers.contrib.openstack import context + + # Create a renderer object for a specific OS release. 
+ configs = OSConfigRenderer(templates_dir='/tmp/templates', + openstack_release='folsom') + # register some config files with context generators. + configs.register(config_file='/etc/nova/nova.conf', + contexts=[context.SharedDBContext(), + context.AMQPContext()]) + configs.register(config_file='/etc/nova/api-paste.ini', + contexts=[context.IdentityServiceContext()]) + configs.register(config_file='/etc/haproxy/haproxy.conf', + contexts=[context.HAProxyContext()]) + # write out a single config + configs.write('/etc/nova/nova.conf') + # write out all registered configs + configs.write_all() + + Details: + + OpenStack Releases and template loading + --------------------------------------- + When the object is instantiated, it is associated with a specific OS + release. This dictates how the template loader will be constructed. + + The constructed loader attempts to load the template from several places + in the following order: + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. + + + For the example above, '/tmp/templates' contains the following structure: + /tmp/templates/nova.conf + /tmp/templates/api-paste.ini + /tmp/templates/grizzly/api-paste.ini + /tmp/templates/havana/api-paste.ini + + Since it was registered with the grizzly release, it first seraches + the grizzly directory for nova.conf, then the templates dir. + + When writing api-paste.ini, it will find the template in the grizzly + directory. + + If the object were created with folsom, it would fall back to the + base templates dir for its api-paste.ini template. 
+ + This system should help manage changes in config files through + openstack releases, allowing charms to fall back to the most recently + updated config template for a given release + + The haproxy.conf, since it is not shipped in the templates dir, will + be loaded from the module directory's template directory, eg + $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows + us to ship common templates (haproxy, apache) with the helpers. + + Context generators + --------------------------------------- + Context generators are used to generate template contexts during hook + execution. Doing so may require inspecting service relations, charm + config, etc. When registered, a config file is associated with a list + of generators. When a template is rendered and written, all context + generates are called in a chain to generate the context dictionary + passed to the jinja2 template. See context.py for more info. + """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + apt_install('python-jinja2') + + def register(self, config_file, contexts): + """ + Register a config file with a list of context generators to be called + during rendering. 
+ """ + self.templates[config_file] = OSConfigTemplate(config_file=config_file, + contexts=contexts) + log('Registered config file: %s' % config_file, level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from %s' % template.filename, level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + ctxt = self.templates[config_file].context() + _tmpl = os.path.basename(config_file) + log('Rendering from template: %s' % _tmpl, level=INFO) + template = self._get_template(_tmpl) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. + """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + with open(config_file, 'wb') as out: + out.write(self.render(config_file)) + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in self.templates.iterkeys()] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. 
+ ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in self.templates.itervalues()] + return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 00000000..2c096f1d --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,266 @@ +#!/usr/bin/python + +# Common python helper functions used for OpenStack charms. + +from collections import OrderedDict + +import apt_pkg as apt +import subprocess +import os +import sys + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, +) + +from charmhelpers.core.host import ( + lsb_release, +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), +]) + +# The ugly duckling +SWIFT_CODENAMES = { + '1.4.3': 'diablo', + '1.4.8': 'essex', + '1.7.4': 'folsom', + '1.7.6': 'grizzly', + '1.7.7': 'grizzly', + '1.8.0': 'grizzly', +} + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src == 'distro': + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb 
string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + apt.init() + cache = apt.Cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. 
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.UpstreamVersion(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + vers = vers[:5] + return SWIFT_CODENAMES[vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in vers_map.iteritems(): + if cname == codename: + return version + #e = "Could not determine OpenStack version for package: %s" % pkg + #error_out(e) + + +def import_key(keyid): + cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. 
+ os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. + pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly' + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + # TODO: Replace key import with cloud archive keyring pkg. + import_key(CLOUD_ARCHIVE_KEY_ID) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-') + juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in env_vars.iteritems() if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. 
+ + """ + + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..f03e494b --- /dev/null +++ b/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,339 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. 
+ ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return json.loads(subprocess.check_output(config_cmd_line)) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not 
None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + 
charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + +def 
charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py new file mode 100644 index 00000000..6550b632 --- /dev/null +++ b/hooks/charmhelpers/core/host.py @@ -0,0 +1,272 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from collections import OrderedDict + +from hookenv import log, execution_environment + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + 
log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + context = execution_environment() + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path.format(**context)) + cmd.append(to_path.format(**context)) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + context = execution_environment() + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source.format(**context), + destination.format(**context) + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + context = execution_environment() + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner.format(**context)).pw_uid + gid = grp.getgrnam(group.format(**context)).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, content, owner='root', group='root', perms=0444): + """Create or overwrite a file with the contents of a string""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + 
p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as 
source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. + ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(OrderedDict.fromkeys(restarts)): + service('restart', service_name) + return wrapped_f + return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d diff --git a/hooks/cluster-relation-changed b/hooks/cluster-relation-changed index 99375287..8355ca46 120000 --- a/hooks/cluster-relation-changed +++ b/hooks/cluster-relation-changed @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/cluster-relation-departed b/hooks/cluster-relation-departed index 99375287..8355ca46 120000 --- a/hooks/cluster-relation-departed +++ b/hooks/cluster-relation-departed @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/config-changed b/hooks/config-changed index 99375287..8355ca46 120000 --- a/hooks/config-changed +++ b/hooks/config-changed @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git 
a/hooks/ha-relation-changed b/hooks/ha-relation-changed deleted file mode 120000 index 99375287..00000000 --- a/hooks/ha-relation-changed +++ /dev/null @@ -1 +0,0 @@ -horizon-relations \ No newline at end of file diff --git a/hooks/ha-relation-joined b/hooks/ha-relation-joined index 99375287..8355ca46 120000 --- a/hooks/ha-relation-joined +++ b/hooks/ha-relation-joined @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/horizon_contexts.py b/hooks/horizon_contexts.py new file mode 100644 index 00000000..8f023852 --- /dev/null +++ b/hooks/horizon_contexts.py @@ -0,0 +1,92 @@ +from charmhelpers.core.hookenv import ( + config, + relation_ids, + related_units, + relation_get +) +from charmhelpers.contrib.openstack.context import ( + OSContextGenerator, + context_complete +) +from charmhelpers.contrib.hahelpers.apache import ( + get_cert +) +from base64 import b64decode +import os + + +class HAProxyContext(OSContextGenerator): + def __call__(self): + ''' + Extends the main charmhelpers HAProxyContext with a port mapping + specific to this charm. 
+ ''' + ctxt = { + 'service_ports': { + 'dash_insecure': [80, 70], + 'dash_secure': [433, 423] + } + } + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + def __call__(self): + ''' Provide context for Identity Service relation ''' + ctxt = {} + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + ctxt['service_host'] = relation_get('service_host', + rid=r_id, + unit=unit) + ctxt['service_port'] = relation_get('service_port', + rid=r_id, + unit=unit) + if not context_complete(ctxt): + return {} + return ctxt + + +class HorizonContext(OSContextGenerator): + def __call__(self): + ''' Provide all configuration for Horizon ''' + ctxt = { + 'compress_offline': config('offline-compression') == 'yes', + 'debug': config('debug') == 'yes', + 'default_role': config('default-role'), + "webroot": config('webroot') + } + return ctxt + + +class ApacheContext(OSContextGenerator): + def __call__(self): + ''' Grab cert and key from configuraton for SSL config ''' + ctxt = { + 'http_port': 70, + 'https_port': 423 + } + return ctxt + + +class ApacheSSLContext(OSContextGenerator): + def __call__(self): + ''' Grab cert and key from configuration for SSL config ''' + (ssl_cert, ssl_key) = get_cert() + if None not in [ssl_cert, ssl_key]: + with open('/etc/ssl/certs/dashboard.cert', 'w') as cert_out: + cert_out.write(b64decode(ssl_cert)) + with open('/etc/ssl/private/dashboard.key', 'w') as key_out: + key_out.write(b64decode(ssl_key)) + os.chmod('/etc/ssl/private/dashboard.key', 0600) + ctxt = { + 'ssl_configured': True, + 'ssl_cert': '/etc/ssl/certs/dashboard.cert', + 'ssl_key': '/etc/ssl/private/dashboard.key', + } + else: + # Use snakeoil ones by default + ctxt = { + 'ssl_configured': False, + } + return ctxt diff --git a/hooks/horizon_relations.py b/hooks/horizon_relations.py new file mode 100755 index 00000000..57ad41e7 --- /dev/null +++ b/hooks/horizon_relations.py @@ -0,0 +1,113 @@ +#!/usr/bin/python + +import sys +from 
charmhelpers.core.hookenv import ( + Hooks, UnregisteredHookError, + log, + open_port, + config, + relation_set, + relation_get, + relation_ids +) +from charmhelpers.core.host import ( + apt_update, apt_install, + filter_installed_packages, + restart_on_change +) +from charmhelpers.contrib.openstack.utils import ( + configure_installation_source +) +from horizon_utils import ( + PACKAGES, register_configs, + restart_map, + LOCAL_SETTINGS, HAPROXY_CONF, + enable_ssl +) +from charmhelpers.contrib.hahelpers.apache import install_ca_cert +from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config + +hooks = Hooks() +CONFIGS = register_configs() + + +@hooks.hook() +def install(): + configure_installation_source(config('openstack-origin')) + apt_update(fatal=True) + apt_install(filter_installed_packages(PACKAGES), fatal=True) + open_port(80) + + +@hooks.hook() +def upgrade_charm(): + apt_install(filter_installed_packages(PACKAGES), fatal=True) + + +@restart_on_change(restart_map()) +@hooks.hook() +def config_changed(): + # Ensure default role changes are propagated to keystone + for relid in relation_ids('identity-service'): + keystone_joined(relid) + enable_ssl() + CONFIGS.write_all() + + +@hooks.hook('identity-service-relation-joined') +def keystone_joined(rel_id=None): + relation_set(relation_id=rel_id, + service="None", + region="None", + public_url="None", + admin_url="None", + internal_url="None", + requested_roles=config('default-role')) + + +@restart_on_change(restart_map()) +@hooks.hook('identity-service-relation-changed') +def keystone_changed(): + CONFIGS.write(LOCAL_SETTINGS) + if relation_get('ca_cert'): + install_ca_cert(relation_get('ca_cert')) + + +@restart_on_change(restart_map()) +@hooks.hook('cluster-relation-departed', + 'cluster-relation-changed') +def cluster_relation(): + CONFIGS.write(HAPROXY_CONF) + + +@hooks.hook() +def ha_relation_joined(): + config = get_hacluster_config() + resources = { + 'res_horizon_vip': 
'ocf:heartbeat:IPaddr2', + 'res_horizon_haproxy': 'lsb:haproxy' + } + vip_params = 'params ip="{}" cidr_netmask="{}" nic="{}"'\ + .format(config['vip'], config['vip_cidr'], config['vip_iface']) + resource_params = { + 'res_horizon_vip': vip_params, + 'res_horizon_haproxy': 'op monitor interval="5s"' + } + init_services = { + 'res_horizon_haproxy': 'haproxy' + } + clones = { + 'cl_horizon_haproxy': 'res_horizon_haproxy' + } + relation_set(init_services=init_services, + corosync_bindiface=config['ha-bindiface'], + corosync_mcastport=config['ha-mcastport'], + resources=resources, + resource_params=resource_params, + clones=clones) + +if __name__ == '__main__': + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) diff --git a/hooks/horizon_utils.py b/hooks/horizon_utils.py new file mode 100644 index 00000000..52440c08 --- /dev/null +++ b/hooks/horizon_utils.py @@ -0,0 +1,97 @@ +import horizon_contexts +import charmhelpers.contrib.openstack.context as context +import charmhelpers.contrib.openstack.templating as templating +import subprocess +from collections import OrderedDict + +from charmhelpers.contrib.openstack.utils import ( + get_os_codename_package +) + +PACKAGES = [ + "openstack-dashboard", "python-keystoneclient", "python-memcache", + "memcached", "haproxy", "python-novaclient", + "nodejs", "node-less" +] + +LOCAL_SETTINGS = "/etc/openstack-dashboard/local_settings.py" +HAPROXY_CONF = "/etc/haproxy/haproxy.cfg" +APACHE_CONF = "/etc/apache2/conf.d/openstack-dashboard.conf" +PORTS_CONF = "/etc/apache2/ports.conf" +APACHE_SSL = "/etc/apache2/sites-available/default-ssl" +APACHE_DEFAULT = "/etc/apache2/sites-available/default" + +TEMPLATES = 'templates' + +CONFIG_FILES = OrderedDict([ + (LOCAL_SETTINGS, { + 'hook_contexts': [horizon_contexts.HorizonContext(), + horizon_contexts.IdentityServiceContext()], + 'services': ['apache2'] + }), + (HAPROXY_CONF, { + 'hook_contexts': [context.HAProxyContext(), + 
horizon_contexts.HAProxyContext()], + 'services': ['haproxy'], + }), + (APACHE_CONF, { + 'hook_contexts': [horizon_contexts.HorizonContext()], + 'services': ['apache2'], + },) + (APACHE_SSL, { + 'hook_contexts': [horizon_contexts.ApacheSSLContext(), + horizon_contexts.ApacheContext()], + 'services': ['apache2'], + },) + (APACHE_DEFAULT, { + 'hook_contexts': [horizon_contexts.ApacheContext()], + 'services': ['apache2'], + },) + (PORTS_CONF, { + 'hook_contexts': [horizon_contexts.ApacheContext()], + 'services': ['apache2'], + },) +]) + + +def register_configs(): + # Register config files with their respective contexts. + release = get_os_codename_package('openstack-dashboard', fatal=False) or \ + 'essex' + configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, + openstack_release=release) + + confs = [LOCAL_SETTINGS, + HAPROXY_CONF, + APACHE_CONF, + APACHE_SSL, + APACHE_DEFAULT, + PORTS_CONF] + + for conf in confs: + configs.register(conf, CONFIG_FILES[conf]['hook_contexts']) + + return configs + + +def restart_map(): + ''' + Determine the correct resource map to be passed to + charmhelpers.core.restart_on_change() based on the services configured. + + :returns: dict: A dictionary mapping config file to lists of services + that should be restarted when file changes. 
+ ''' + _map = [] + for f, ctxt in CONFIG_FILES.iteritems(): + svcs = [] + for svc in ctxt['services']: + svcs.append(svc) + if svcs: + _map.append((f, svcs)) + return OrderedDict(_map) + + +def enable_ssl(): + subprocess.call(['a2ensite', 'default-ssl']) + subprocess.call(['a2enmod', 'ssl']) diff --git a/hooks/identity-service-relation-changed b/hooks/identity-service-relation-changed index 99375287..8355ca46 120000 --- a/hooks/identity-service-relation-changed +++ b/hooks/identity-service-relation-changed @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/identity-service-relation-joined b/hooks/identity-service-relation-joined index 99375287..8355ca46 120000 --- a/hooks/identity-service-relation-joined +++ b/hooks/identity-service-relation-joined @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/install b/hooks/install index 99375287..8355ca46 120000 --- a/hooks/install +++ b/hooks/install @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/lib/openstack-common b/hooks/lib/openstack-common deleted file mode 100644 index 6942e619..00000000 --- a/hooks/lib/openstack-common +++ /dev/null @@ -1,769 +0,0 @@ -#!/bin/bash -e - -# Common utility functions used across all OpenStack charms. - -error_out() { - juju-log "$CHARM ERROR: $@" - exit 1 -} - -function service_ctl_status { - # Return 0 if a service is running, 1 otherwise. 
- local svc="$1" - local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }') - case $status in - "start") return 0 ;; - "stop") return 1 ;; - *) error_out "Unexpected status of service $svc: $status" ;; - esac -} - -function service_ctl { - # control a specific service, or all (as defined by $SERVICES) - if [[ $1 == "all" ]] ; then - ctl="$SERVICES" - else - ctl="$1" - fi - action="$2" - if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then - error_out "ERROR service_ctl: Not enough arguments" - fi - - for i in $ctl ; do - case $action in - "start") - service_ctl_status $i || service $i start ;; - "stop") - service_ctl_status $i && service $i stop || return 0 ;; - "restart") - service_ctl_status $i && service $i restart || service $i start ;; - esac - if [[ $? != 0 ]] ; then - juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action" - fi - done -} - -function configure_install_source { - # Setup and configure installation source based on a config flag. - local src="$1" - - # Default to installing from the main Ubuntu archive. - [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0 - - . /etc/lsb-release - - # standard 'ppa:someppa/name' format. - if [[ "${src:0:4}" == "ppa:" ]] ; then - juju-log "$CHARM: Configuring installation from custom src ($src)" - add-apt-repository -y "$src" || error_out "Could not configure PPA access." - return 0 - fi - - # standard 'deb http://url/ubuntu main' entries. 
gpg key ids must - # be appended to the end of url after a |, ie: - # 'deb http://url/ubuntu main|$GPGKEYID' - if [[ "${src:0:3}" == "deb" ]] ; then - juju-log "$CHARM: Configuring installation from custom src URL ($src)" - if echo "$src" | grep -q "|" ; then - # gpg key id tagged to end of url folloed by a | - url=$(echo $src | cut -d'|' -f1) - key=$(echo $src | cut -d'|' -f2) - juju-log "$CHARM: Importing repository key: $key" - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \ - juju-log "$CHARM WARN: Could not import key from keyserver: $key" - else - juju-log "$CHARM No repository key specified." - url="$src" - fi - echo "$url" > /etc/apt/sources.list.d/juju_deb.list - return 0 - fi - - # Cloud Archive - if [[ "${src:0:6}" == "cloud:" ]] ; then - - # current os releases supported by the UCA. - local cloud_archive_versions="folsom grizzly" - - local ca_rel=$(echo $src | cut -d: -f2) - local u_rel=$(echo $ca_rel | cut -d- -f1) - local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1) - - [[ "$u_rel" != "$DISTRIB_CODENAME" ]] && - error_out "Cannot install from Cloud Archive pocket $src " \ - "on this Ubuntu version ($DISTRIB_CODENAME)!" - - valid_release="" - for rel in $cloud_archive_versions ; do - if [[ "$os_rel" == "$rel" ]] ; then - valid_release=1 - juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive." - fi - done - if [[ -z "$valid_release" ]] ; then - error_out "OpenStack release ($os_rel) not supported by "\ - "the Ubuntu Cloud Archive." - fi - - # CA staging repos are standard PPAs. - if echo $ca_rel | grep -q "staging" ; then - add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging - return 0 - fi - - # the others are LP-external deb repos. 
- case "$ca_rel" in - "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; - "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; - *) error_out "Invalid Cloud Archive repo specified: $src" - esac - - apt-get -y install ubuntu-cloud-keyring - entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main" - echo "$entry" \ - >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list - return 0 - fi - - error_out "Invalid installation source specified in config: $src" - -} - -get_os_codename_install_source() { - # derive the openstack release provided by a supported installation source. - local rel="$1" - local codename="unknown" - . /etc/lsb-release - - # map ubuntu releases to the openstack version shipped with it. - if [[ "$rel" == "distro" ]] ; then - case "$DISTRIB_CODENAME" in - "oneiric") codename="diablo" ;; - "precise") codename="essex" ;; - "quantal") codename="folsom" ;; - "raring") codename="grizzly" ;; - esac - fi - - # derive version from cloud archive strings. 
- if [[ "${rel:0:6}" == "cloud:" ]] ; then - rel=$(echo $rel | cut -d: -f2) - local u_rel=$(echo $rel | cut -d- -f1) - local ca_rel=$(echo $rel | cut -d- -f2) - if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then - case "$ca_rel" in - "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging") - codename="folsom" ;; - "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging") - codename="grizzly" ;; - esac - fi - fi - - # have a guess based on the deb string provided - if [[ "${rel:0:3}" == "deb" ]] || \ - [[ "${rel:0:3}" == "ppa" ]] ; then - CODENAMES="diablo essex folsom grizzly havana" - for cname in $CODENAMES; do - if echo $rel | grep -q $cname; then - codename=$cname - fi - done - fi - echo $codename -} - -get_os_codename_package() { - local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none" - pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs - case "${pkg_vers:0:6}" in - "2011.2") echo "diablo" ;; - "2012.1") echo "essex" ;; - "2012.2") echo "folsom" ;; - "2013.1") echo "grizzly" ;; - "2013.2") echo "havana" ;; - esac -} - -get_os_version_codename() { - case "$1" in - "diablo") echo "2011.2" ;; - "essex") echo "2012.1" ;; - "folsom") echo "2012.2" ;; - "grizzly") echo "2013.1" ;; - "havana") echo "2013.2" ;; - esac -} - -get_ip() { - dpkg -l | grep -q python-dnspython || { - apt-get -y install python-dnspython 2>&1 > /dev/null - } - hostname=$1 - python -c " -import dns.resolver -import socket -try: - # Test to see if already an IPv4 address - socket.inet_aton('$hostname') - print '$hostname' -except socket.error: - try: - answers = dns.resolver.query('$hostname', 'A') - if answers: - print answers[0].address - except dns.resolver.NXDOMAIN: - pass -" -} - -# Common storage routines used by cinder, nova-volume and swift-storage. 
-clean_storage() { - # if configured to overwrite existing storage, we unmount the block-dev - # if mounted and clear any previous pv signatures - local block_dev="$1" - juju-log "Cleaining storage '$block_dev'" - if grep -q "^$block_dev" /proc/mounts ; then - mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }') - juju-log "Unmounting $block_dev from $mp" - umount "$mp" || error_out "ERROR: Could not unmount storage from $mp" - fi - if pvdisplay "$block_dev" >/dev/null 2>&1 ; then - juju-log "Removing existing LVM PV signatures from $block_dev" - - # deactivate any volgroups that may be built on this dev - vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }') - if [[ -n "$vg" ]] ; then - juju-log "Deactivating existing volume group: $vg" - vgchange -an "$vg" || - error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?" - fi - echo "yes" | pvremove -ff "$block_dev" || - error_out "Could not pvremove $block_dev" - else - juju-log "Zapping disk of all GPT and MBR structures" - sgdisk --zap-all $block_dev || - error_out "Unable to zap $block_dev" - fi -} - -function get_block_device() { - # given a string, return full path to the block device for that - # if input is not a block device, find a loopback device - local input="$1" - - case "$input" in - /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist." - echo "$input"; return 0;; - /*) :;; - *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist." - echo "/dev/$input"; return 0;; - esac - - # this represents a file - # support "/path/to/file|5G" - local fpath size oifs="$IFS" - if [ "${input#*|}" != "${input}" ]; then - size=${input##*|} - fpath=${input%|*} - else - fpath=${input} - size=5G - fi - - ## loop devices are not namespaced. This is bad for containers. - ## it means that the output of 'losetup' may have the given $fpath - ## in it, but that may not represent this containers $fpath, but - ## another containers. 
To address that, we really need to - ## allow some uniq container-id to be expanded within path. - ## TODO: find a unique container-id that will be consistent for - ## this container throughout its lifetime and expand it - ## in the fpath. - # fpath=${fpath//%{id}/$THAT_ID} - - local found="" - # parse through 'losetup -a' output, looking for this file - # output is expected to look like: - # /dev/loop0: [0807]:961814 (/tmp/my.img) - found=$(losetup -a | - awk 'BEGIN { found=0; } - $3 == f { sub(/:$/,"",$1); print $1; found=found+1; } - END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \ - f="($fpath)") - - if [ $? -ne 0 ]; then - echo "multiple devices found for $fpath: $found" 1>&2 - return 1; - fi - - [ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; } - - if [ -n "$found" ]; then - echo "confused, $found is not a block device for $fpath"; - return 1; - fi - - # no existing device was found, create one - mkdir -p "${fpath%/*}" - truncate --size "$size" "$fpath" || - { echo "failed to create $fpath of size $size"; return 1; } - - found=$(losetup --find --show "$fpath") || - { echo "failed to setup loop device for $fpath" 1>&2; return 1; } - - echo "$found" - return 0 -} - -HAPROXY_CFG=/etc/haproxy/haproxy.cfg -HAPROXY_DEFAULT=/etc/default/haproxy -########################################################################## -# Description: Configures HAProxy services for Openstack API's -# Parameters: -# Space delimited list of service:port:mode combinations for which -# haproxy service configuration should be generated for. The function -# assumes the name of the peer relation is 'cluster' and that every -# service unit in the peer relation is running the same services. -# -# Services that do not specify :mode in parameter will default to http. 
-# -# Example -# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http -########################################################################## -configure_haproxy() { - local address=`unit-get private-address` - local name=${JUJU_UNIT_NAME////-} - cat > $HAPROXY_CFG << EOF -global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice - maxconn 20000 - user haproxy - group haproxy - spread-checks 0 - -defaults - log global - mode http - option httplog - option dontlognull - retries 3 - timeout queue 1000 - timeout connect 1000 - timeout client 30000 - timeout server 30000 - -listen stats :8888 - mode http - stats enable - stats hide-version - stats realm Haproxy\ Statistics - stats uri / - stats auth admin:password - -EOF - for service in $@; do - local service_name=$(echo $service | cut -d : -f 1) - local haproxy_listen_port=$(echo $service | cut -d : -f 2) - local api_listen_port=$(echo $service | cut -d : -f 3) - local mode=$(echo $service | cut -d : -f 4) - [[ -z "$mode" ]] && mode="http" - juju-log "Adding haproxy configuration entry for $service "\ - "($haproxy_listen_port -> $api_listen_port)" - cat >> $HAPROXY_CFG << EOF -listen $service_name 0.0.0.0:$haproxy_listen_port - balance roundrobin - mode $mode - option ${mode}log - server $name $address:$api_listen_port check -EOF - local r_id="" - local unit="" - for r_id in `relation-ids cluster`; do - for unit in `relation-list -r $r_id`; do - local unit_name=${unit////-} - local unit_address=`relation-get -r $r_id private-address $unit` - if [ -n "$unit_address" ]; then - echo " server $unit_name $unit_address:$api_listen_port check" \ - >> $HAPROXY_CFG - fi - done - done - done - echo "ENABLED=1" > $HAPROXY_DEFAULT - service haproxy restart -} - -########################################################################## -# Description: Query HA interface to determine is cluster is configured -# Returns: 0 if configured, 1 if not configured 
-########################################################################## -is_clustered() { - local r_id="" - local unit="" - for r_id in $(relation-ids ha); do - if [ -n "$r_id" ]; then - for unit in $(relation-list -r $r_id); do - clustered=$(relation-get -r $r_id clustered $unit) - if [ -n "$clustered" ]; then - juju-log "Unit is haclustered" - return 0 - fi - done - fi - done - juju-log "Unit is not haclustered" - return 1 -} - -########################################################################## -# Description: Return a list of all peers in cluster relations -########################################################################## -peer_units() { - local peers="" - local r_id="" - for r_id in $(relation-ids cluster); do - peers="$peers $(relation-list -r $r_id)" - done - echo $peers -} - -########################################################################## -# Description: Determines whether the current unit is the oldest of all -# its peers - supports partial leader election -# Returns: 0 if oldest, 1 if not -########################################################################## -oldest_peer() { - peers=$1 - local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2) - for peer in $peers; do - echo "Comparing $JUJU_UNIT_NAME with peers: $peers" - local r_unit_no=$(echo $peer | cut -d / -f 2) - if (($r_unit_no<$l_unit_no)); then - juju-log "Not oldest peer; deferring" - return 1 - fi - done - juju-log "Oldest peer; might take charge?" - return 0 -} - -########################################################################## -# Description: Determines whether the current service units is the -# leader within a) a cluster of its peers or b) across a -# set of unclustered peers. -# Parameters: CRM resource to check ownership of if clustered -# Returns: 0 if leader, 1 if not -########################################################################## -eligible_leader() { - if is_clustered; then - if ! 
is_leader $1; then - juju-log 'Deferring action to CRM leader' - return 1 - fi - else - peers=$(peer_units) - if [ -n "$peers" ] && ! oldest_peer "$peers"; then - juju-log 'Deferring action to oldest service unit.' - return 1 - fi - fi - return 0 -} - -########################################################################## -# Description: Query Cluster peer interface to see if peered -# Returns: 0 if peered, 1 if not peered -########################################################################## -is_peered() { - local r_id=$(relation-ids cluster) - if [ -n "$r_id" ]; then - if [ -n "$(relation-list -r $r_id)" ]; then - juju-log "Unit peered" - return 0 - fi - fi - juju-log "Unit not peered" - return 1 -} - -########################################################################## -# Description: Determines whether host is owner of clustered services -# Parameters: Name of CRM resource to check ownership of -# Returns: 0 if leader, 1 if not leader -########################################################################## -is_leader() { - hostname=`hostname` - if [ -x /usr/sbin/crm ]; then - if crm resource show $1 | grep -q $hostname; then - juju-log "$hostname is cluster leader." - return 0 - fi - fi - juju-log "$hostname is not cluster leader." - return 1 -} - -########################################################################## -# Description: Determines whether enough data has been provided in -# configuration or relation data to configure HTTPS. -# Parameters: None -# Returns: 0 if HTTPS can be configured, 1 if not. 
-########################################################################## -https() { - local r_id="" - if [[ -n "$(config-get ssl_cert)" ]] && - [[ -n "$(config-get ssl_key)" ]] ; then - return 0 - fi - for r_id in $(relation-ids identity-service) ; do - for unit in $(relation-list -r $r_id) ; do - if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] && - [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] && - [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] && - [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then - return 0 - fi - done - done - return 1 -} - -########################################################################## -# Description: For a given number of port mappings, configures apache2 -# HTTPs local reverse proxying using certficates and keys provided in -# either configuration data (preferred) or relation data. Assumes ports -# are not in use (calling charm should ensure that). -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if reverse proxy(s) have been configured, 0 if not. -########################################################################## -enable_https() { - local port_maps="$@" - local http_restart="" - juju-log "Enabling HTTPS for port mappings: $port_maps." - - # allow overriding of keystone provided certs with those set manually - # in config. - local cert=$(config-get ssl_cert) - local key=$(config-get ssl_key) - local ca_cert="" - if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then - juju-log "Inspecting identity-service relations for SSL certificate." 
- local r_id="" - cert="" - key="" - ca_cert="" - for r_id in $(relation-ids identity-service) ; do - for unit in $(relation-list -r $r_id) ; do - [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)" - [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)" - [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)" - done - done - [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di) - [[ -n "$key" ]] && key=$(echo $key | base64 -di) - [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di) - else - juju-log "Using SSL certificate provided in service config." - fi - - [[ -z "$cert" ]] || [[ -z "$key" ]] && - juju-log "Expected but could not find SSL certificate data, not "\ - "configuring HTTPS!" && return 1 - - apt-get -y install apache2 - a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" && - http_restart=1 - - mkdir -p /etc/apache2/ssl/$CHARM - echo "$cert" >/etc/apache2/ssl/$CHARM/cert - echo "$key" >/etc/apache2/ssl/$CHARM/key - if [[ -n "$ca_cert" ]] ; then - juju-log "Installing Keystone supplied CA cert." - echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt - update-ca-certificates --fresh - - # XXX TODO: Find a better way of exporting this? - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then - [[ -e /var/www/keystone_juju_ca_cert.crt ]] && - rm -rf /var/www/keystone_juju_ca_cert.crt - ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \ - /var/www/keystone_juju_ca_cert.crt - fi - - fi - for port_map in $port_maps ; do - local ext_port=$(echo $port_map | cut -d: -f1) - local int_port=$(echo $port_map | cut -d: -f2) - juju-log "Creating apache2 reverse proxy vhost for $port_map." 
- cat >/etc/apache2/sites-available/${CHARM}_${ext_port} < - ServerName $(unit-get private-address) - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/$CHARM/cert - SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key - ProxyPass / http://localhost:$int_port/ - ProxyPassReverse / http://localhost:$int_port/ - ProxyPreserveHost on - - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -END - a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && - http_restart=1 - done - if [[ -n "$http_restart" ]] ; then - service apache2 restart - fi -} - -########################################################################## -# Description: Ensure HTTPS reverse proxying is disabled for given port -# mappings. -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error. -########################################################################## -disable_https() { - local port_maps="$@" - local http_restart="" - juju-log "Ensuring HTTPS disabled for $port_maps." - ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0 - for port_map in $port_maps ; do - local ext_port=$(echo $port_map | cut -d: -f1) - local int_port=$(echo $port_map | cut -d: -f2) - if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then - juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map." - a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && - http_restart=1 - fi - done - if [[ -n "$http_restart" ]] ; then - service apache2 restart - fi -} - - -########################################################################## -# Description: Ensures HTTPS is either enabled or disabled for given port -# mapping. -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not. 
-########################################################################## -setup_https() { - # configure https via apache reverse proxying either - # using certs provided by config or keystone. - [[ -z "$CHARM" ]] && - error_out "setup_https(): CHARM not set." - if ! https ; then - disable_https $@ - else - enable_https $@ - fi -} - -########################################################################## -# Description: Determine correct API server listening port based on -# existence of HTTPS reverse proxy and/or haproxy. -# Paremeters: The standard public port for given service. -# Returns: The correct listening port for API service. -########################################################################## -determine_api_port() { - local public_port="$1" - local i=0 - ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1] - https >/dev/null 2>&1 && i=$[$i + 1] - echo $[$public_port - $[$i * 10]] -} - -########################################################################## -# Description: Determine correct proxy listening port based on public IP + -# existence of HTTPS reverse proxy. -# Paremeters: The standard public port for given service. -# Returns: The correct listening port for haproxy service public address. -########################################################################## -determine_haproxy_port() { - local public_port="$1" - local i=0 - https >/dev/null 2>&1 && i=$[$i + 1] - echo $[$public_port - $[$i * 10]] -} - -########################################################################## -# Description: Print the value for a given config option in an OpenStack -# .ini style configuration file. -# Parameters: File path, option to retrieve, optional -# section name (default=DEFAULT) -# Returns: Prints value if set, prints nothing otherwise. -########################################################################## -local_config_get() { - # return config values set in openstack .ini config files. 
- # default placeholders starting (eg, %AUTH_HOST%) treated as - # unset values. - local file="$1" - local option="$2" - local section="$3" - [[ -z "$section" ]] && section="DEFAULT" - python -c " -import ConfigParser -config = ConfigParser.RawConfigParser() -config.read('$file') -try: - value = config.get('$section', '$option') -except: - print '' - exit(0) -if value.startswith('%'): exit(0) -print value -" -} - -########################################################################## -# Description: Creates an rc file exporting environment variables to a -# script_path local to the charm's installed directory. -# Any charm scripts run outside the juju hook environment can source this -# scriptrc to obtain updated config information necessary to perform health -# checks or service changes -# -# Parameters: -# An array of '=' delimited ENV_VAR:value combinations to export. -# If optional script_path key is not provided in the array, script_path -# defaults to scripts/scriptrc -########################################################################## -function save_script_rc { - if [ ! 
-n "$JUJU_UNIT_NAME" ]; then - echo "Error: Missing JUJU_UNIT_NAME environment variable" - exit 1 - fi - # our default unit_path - unit_path="$CHARM_DIR/scripts/scriptrc" - echo $unit_path - tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc" - - echo "#!/bin/bash" > $tmp_rc - for env_var in "${@}" - do - if `echo $env_var | grep -q script_path`; then - # well then we need to reset the new unit-local script path - unit_path="$CHARM_DIR/${env_var/script_path=/}" - else - echo "export $env_var" >> $tmp_rc - fi - done - chmod 755 $tmp_rc - mv $tmp_rc $unit_path -} diff --git a/hooks/shared-db-relation-changed b/hooks/shared-db-relation-changed index 99375287..8355ca46 120000 --- a/hooks/shared-db-relation-changed +++ b/hooks/shared-db-relation-changed @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/shared-db-relation-joined b/hooks/shared-db-relation-joined index 99375287..8355ca46 120000 --- a/hooks/shared-db-relation-joined +++ b/hooks/shared-db-relation-joined @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm index 99375287..8355ca46 120000 --- a/hooks/upgrade-charm +++ b/hooks/upgrade-charm @@ -1 +1 @@ -horizon-relations \ No newline at end of file +horizon_relations.py \ No newline at end of file diff --git a/templates/default b/templates/default new file mode 100644 index 00000000..a442e900 --- /dev/null +++ b/templates/default @@ -0,0 +1,32 @@ + + ServerAdmin webmaster@localhost + + DocumentRoot /var/www + + Options FollowSymLinks + AllowOverride None + + + Options Indexes FollowSymLinks MultiViews + AllowOverride None + Order allow,deny + allow from all + + + ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/ + + AllowOverride None + Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch + Order allow,deny + Allow from all + + + ErrorLog ${APACHE_LOG_DIR}/error.log + + # Possible values include: 
debug, info, notice, warn, error, crit, + # alert, emerg. + LogLevel warn + + CustomLog ${APACHE_LOG_DIR}/access.log combined + + diff --git a/templates/default-ssl b/templates/default-ssl new file mode 100644 index 00000000..f8bcafc6 --- /dev/null +++ b/templates/default-ssl @@ -0,0 +1,50 @@ + + + ServerAdmin webmaster@localhost + + DocumentRoot /var/www + + Options FollowSymLinks + AllowOverride None + + + Options Indexes FollowSymLinks MultiViews + AllowOverride None + Order allow,deny + allow from all + + + ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/ + + AllowOverride None + Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch + Order allow,deny + Allow from all + + + ErrorLog ${APACHE_LOG_DIR}/error.log + LogLevel warn + + CustomLog ${APACHE_LOG_DIR}/ssl_access.log combined + + SSLEngine on +{% if ssl_configured %} + SSLCertificateFile {{ ssl_cert }} + SSLCertificateKeyFile {{ ssl_key }} +{% else %} + SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem + SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key +{% endif %} + + SSLOptions +StdEnvVars + + + SSLOptions +StdEnvVars + + BrowserMatch "MSIE [2-6]" \ + nokeepalive ssl-unclean-shutdown \ + downgrade-1.0 force-response-1.0 + BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown + + + diff --git a/templates/essex/local_settings.py b/templates/essex/local_settings.py new file mode 100644 index 00000000..2c40762e --- /dev/null +++ b/templates/essex/local_settings.py @@ -0,0 +1,120 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +DEBUG = {{ debug }} +TEMPLATE_DEBUG = DEBUG +PROD = False +USE_SSL = False + +# Ubuntu-specific: Enables an extra panel in the 'Settings' section +# that easily generates a Juju environments.yaml for download, +# preconfigured with endpoints and credentails required for bootstrap +# and service deployment. 
+ENABLE_JUJU_PANEL = True + +# Note: You should change this value +SECRET_KEY = 'elj1IWiLoWHgcyYxFVLj7cM5rGOOxWl0' + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG = { +# "password_validator": { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# } +# } + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +CACHE_BACKEND = 'memcached://127.0.0.1:11211/' + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +# EMAIL_HOST = 'smtp.my-company.com' +# EMAIL_PORT = 25 +# EMAIL_HOST_USER = 'djangomail' +# EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). +# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://{{ service_host }}:{{ service_port }}/v2.0" +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ default_role }}" + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'internalURL'. 
+#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# The number of Swift containers and objects to display on a single page before +# providing a paging element (a "more" link) to paginate results. +API_RESULT_LIMIT = 1000 + +# If you have external monitoring links, eg: +# EXTERNAL_MONITORING = [ +# ['Nagios','http://foo.com'], +# ['Ganglia','http://bar.com'], +# ] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. + 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'propagate': False, + } + } +} diff --git a/templates/folsom/local_settings.py b/templates/folsom/local_settings.py new file mode 100644 index 00000000..7c639084 --- /dev/null +++ b/templates/folsom/local_settings.py @@ -0,0 +1,163 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +DEBUG = {{ debug }} +TEMPLATE_DEBUG = DEBUG + +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. 
+# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG = { +# "password_validator": { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# }, +# 'help_url': "http://docs.openstack.org" +# } + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# Set custom secret key: +# You can either set it to a specific value or you can let horizion generate a +# default secret key that is unique on this machine, e.i. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. +# from horizon.utils import secret_key +# SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. To use +# memcached set CACHE_BACKED to something like 'memcached://127.0.0.1:11211/' +CACHE_BACKEND = 'memcached://127.0.0.1:11211' + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +# EMAIL_HOST = 'smtp.my-company.com' +# EMAIL_PORT = 25 +# EMAIL_HOST_USER = 'djangomail' +# EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). 
+# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://{{ service_host }}:{{ service_port }}/v2.0" +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ default_role }}" + +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True +} + +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': True +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'internalURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. 
+ 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. + 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'propagate': False, + } + } +} + +# Enable the Ubuntu theme if it is present. +try: + from ubuntu_theme import * +except ImportError: + pass + +# Default Ubuntu apache configuration uses /horizon as the application root. +# Configure auth redirects here accordingly. +LOGIN_URL='{{ webroot }}/auth/login/' +LOGIN_REDIRECT_URL='{{ webroot }}' + +# The Ubuntu package includes pre-compressed JS and compiled CSS to allow +# offline compression by default. To enable online compression, install +# the node-less package and enable the following option. 
+COMPRESS_OFFLINE = {{ compress_offline }} diff --git a/templates/grizzly/local_settings.py b/templates/grizzly/local_settings.py new file mode 100644 index 00000000..b4ead669 --- /dev/null +++ b/templates/grizzly/local_settings.py @@ -0,0 +1,219 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +from openstack_dashboard import exceptions + +DEBUG = {{ debug }} +TEMPLATE_DEBUG = DEBUG + +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +#CSRF_COOKIE_SECURE = True +#SESSION_COOKIE_SECURE = True + +# Default OpenStack Dashboard configuration. +HORIZON_CONFIG = { + 'dashboards': ('project', 'admin', 'settings',), + 'default_dashboard': 'project', + 'user_home': 'openstack_dashboard.views.get_user_home', + 'ajax_queue_limit': 10, + 'auto_fade_alerts': { + 'delay': 3000, + 'fade_duration': 1500, + 'types': ['alert-success', 'alert-info'] + }, + 'help_url': "http://docs.openstack.org", + 'exceptions': {'recoverable': exceptions.RECOVERABLE, + 'not_found': exceptions.NOT_FOUND, + 'unauthorized': exceptions.UNAUTHORIZED}, +} + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# } + +# Disable simplified floating IP address management for deployments with +# multiple floating IP pools or complex network requirements. +# HORIZON_CONFIG["simple_ip_management"] = False + +# Turn off browser autocompletion for the login form if so desired. 
+# HORIZON_CONFIG["password_autocomplete"] = "off" + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# Set custom secret key: +# You can either set it to a specific value or you can let horizion generate a +# default secret key that is unique on this machine, e.i. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. +# from horizon.utils import secret_key +# SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. To use +# memcached set CACHES to something like +# CACHES = { +# 'default': { +# 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', +# 'LOCATION' : '127.0.0.1:11211', +# } +#} + +CACHES = { + 'default': { + 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION' : '127.0.0.1:11211' + } +} + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Enable the Ubuntu theme if it is present. +try: + from ubuntu_theme import * +except ImportError: + pass + +# Default Ubuntu apache configuration uses /horizon as the application root. +# Configure auth redirects here accordingly. +LOGIN_URL='{{ webroot }}/auth/login/' +LOGIN_REDIRECT_URL='{{ webroot }}' + +# The Ubuntu package includes pre-compressed JS and compiled CSS to allow +# offline compression by default. 
To enable online compression, install
+# the node-less package and enable the following option.
+COMPRESS_OFFLINE = {{ compress_offline }}
+
+# Configure these for your outgoing email host
+# EMAIL_HOST = 'smtp.my-company.com'
+# EMAIL_PORT = 25
+# EMAIL_HOST_USER = 'djangomail'
+# EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+# AVAILABLE_REGIONS = [
+#     ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
+#     ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
+# ]
+
+OPENSTACK_HOST = "127.0.0.1"
+OPENSTACK_KEYSTONE_URL = "http://{{ service_host }}:{{ service_port }}/v2.0"
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ default_role }}"
+
+# Disable SSL certificate checks (useful for self-signed certificates):
+# OPENSTACK_SSL_NO_VERIFY = True
+
+# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
+# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set
+# can_edit_user to False and name to 'ldap'.
+#
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+    'name': 'native',
+    'can_edit_user': True,
+    'can_edit_project': True
+}
+
+OPENSTACK_HYPERVISOR_FEATURES = {
+    'can_set_mount_point': True,
+
+    # NOTE: as of Grizzly this is not yet supported in Nova so enabling this
+    # setting will not do anything useful
+    'can_encrypt_volumes': False
+}
+
+# The OPENSTACK_QUANTUM_NETWORK settings can be used to enable optional
+# services provided by quantum. Currently only the load balancer service
+# is available.
+OPENSTACK_QUANTUM_NETWORK = {
+    'enable_lb': False
+}
+
+# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is 'internalURL'. 
+#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. 
+ 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'propagate': False, + } + } +} diff --git a/templates/haproxy.cfg b/templates/haproxy.cfg new file mode 100644 index 00000000..11872418 --- /dev/null +++ b/templates/haproxy.cfg @@ -0,0 +1,37 @@ +global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 20000 + user haproxy + group haproxy + spread-checks 0 + +defaults + log global + mode tcp + option httplog + option dontlognull + retries 3 + timeout queue 1000 + timeout connect 1000 + timeout client 30000 + timeout server 30000 + +listen stats :8888 + mode http + stats enable + stats hide-version + stats realm Haproxy\ Statistics + stats uri / + stats auth admin:password + +{% if units %} +{% for service, ports in service_ports.iteritems() -%} +listen {{ service }} 0.0.0.0:{{ ports[0] }} + balance roundrobin + option tcplog + {% for unit, address in units.iteritems() -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endfor %} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/templates/local_settings.py b/templates/local_settings.py new file mode 100644 index 00000000..e829f306 --- /dev/null +++ b/templates/local_settings.py @@ -0,0 +1,243 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +from openstack_dashboard import exceptions + +DEBUG = {{ debug }} +TEMPLATE_DEBUG = DEBUG + +# Required for Django 1.5. 
+# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts +#ALLOWED_HOSTS = ['horizon.example.com', ] + +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +CSRF_COOKIE_SECURE = True +SESSION_COOKIE_SECURE = True + +# Overrides for OpenStack API versions. Use this setting to force the +# OpenStack dashboard to use a specfic API version for a given service API. +# NOTE: The version should be formatted as it appears in the URL for the +# service API. For example, The identity service APIs have inconsistent +# use of the decimal point, so valid options would be "2.0" or "3". +# OPENSTACK_API_VERSIONS = { +# "identity": 3 +# } + +# Set this to True if running on multi-domain model. When this is enabled, it +# will require user to enter the Domain name in addition to username for login. +# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False + +# Overrides the default domain used when running on single-domain model +# with Keystone V3. All entities will be created in the default domain. +# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' + +# Default OpenStack Dashboard configuration. 
+HORIZON_CONFIG = { + 'dashboards': ('project', 'admin', 'settings',), + 'default_dashboard': 'project', + 'user_home': 'openstack_dashboard.views.get_user_home', + 'ajax_queue_limit': 10, + 'auto_fade_alerts': { + 'delay': 3000, + 'fade_duration': 1500, + 'types': ['alert-success', 'alert-info'] + }, + 'help_url': "http://docs.openstack.org", + 'exceptions': {'recoverable': exceptions.RECOVERABLE, + 'not_found': exceptions.NOT_FOUND, + 'unauthorized': exceptions.UNAUTHORIZED}, +} + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# } + +# Disable simplified floating IP address management for deployments with +# multiple floating IP pools or complex network requirements. +# HORIZON_CONFIG["simple_ip_management"] = False + +# Turn off browser autocompletion for the login form if so desired. +# HORIZON_CONFIG["password_autocomplete"] = "off" + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# Set custom secret key: +# You can either set it to a specific value or you can let horizion generate a +# default secret key that is unique on this machine, e.i. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. +from horizon.utils import secret_key +SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. 
To use +# memcached set CACHES to something like +CACHES = { + 'default': { + 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION' : '127.0.0.1:11211', + } +} + +# Enable the Ubuntu theme if it is present. +try: + from ubuntu_theme import * +except ImportError: + pass + +# Default Ubuntu apache configuration uses /horizon as the application root. +# Configure auth redirects here accordingly. +LOGIN_URL='{{ webroot }}/auth/login/' +LOGIN_REDIRECT_URL='{{ webroot }}' + +# The Ubuntu package includes pre-compressed JS and compiled CSS to allow +# offline compression by default. To enable online compression, install +# the node-less package and enable the following option. +COMPRESS_OFFLINE = {{ compress_offline }} + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +# EMAIL_HOST = 'smtp.my-company.com' +# EMAIL_PORT = 25 +# EMAIL_HOST_USER = 'djangomail' +# EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). +# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "127.0.0.1" +OPENSTACK_KEYSTONE_URL = "http://{{ service_host }}:{{ service_port }}/v2.0" +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ default_role }}" + +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. 
+OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True +} + +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': True, + + # NOTE: as of Grizzly this is not yet supported in Nova so enabling this + # setting will not do anything useful + 'can_encrypt_volumes': False +} + +# The OPENSTACK_QUANTUM_NETWORK settings can be used to enable optional +# services provided by quantum. Currently only the load balancer service +# is available. +OPENSTACK_QUANTUM_NETWORK = { + 'enable_lb': False +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'internalURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. 
+ 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'propagate': False, + } + } +} diff --git a/templates/openstack-dashboard.conf b/templates/openstack-dashboard.conf new file mode 100644 index 00000000..7cb30f55 --- /dev/null +++ b/templates/openstack-dashboard.conf @@ -0,0 +1,7 @@ +WSGIScriptAlias {{ webroot }} /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi +WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10 +Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/ + + Order allow,deny + Allow from all + diff --git a/templates/ports.conf b/templates/ports.conf new file mode 100644 index 00000000..a12609d6 --- /dev/null +++ b/templates/ports.conf @@ -0,0 +1,9 @@ +Listen {{ http_port }} + + + Listen {{ https_port }} + + + + Listen {{ https_port }} +