commit fde4c9cd38097f03b6fc3754a867499068340703 Author: Bilal Baqar Date: Tue May 19 14:05:20 2015 -0700 PLUMgrid director initial charm diff --git a/.coverage b/.coverage new file mode 100644 index 0000000..7c94385 Binary files /dev/null and b/.coverage differ diff --git a/.project b/.project new file mode 100644 index 0000000..a146609 --- /dev/null +++ b/.project @@ -0,0 +1,17 @@ + + + neutron-openvswitch + + + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + diff --git a/.pydevproject b/.pydevproject new file mode 100644 index 0000000..d6fbb94 --- /dev/null +++ b/.pydevproject @@ -0,0 +1,9 @@ + + +python 2.7 +Default + +/neutron-openvswitch/hooks +/neutron-openvswitch/unit_tests + + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..a529464 --- /dev/null +++ b/Makefile @@ -0,0 +1,24 @@ +#!/usr/bin/make +PYTHON := /usr/bin/env python + +lint: + @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers unit_tests + @charm proof + +unit_test: + @echo Starting tests... + @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests + +bin/charm_helpers_sync.py: + @mkdir -p bin + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ + > bin/charm_helpers_sync.py + +sync: bin/charm_helpers_sync.py + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml + patch -p0 < ~/profsvcs/canonical/charms/charmhelpers.patch + +publish: lint unit_test + bzr push lp:charms/plumgrid-director + bzr push lp:charms/trusty/plumgrid-director diff --git a/README.md b/README.md new file mode 100644 index 0000000..afe2844 --- /dev/null +++ b/README.md @@ -0,0 +1,14 @@ +# Overview + +This charm provides the PLUMgrid Director configuration for a node. 
+ + +# Usage + +To deploy (partial deployment of linked charms only): + + juju deploy neutron-api + juju deploy neutron-iovisor + juju deploy plumgrid-director + juju add-relation plumgrid-director neutron-iovisor + diff --git a/bin/charm_helpers_sync.py b/bin/charm_helpers_sync.py new file mode 100644 index 0000000..f67fdb9 --- /dev/null +++ b/bin/charm_helpers_sync.py @@ -0,0 +1,253 @@ +#!/usr/bin/python + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Authors: +# Adam Gandelman + +import logging +import optparse +import os +import subprocess +import shutil +import sys +import tempfile +import yaml +from fnmatch import fnmatch + +import six + +CHARM_HELPERS_BRANCH = 'lp:charm-helpers' + + +def parse_config(conf_file): + if not os.path.isfile(conf_file): + logging.error('Invalid config file: %s.' % conf_file) + return False + return yaml.load(open(conf_file).read()) + + +def clone_helpers(work_dir, branch): + dest = os.path.join(work_dir, 'charm-helpers') + logging.info('Checking out %s to %s.' 
% (branch, dest)) + cmd = ['bzr', 'checkout', '--lightweight', branch, dest] + subprocess.check_call(cmd) + return dest + + +def _module_path(module): + return os.path.join(*module.split('.')) + + +def _src_path(src, module): + return os.path.join(src, 'charmhelpers', _module_path(module)) + + +def _dest_path(dest, module): + return os.path.join(dest, _module_path(module)) + + +def _is_pyfile(path): + return os.path.isfile(path + '.py') + + +def ensure_init(path): + ''' + ensure directories leading up to path are importable, omitting + parent directory, eg path='/hooks/helpers/foo'/: + hooks/ + hooks/helpers/__init__.py + hooks/helpers/foo/__init__.py + ''' + for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): + _i = os.path.join(d, '__init__.py') + if not os.path.exists(_i): + logging.info('Adding missing __init__.py: %s' % _i) + open(_i, 'wb').close() + + +def sync_pyfile(src, dest): + src = src + '.py' + src_dir = os.path.dirname(src) + logging.info('Syncing pyfile: %s -> %s.' 
% (src, dest)) + if not os.path.exists(dest): + os.makedirs(dest) + shutil.copy(src, dest) + if os.path.isfile(os.path.join(src_dir, '__init__.py')): + shutil.copy(os.path.join(src_dir, '__init__.py'), + dest) + ensure_init(dest) + + +def get_filter(opts=None): + opts = opts or [] + if 'inc=*' in opts: + # do not filter any files, include everything + return None + + def _filter(dir, ls): + incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] + _filter = [] + for f in ls: + _f = os.path.join(dir, f) + + if not os.path.isdir(_f) and not _f.endswith('.py') and incs: + if True not in [fnmatch(_f, inc) for inc in incs]: + logging.debug('Not syncing %s, does not match include ' + 'filters (%s)' % (_f, incs)) + _filter.append(f) + else: + logging.debug('Including file, which matches include ' + 'filters (%s): %s' % (incs, _f)) + elif (os.path.isfile(_f) and not _f.endswith('.py')): + logging.debug('Not syncing file: %s' % f) + _filter.append(f) + elif (os.path.isdir(_f) and not + os.path.isfile(os.path.join(_f, '__init__.py'))): + logging.debug('Not syncing directory: %s' % f) + _filter.append(f) + return _filter + return _filter + + +def sync_directory(src, dest, opts=None): + if os.path.exists(dest): + logging.debug('Removing existing directory: %s' % dest) + shutil.rmtree(dest) + logging.info('Syncing directory: %s -> %s.' % (src, dest)) + + shutil.copytree(src, dest, ignore=get_filter(opts)) + ensure_init(dest) + + +def sync(src, dest, module, opts=None): + + # Sync charmhelpers/__init__.py for bootstrap code. + sync_pyfile(_src_path(src, '__init__'), dest) + + # Sync other __init__.py files in the path leading to module. + m = [] + steps = module.split('.')[:-1] + while steps: + m.append(steps.pop(0)) + init = '.'.join(m + ['__init__']) + sync_pyfile(_src_path(src, init), + os.path.dirname(_dest_path(dest, init))) + + # Sync the module, or maybe a .py file. 
+ if os.path.isdir(_src_path(src, module)): + sync_directory(_src_path(src, module), _dest_path(dest, module), opts) + elif _is_pyfile(_src_path(src, module)): + sync_pyfile(_src_path(src, module), + os.path.dirname(_dest_path(dest, module))) + else: + logging.warn('Could not sync: %s. Neither a pyfile or directory, ' + 'does it even exist?' % module) + + +def parse_sync_options(options): + if not options: + return [] + return options.split(',') + + +def extract_options(inc, global_options=None): + global_options = global_options or [] + if global_options and isinstance(global_options, six.string_types): + global_options = [global_options] + if '|' not in inc: + return (inc, global_options) + inc, opts = inc.split('|') + return (inc, parse_sync_options(opts) + global_options) + + +def sync_helpers(include, src, dest, options=None): + if not os.path.isdir(dest): + os.makedirs(dest) + + global_options = parse_sync_options(options) + + for inc in include: + if isinstance(inc, str): + inc, opts = extract_options(inc, global_options) + sync(src, dest, inc, opts) + elif isinstance(inc, dict): + # could also do nested dicts here. 
+ for k, v in six.iteritems(inc): + if isinstance(v, list): + for m in v: + inc, opts = extract_options(m, global_options) + sync(src, dest, '%s.%s' % (k, inc), opts) + +if __name__ == '__main__': + parser = optparse.OptionParser() + parser.add_option('-c', '--config', action='store', dest='config', + default=None, help='helper config file') + parser.add_option('-D', '--debug', action='store_true', dest='debug', + default=False, help='debug') + parser.add_option('-b', '--branch', action='store', dest='branch', + help='charm-helpers bzr branch (overrides config)') + parser.add_option('-d', '--destination', action='store', dest='dest_dir', + help='sync destination dir (overrides config)') + (opts, args) = parser.parse_args() + + if opts.debug: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + if opts.config: + logging.info('Loading charm helper config from %s.' % opts.config) + config = parse_config(opts.config) + if not config: + logging.error('Could not parse config from %s.' % opts.config) + sys.exit(1) + else: + config = {} + + if 'branch' not in config: + config['branch'] = CHARM_HELPERS_BRANCH + if opts.branch: + config['branch'] = opts.branch + if opts.dest_dir: + config['destination'] = opts.dest_dir + + if 'destination' not in config: + logging.error('No destination dir. 
specified as option or config.') + sys.exit(1) + + if 'include' not in config: + if not args: + logging.error('No modules to sync specified as option or config.') + sys.exit(1) + config['include'] = [] + [config['include'].append(a) for a in args] + + sync_options = None + if 'options' in config: + sync_options = config['options'] + tmpd = tempfile.mkdtemp() + try: + checkout = clone_helpers(tmpd, config['branch']) + sync_helpers(config['include'], checkout, config['destination'], + options=sync_options) + except Exception as e: + logging.error("Could not sync: %s" % e) + raise e + finally: + logging.debug('Cleaning up %s' % tmpd) + shutil.rmtree(tmpd) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml new file mode 100644 index 0000000..9b5e79e --- /dev/null +++ b/charm-helpers-sync.yaml @@ -0,0 +1,12 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core + - fetch + - contrib.openstack|inc=* + - contrib.hahelpers + - contrib.network.ovs + - contrib.storage.linux + - payload.execd + - contrib.network.ip + - contrib.python.packages diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..93641f4 --- /dev/null +++ b/config.yaml @@ -0,0 +1,5 @@ +options: + plumgrid-virtual-ip: + default: 192.168.100.250 + type: string + description: The IP on which PG Console will be accessible. diff --git a/copyright b/copyright new file mode 100644 index 0000000..d44f24c --- /dev/null +++ b/copyright @@ -0,0 +1,9 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 + +Files: * +Copyright: 2012, Canonical Ltd. +License: GPL-3 + +License: GPL-3 + On Debian GNU/Linux system you can find the complete text of the + GPL-3 license in '/usr/share/common-licenses/GPL-3' diff --git a/hooks/charmhelpers-pl/__init__.py b/hooks/charmhelpers-pl/__init__.py new file mode 100644 index 0000000..f72e7f8 --- /dev/null +++ b/hooks/charmhelpers-pl/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/hooks/charmhelpers-pl/contrib/__init__.py b/hooks/charmhelpers-pl/contrib/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/hahelpers/__init__.py b/hooks/charmhelpers-pl/contrib/hahelpers/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/hahelpers/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/hahelpers/apache.py b/hooks/charmhelpers-pl/contrib/hahelpers/apache.py new file mode 100644 index 0000000..0091719 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/hahelpers/apache.py @@ -0,0 +1,82 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. 
If not, see . + +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get(ssl_cert_attr, + rid=r_id, unit=unit) + if not key: + key = relation_get(ssl_key_attr, + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def install_ca_cert(ca_cert): + if ca_cert: + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', + 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/hooks/charmhelpers-pl/contrib/hahelpers/cluster.py b/hooks/charmhelpers-pl/contrib/hahelpers/cluster.py new file mode 100644 index 0000000..9333efc --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/hahelpers/cluster.py @@ -0,0 +1,272 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. +""" + +import subprocess +import os + +from socket import gethostname as get_unit_hostname + +import six + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + ERROR, + WARNING, + unit_get, +) +from charmhelpers.core.decorators import ( + retry_on_exception, +) +from charmhelpers.core.strutils import ( + bool_from_string, +) + + +class HAIncompleteConfig(Exception): + pass + + +class CRMResourceNotFound(Exception): + pass + + +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 2. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. 
+ """ + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) +def is_crm_leader(resource, retry=False): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. + """ + cmd = ['crm', 'resource', 'show', resource] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False + + +def is_leader(resource): + log("is_leader is deprecated. 
Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port, singlenode_mode=False): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. 
+ + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the API service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port, singlenode_mode=False): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. + + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the HAProxy service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + return public_port - (i * 10) + + +def get_hacluster_config(exclude_keys=None): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip + + param: exclude_keys: list of setting key(s) to be excluded. + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing. + ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip'] + conf = {} + for setting in settings: + if exclude_keys and setting in exclude_keys: + continue + + conf[setting] = config_get(setting) + missing = [] + [missing.append(s) for s, v in six.iteritems(conf) if v is None] + if missing: + log('Insufficient config data to configure hacluster.', level=ERROR) + raise HAIncompleteConfig + return conf + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. 
+ + :vip_setting: str: Setting in charm config that specifies + VIP address. + ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = unit_get('private-address') + return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers-pl/contrib/network/__init__.py b/hooks/charmhelpers-pl/contrib/network/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/network/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/network/ip.py b/hooks/charmhelpers-pl/contrib/network/ip.py new file mode 100644 index 0000000..fff6d5c --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/network/ip.py @@ -0,0 +1,450 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import glob +import re +import subprocess +import six +import socket + +from functools import partial + +from charmhelpers.core.hookenv import unit_get +from charmhelpers.fetch import apt_install +from charmhelpers.core.hookenv import ( + log, + WARNING, +) + +try: + import netifaces +except ImportError: + apt_install('python-netifaces') + import netifaces + +try: + import netaddr +except ImportError: + apt_install('python-netaddr') + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + +def get_address_in_network(network, fallback=None, fatal=False): + """Get an IPv4 or IPv6 address within the network from the host. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). 
+ """ + if network is None: + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + else: + return None + + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if network.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + + return None + + +def is_ipv6(address): + """Determine whether provided address is IPv6 or not.""" + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. 
+ """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. + """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + if not addr['addr'].startswith('fe80'): + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] + + return None + + +get_iface_for_address = partial(_get_for_address, key='iface') + + +get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def format_ipv6_addr(address): + """If 
address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. + """ + if is_ipv6(address): + return "[%s]" % address + + return None + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """Return the assigned IP address for a given interface, if any.""" + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + + if not exc_list: + exc_list = [] + + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception("Unknown inet type '%s'" % str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("Interface '%s' not found " % (iface)) + else: + return [] + + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." 
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.

    :param iface: network interface to inspect (injected by @sniff_iface
        when not supplied).
    :param inc_aliases: also consider interface aliases (eth0:1 style).
    :param fatal: raise if no suitable address is found.
    :param exc_list: addresses to exclude from the result.
    :param dynamic_only: only accept addresses in 'scope global dynamic'.
    :returns: list of matching addresses (empty only when fatal=False).
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        # EUI-64 suffix extracted from the link-local (fe80::...) address,
        # used below to match a dynamic global address to this interface.
        # BUG FIX: previously this was only bound inside the loop, so if no
        # link-local address was present the later endswith() reference
        # raised NameError; initialise it to None and guard its use.
        eui_64_mac = None
        key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
        for addr in addresses:
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if not dynamic_only or (
                                    eui_64_mac is not None and
                                    m.group(1).endswith(eui_64_mac)):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
def is_ip(address):
    """Return True if *address* is a valid dotted IPv4 address.

    NOTE(review): socket.inet_aton also accepts legacy short forms such as
    '127.1', and IPv6 addresses are NOT recognised by this check — confirm
    callers only pass IPv4 here.
    """
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(address)
        return True
    except socket.error:
        return False


def ns_query(address):
    """Perform a DNS query for *address*.

    Queries a PTR record for dns.name.Name instances and an A record for
    plain strings. Returns the first answer as a string, or None when the
    input type is unsupported or there are no answers.
    """
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None


def get_host_ip(hostname, fallback=None):
    """Resolve the IP for a given hostname, or return the input unchanged
    if it is already an IP.

    :param hostname: hostname or IP address.
    :param fallback: value returned when resolution fails.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except socket.error:
            # BUG FIX: this was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; narrow to resolver errors.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr


def get_hostname(address, fqdn=True):
    """Resolve the hostname for a given IP, or return the input unchanged
    if it is already a hostname.

    :param address: IP address or hostname.
    :param fqdn: when True return the fully-qualified name (sans trailing
        dot), otherwise only the first label.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install("python-dnspython")
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)
        if not result:
            return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        return result
    else:
        return result.split('.')[0]
''' Helpers for interacting with OpenvSwitch '''


def add_bridge(name):
    ''' Add the named bridge to openvswitch '''
    log('Creating bridge {}'.format(name))
    cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
    subprocess.check_call(cmd)


def del_bridge(name):
    ''' Delete the named bridge from openvswitch '''
    log('Deleting bridge {}'.format(name))
    cmd = ["ovs-vsctl", "--", "--if-exists", "del-br", name]
    subprocess.check_call(cmd)


def add_bridge_port(name, port, promisc=False):
    ''' Add a port to the named openvswitch bridge '''
    log('Adding port {} to bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "up"])
    # Single call: choose promisc state up-front instead of branching on
    # two near-identical command lists.
    promisc_state = "on" if promisc else "off"
    subprocess.check_call(["ip", "link", "set", port, "promisc",
                           promisc_state])


def del_bridge_port(name, port):
    ''' Delete a port from the named openvswitch bridge '''
    log('Deleting port {} from bridge {}'.format(port, name))
    for cmd in (["ovs-vsctl", "--", "--if-exists", "del-port", name, port],
                ["ip", "link", "set", port, "down"],
                ["ip", "link", "set", port, "promisc", "off"]):
        subprocess.check_call(cmd)
+ +def set_manager(manager): + ''' Set the controller for the local openvswitch ''' + log('Setting manager for local ovs to {}'.format(manager)) + subprocess.check_call(['ovs-vsctl', 'set-manager', + 'ssl:{}'.format(manager)]) + + +CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem' + + +def get_certificate(): + ''' Read openvswitch certificate from disk ''' + if os.path.exists(CERT_PATH): + log('Reading ovs certificate from {}'.format(CERT_PATH)) + with open(CERT_PATH, 'r') as cert: + full_cert = cert.read() + begin_marker = "-----BEGIN CERTIFICATE-----" + end_marker = "-----END CERTIFICATE-----" + begin_index = full_cert.find(begin_marker) + end_index = full_cert.rfind(end_marker) + if end_index == -1 or begin_index == -1: + raise RuntimeError("Certificate does not contain valid begin" + " and end markers.") + full_cert = full_cert[begin_index:(end_index + len(end_marker))] + return full_cert + else: + log('Certificate not found', level=WARNING) + return None + + +def full_restart(): + ''' Full restart and reload of openvswitch ''' + if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'): + service('start', 'openvswitch-force-reload-kmod') + else: + service('force-reload-kmod', 'openvswitch-switch') diff --git a/hooks/charmhelpers-pl/contrib/openstack/__init__.py b/hooks/charmhelpers-pl/contrib/openstack/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration

    Registers *source* as an alternative for *target* under link group
    *name* via update-alternatives, moving any pre-existing real file at
    *target* aside to "<target>.bak" first so the symlink can be created.
    '''
    target_is_real_path = os.path.exists(target) and not os.path.islink(target)
    if target_is_real_path:
        # Move existing file/directory away before installing
        shutil.move(target, '{}.bak'.format(target))
    subprocess.check_call([
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority),
    ])
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers-pl/contrib/openstack/amulet/deployment.py new file mode 100644 index 0000000..461a702 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/amulet/deployment.py @@ -0,0 +1,146 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
+ """ + + def __init__(self, series=None, openstack=None, source=None, stable=True): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come + # out. + self.current_next = "trusty" + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb'] + + if self.series in ['precise', 'trusty']: + base_series = self.series + else: + base_series = self.current_next + + if self.stable: + for svc in other_services: + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}/{}' + svc['location'] = temp.format(base_series, + svc['name']) + else: + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + ignore: + config = {'openstack-origin': 
self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in ignore: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse, self.trusty_juno, self.utopic_juno, + self.trusty_kilo, self.vivid_kilo) = range(10) + + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-juno'): self.trusty_juno, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('utopic', None): self.utopic_juno, + ('vivid', None): self.vivid_kilo} + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. 
+ """ + releases = OrderedDict([ + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] diff --git a/hooks/charmhelpers-pl/contrib/openstack/amulet/utils.py b/hooks/charmhelpers-pl/contrib/openstack/amulet/utils.py new file mode 100644 index 0000000..9c3d918 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/amulet/utils.py @@ -0,0 +1,294 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +import six + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. 
+ """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = 
opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before = len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + 
return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True diff --git a/hooks/charmhelpers-pl/contrib/openstack/context.py b/hooks/charmhelpers-pl/contrib/openstack/context.py new file mode 100644 index 0000000..76ffb27 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/context.py @@ -0,0 +1,1343 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
def context_complete(ctxt):
    """Return True when every value in *ctxt* is populated.

    Logs the keys whose values are None or '' and returns False when any
    are found.
    """
    missing_keys = [key for key, value in ctxt.items()
                    if value is None or value == '']

    if missing_keys:
        log('Missing required data: %s' % ' '.join(missing_keys), level=INFO)
        return False

    return True
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

    1. A string in the simple format of key=value pairs, with the possibility
       of specifying multiple key value pairs within the same string. For
       example, a string in the format of 'key1=value1, key2=value2' will
       return a dict of:

           {'key1': 'value1', 'key2': 'value2'}

    2. A string in the above format, but supporting a comma-delimited list
       of values for the same key. For example, a string in the format of
       'key1=value1, key2=value3,value4,value5' will return a dict of:

           {'key1': 'value1', 'key2': 'value3,value4,value5'}

    3. A string containing a colon character (:) prior to an equal
       character (=) will be treated as yaml and parsed as such. This can be
       used to specify more complex key value pairs. For example,
       a string in the format of 'key1: subkey1=value1, subkey2=value2' will
       return a dict of:

           {'key1': 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated values
    which themselves may be comma-separated list of values.

    :param config_flags: raw flags string to parse.
    :returns: dict of parsed key/value flags.
    :raises OSContextError: if the string is not in a supported format.
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates
    # assignment for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        log("config_flags is not in expected format (key=value)", level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    for i in range(0, limit - 1):
        current = split[i]
        # Renamed from 'next' to avoid shadowing the builtin.
        next_val = split[i + 1]
        vindex = next_val.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            # Last pair, or the value has no trailing ',<embedded-key>'.
            value = next_val
        else:
            value = next_val[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                log("Invalid config value(s) at index %s" % (i), level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
(database name and user)", level=ERROR) + raise OSContextError + + ctxt = {} + + # NOTE(jamespage) if mysql charm provides a network upon which + # access to the database should be made, reconfigure relation + # with the service units local address and defer execution + access_network = relation_get('access-network') + if access_network is not None: + if self.relation_prefix is not None: + hostname_key = "{}_hostname".format(self.relation_prefix) + else: + hostname_key = "hostname" + access_hostname = get_address_in_network(access_network, + unit_get('private-address')) + set_hostname = relation_get(attribute=hostname_key, + unit=local_unit()) + if set_hostname != access_hostname: + relation_set(relation_settings={hostname_key: access_hostname}) + return None # Defer any further hook execution for now.... + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + + for rid in relation_ids('shared-db'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host + ctxt = { + 'database_host': host, + 'database': self.database, + 'database_user': self.user, + 'database_password': rdata.get(password_setting), + 'database_type': 'mysql' + } + if context_complete(ctxt): + db_ssl(rdata, ctxt, self.ssl_dir) + return ctxt + return {} + + +class PostgresqlDBContext(OSContextGenerator): + interfaces = ['pgsql-db'] + + def __init__(self, database=None): + self.database = database + + def __call__(self): + self.database = self.database or config('database') + if self.database is None: + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. 
(database name)', level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.interfaces[0]): + for unit in related_units(rid): + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} + if context_complete(ctxt): + return ctxt + + return {} + + +def db_ssl(rdata, ctxt, ssl_dir): + if 'ssl_ca' in rdata and ssl_dir: + ca_path = os.path.join(ssl_dir, 'db-client.ca') + with open(ca_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_ca'])) + + ctxt['database_ssl_ca'] = ca_path + elif 'ssl_ca' in rdata: + log("Charm not setup for ssl support but ssl ca found", level=INFO) + return ctxt + + if 'ssl_cert' in rdata: + cert_path = os.path.join( + ssl_dir, 'db-client.cert') + if not os.path.exists(cert_path): + log("Waiting 1m for ssl client cert validity", level=INFO) + time.sleep(60) + + with open(cert_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_cert'])) + + ctxt['database_ssl_cert'] = cert_path + key_path = os.path.join(ssl_dir, 'db-client.key') + with open(key_path, 'w') as fh: + fh.write(b64decode(rdata['ssl_key'])) + + ctxt['database_ssl_key'] = key_path + + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + + def __init__(self, service=None, service_user=None, rel_name='identity-service'): + self.service = service + self.service_user = service_user + self.rel_name = rel_name + self.interfaces = [self.rel_name] + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + if self.service and self.service_user: + # This is required for pki token signing if we don't want /tmp to + # be used. 
+ cachedir = '/var/cache/%s' % (self.service) + if not os.path.isdir(cachedir): + log("Creating service cache dir %s" % (cachedir), level=DEBUG) + mkdir(path=cachedir, owner=self.service_user, + group=self.service_user, perms=0o700) + + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + ctxt.update({'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol}) + + if context_complete(ctxt): + # NOTE(jamespage) this is required for >= icehouse + # so a missing value just indicates keystone needs + # upgrading + ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + return ctxt + + return {} + + +class AMQPContext(OSContextGenerator): + + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): + self.ssl_dir = ssl_dir + self.rel_name = rel_name + self.relation_prefix = relation_prefix + self.interfaces = [rel_name] + + def __call__(self): + log('Generating template context for amqp', level=DEBUG) + conf = config() + if self.relation_prefix: + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' + + try: + username = conf[user_setting] + vhost = conf[vhost_setting] + except KeyError as e: + log('Could not generate shared_db 
context. Missing required charm ' + 'config options: %s.' % e, level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.rel_name): + ha_vip_only = False + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip + else: + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host + + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + + ssl_port = relation_get('ssl_port', rid=rid, unit=unit) + if ssl_port: + ctxt['rabbit_ssl_port'] = ssl_port + + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) + if ssl_ca: + ctxt['rabbit_ssl_ca'] = ssl_ca + + if relation_get('ha_queues', rid=rid, unit=unit) is not None: + ctxt['rabbitmq_ha_queues'] = True + + ha_vip_only = relation_get('ha-vip-only', + rid=rid, unit=unit) is not None + + if context_complete(ctxt): + if 'rabbit_ssl_ca' in ctxt: + if not self.ssl_dir: + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) + break + + ca_path = os.path.join( + self.ssl_dir, 'rabbit-client-ca.pem') + with open(ca_path, 'w') as fh: + fh.write(b64decode(ctxt['rabbit_ssl_ca'])) + ctxt['rabbit_ssl_ca'] = ca_path + + # Sufficient information found = break out! 
+ break + + # Used for active/active rabbitmq >= grizzly + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): + rabbitmq_hosts = [] + for unit in related_units(rid): + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + + oslo_messaging_flags = conf.get('oslo-messaging-flags', None) + if oslo_messaging_flags: + ctxt['oslo_messaging_flags'] = config_flags_parser( + oslo_messaging_flags) + + if not context_complete(ctxt): + return {} + + return ctxt + + +class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" + interfaces = ['ceph'] + + def __call__(self): + if not relation_ids('ceph'): + return {} + + log('Generating template context for ceph', level=DEBUG) + mon_hosts = [] + auth = None + key = None + use_syslog = str(config('use-syslog')).lower() + for rid in relation_ids('ceph'): + for unit in related_units(rid): + auth = relation_get('auth', rid=rid, unit=unit) + key = relation_get('key', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr + mon_hosts.append(ceph_addr) + + ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), + 'auth': auth, + 'key': key, + 'use_syslog': use_syslog} + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + return ctxt + + +class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. 
+ """ + interfaces = ['cluster'] + + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode + + def __call__(self): + if not relation_ids('cluster') and not self.singlenode_mode: + return {} + + if config('prefer-ipv6'): + addr = get_ipv6_addr(exc_list=[config('vip')])[0] + else: + addr = get_host_ip(unit_get('private-address')) + + l_unit = local_unit().replace('/', '-') + cluster_hosts = {} + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in ADDRESS_TYPES: + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) add backend based on private address - this + # with either be the only backend or the fallback if no acls + # match in the frontend + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + + if config('prefer-ipv6'): + ctxt['ipv6'] = True + ctxt['local_host'] = 'ip6-localhost' + 
ctxt['haproxy_host'] = '::' + ctxt['stat_port'] = ':::8888' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + ctxt['stat_port'] = ':8888' + + for frontend in cluster_hosts: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + + return ctxt + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) + return {} + + +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) + rids = relation_ids('image-service') + if not rids: + return {} + + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) + return {} + + +class ApacheSSLContext(OSContextGenerator): + """Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like:: + + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. 
+ external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self, cn=None): + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert)) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key)) + + def configure_ca(self): + ca_cert = get_ca_cert() + if ca_cert: + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + """Figure out which canonical names clients will access this service. + """ + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + if config('vip'): + vips = config('vip').split() + else: + vips = [] + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." 
% net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) + + def __call__(self): + if isinstance(self.external_ports, six.string_types): + self.external_ports = [self.external_ports] + + if not self.external_ports or not https(): + return {} + + self.configure_ca() + self.enable_modules() + + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + cn = resolve_address(endpoint_type=INTERNAL) + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in sorted(set(addresses)): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) + return ctxt + + +class NeutronContext(OSContextGenerator): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + for pkgs in self.packages: + ensure_packages(pkgs) + + def _save_flag_file(self): + if self.network_manager == 'quantum': + _file = '/etc/nova/quantum_plugin.conf' + else: + _file = '/etc/nova/neutron_plugin.conf' + + with open(_file, 'wb') as out: + 
out.write(self.plugin + '\n') + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def nuage_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nuage_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'vsp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nuage_ctxt + + def nvp_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nvp_ctxt + + def n1kv_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] 
= flags + + return n1kv_ctxt + + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def neutron_ctxt(self): + if https(): + proto = 'https' + else: + proto = 'http' + + if is_clustered(): + host = config('vip') + else: + host = unit_get('private-address') + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} + return ctxt + + def __call__(self): + self._ensure_packages() + + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = self.neutron_ctxt() + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + elif self.plugin in ['nvp', 'nsx']: + ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) + elif self.plugin == 'vsp': + ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) + + alchemy_flags = config('neutron-alchemy-flags') + if alchemy_flags: + flags = config_flags_parser(alchemy_flags) + ctxt['neutron_alchemy_flags'] = flags + + self._save_flag_file() + return ctxt + + +class NeutronPortContext(OSContextGenerator): + NIC_PREFIXES = ['eth', 'bond'] + + def resolve_ports(self, 
ports): + """Resolve NICs not yet bound to bridge(s) + + If hwaddress provided then returns resolved hwaddress otherwise NIC. + """ + if not ports: + return None + + hwaddr_to_nic = {} + hwaddr_to_ip = {} + for nic in list_nics(self.NIC_PREFIXES): + hwaddr = get_nic_hwaddr(nic) + hwaddr_to_nic[hwaddr] = nic + addresses = get_ipv4_addr(nic, fatal=False) + addresses += get_ipv6_addr(iface=nic, fatal=False) + hwaddr_to_ip[hwaddr] = addresses + + resolved = [] + mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) + for entry in ports: + if re.match(mac_regex, entry): + # NIC is in known NICs and does NOT hace an IP address + if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: + # If the nic is part of a bridge then don't use it + if is_bridge_member(hwaddr_to_nic[entry]): + continue + + # Entry is a MAC address for a valid interface that doesn't + # have an IP address assigned yet. + resolved.append(hwaddr_to_nic[entry]) + else: + # If the passed entry is not a MAC address, assume it's a valid + # interface, and that the user put it there on purpose (we can + # trust it to be the real external network). + resolved.append(entry) + + return resolved + + +class OSConfigFlagContext(OSContextGenerator): + """Provides support for user-defined config flags. + + Users can define a comma-seperated list of key=value pairs + in the charm configuration and apply them at any point in + any file by using a template flag. + + Sometimes users might want config flags inserted within a + specific section so this class allows users to specify the + template flag name, allowing for multiple template flags + (sections) within the same context. + + NOTE: the value of config-flags may be a comma-separated list of + key=value pairs and some Openstack config files support + comma-separated lists as values. + """ + + def __init__(self, charm_flag='config-flags', + template_flag='user_config_flags'): + """ + :param charm_flag: config flags in charm configuration. 
+ :param template_flag: insert point for user-defined flags in template + file. + """ + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag + + def __call__(self): + config_flags = config(self._charm_flag) + if not config_flags: + return {} + + return {self._template_flag: + config_flags_parser(config_flags)} + + +class SubordinateConfigContext(OSContextGenerator): + + """ + Responsible for inspecting relations to subordinates that + may be exporting required config via a json blob. + + The subordinate interface allows subordinates to export their + configuration requirements to the principle for multiple config + files and multiple serivces. Ie, a subordinate that has interfaces + to both glance and nova may export to following yaml blob as json:: + + glance: + /etc/glance/glance-api.conf: + sections: + DEFAULT: + - [key1, value1] + /etc/glance/glance-registry.conf: + MYSECTION: + - [key2, value2] + nova: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [key3, value3] + + + It is then up to the principle charms to subscribe this context to + the service+config file it is interestd in. Configuration data will + be available in the template context, in glance's case, as:: + + ctxt = { + ... other context ... 
+ 'subordinate_config': { + 'DEFAULT': { + 'key1': 'value1', + }, + 'MYSECTION': { + 'key2': 'value2', + }, + } + } + """ + + def __init__(self, service, config_file, interface): + """ + :param service : Service name key to query in any subordinate + data found + :param config_file : Service's config file to query sections + :param interface : Subordinate interface to inspect + """ + self.service = service + self.config_file = config_file + self.interface = interface + + def __call__(self): + ctxt = {'sections': {}} + for rid in relation_ids(self.interface): + for unit in related_units(rid): + sub_config = relation_get('subordinate_configuration', + rid=rid, unit=unit) + if sub_config and sub_config != '': + try: + sub_config = json.loads(sub_config) + except: + log('Could not parse JSON from subordinate_config ' + 'setting from %s' % rid, level=ERROR) + continue + + if self.service not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s service' % (rid, self.service), + level=INFO) + continue + + sub_config = sub_config[self.service] + if self.config_file not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s' % (rid, self.config_file), + level=INFO) + continue + + sub_config = sub_config[self.config_file] + for k, v in six.iteritems(sub_config): + if k == 'sections': + for section, config_dict in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) + ctxt[k][section] = config_dict + else: + ctxt[k] = v + + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) + return ctxt + + +class LogLevelContext(OSContextGenerator): + + def __call__(self): + ctxt = {} + ctxt['debug'] = \ + False if config('debug') is None else config('debug') + ctxt['verbose'] = \ + False if config('verbose') is None else config('verbose') + + return ctxt + + +class SyslogContext(OSContextGenerator): + + def __call__(self): + ctxt = {'use_syslog': config('use-syslog')} + return ctxt + + 
+class BindHostContext(OSContextGenerator): + + def __call__(self): + if config('prefer-ipv6'): + return {'bind_host': '::'} + else: + return {'bind_host': '0.0.0.0'} + + +class WorkerConfigContext(OSContextGenerator): + + @property + def num_cpus(self): + try: + from psutil import NUM_CPUS + except ImportError: + apt_install('python-psutil', fatal=True) + from psutil import NUM_CPUS + + return NUM_CPUS + + def __call__(self): + multiplier = config('worker-multiplier') or 0 + ctxt = {"workers": self.num_cpus * multiplier} + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + ctxt['zmq_redis_address'] = relation_get( + 'zmq_redis_address', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + + return ctxt + + +class SysctlContext(OSContextGenerator): + """This context check if the 'sysctl' option exists on configuration + then creates a file with the loaded contents""" + def __call__(self): + sysctl_dict = config('sysctl') + if sysctl_dict: + sysctl_create(sysctl_dict, + '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) + return {'sysctl': sysctl_dict} + + +class NeutronAPIContext(OSContextGenerator): + ''' + Inspects current neutron-plugin-api relation for neutron settings. Return + defaults if it is not present. 
+ ''' + interfaces = ['neutron-plugin-api'] + + def __call__(self): + self.neutron_defaults = { + 'l2_population': { + 'rel_key': 'l2-population', + 'default': False, + }, + 'overlay_network_type': { + 'rel_key': 'overlay-network-type', + 'default': 'gre', + }, + 'neutron_security_groups': { + 'rel_key': 'neutron-security-groups', + 'default': False, + }, + 'network_device_mtu': { + 'rel_key': 'network-device-mtu', + 'default': None, + }, + 'enable_dvr': { + 'rel_key': 'enable-dvr', + 'default': False, + }, + 'enable_l3ha': { + 'rel_key': 'enable-l3ha', + 'default': False, + }, + } + ctxt = self.get_neutron_options({}) + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if 'l2-population' in rdata: + ctxt.update(self.get_neutron_options(rdata)) + + return ctxt + + def get_neutron_options(self, rdata): + settings = {} + for nkey in self.neutron_defaults.keys(): + defv = self.neutron_defaults[nkey]['default'] + rkey = self.neutron_defaults[nkey]['rel_key'] + if rkey in rdata.keys(): + if type(defv) is bool: + settings[nkey] = bool_from_string(rdata[rkey]) + else: + settings[nkey] = rdata[rkey] + else: + settings[nkey] = defv + return settings + + +class ExternalPortContext(NeutronPortContext): + + def __call__(self): + ctxt = {} + ports = config('ext-port') + if ports: + ports = [p.strip() for p in ports.split()] + ports = self.resolve_ports(ports) + if ports: + ctxt = {"ext_port": ports[0]} + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt['ext_port_mtu'] = mtu + + return ctxt + + +class DataPortContext(NeutronPortContext): + + def __call__(self): + ports = config('data-port') + if ports: + portmap = parse_data_port_mappings(ports) + ports = portmap.values() + resolved = self.resolve_ports(ports) + normalized = {get_nic_hwaddr(port): port for port in resolved + if port not in ports} + normalized.update({port: port for port in resolved + 
if port in ports}) + if resolved: + return {bridge: normalized[port] for bridge, port in + six.iteritems(portmap) if port in normalized.keys()} + + return None + + +class PhyNICMTUContext(DataPortContext): + + def __call__(self): + ctxt = {} + mappings = super(PhyNICMTUContext, self).__call__() + if mappings and mappings.values(): + ports = mappings.values() + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt["devs"] = '\\n'.join(ports) + ctxt['mtu'] = mtu + + return ctxt + + +class NetworkServiceContext(OSContextGenerator): + + def __init__(self, rel_name='quantum-network-service'): + self.rel_name = rel_name + self.interfaces = [rel_name] + + def __call__(self): + for rid in relation_ids(self.rel_name): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + ctxt = { + 'keystone_host': rdata.get('keystone_host'), + 'service_port': rdata.get('service_port'), + 'auth_port': rdata.get('auth_port'), + 'service_tenant': rdata.get('service_tenant'), + 'service_username': rdata.get('service_username'), + 'service_password': rdata.get('service_password'), + 'quantum_host': rdata.get('quantum_host'), + 'quantum_port': rdata.get('quantum_port'), + 'quantum_url': rdata.get('quantum_url'), + 'region': rdata.get('region'), + 'service_protocol': + rdata.get('service_protocol') or 'http', + 'auth_protocol': + rdata.get('auth_protocol') or 'http', + } + if context_complete(ctxt): + return ctxt + return {} diff --git a/hooks/charmhelpers-pl/contrib/openstack/files/__init__.py b/hooks/charmhelpers-pl/contrib/openstack/files/__init__.py new file mode 100644 index 0000000..7587679 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/files/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy.sh new file mode 100755 index 0000000..eb8527f --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy.sh @@ -0,0 +1,32 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +CRITICAL=0 +NOTACTIVE='' +LOGFILE=/var/log/nagios/check_haproxy.log +AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') + +for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); +do + output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') + if [ $? 
!= 0 ]; then + date >> $LOGFILE + echo $output >> $LOGFILE + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 + CRITICAL=1 + NOTACTIVE="${NOTACTIVE} $appserver" + fi +done + +if [ $CRITICAL = 1 ]; then + echo "CRITICAL:${NOTACTIVE}" + exit 2 +fi + +echo "OK: All haproxy instances looking good" +exit 0 diff --git a/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy_queue_depth.sh b/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy_queue_depth.sh new file mode 100755 index 0000000..3ebb532 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy_queue_depth.sh @@ -0,0 +1,30 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +# These should be config options at some stage +CURRQthrsh=0 +MAXQthrsh=100 + +AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') + +HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) + +for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') +do + CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) + MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) + + if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then + echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" + exit 2 + fi +done + +echo "OK: All haproxy queue depths looking good" +exit 0 + diff --git a/hooks/charmhelpers-pl/contrib/openstack/ip.py b/hooks/charmhelpers-pl/contrib/openstack/ip.py new file mode 100644 index 0000000..29bbddc --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/ip.py @@ -0,0 +1,146 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from charmhelpers.core.hookenv import ( + config, + unit_get, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +from functools import partial + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' + +ADDRESS_MAP = { + PUBLIC: { + 'config': 'os-public-network', + 'fallback': 'public-address' + }, + INTERNAL: { + 'config': 'os-internal-network', + 'fallback': 'private-address' + }, + ADMIN: { + 'config': 'os-admin-network', + 'fallback': 'private-address' + } +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + return '%s://%s' % (scheme, address) + + +def resolve_address(endpoint_type=PUBLIC): + """Return unit address depending on net config. 
+ + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured. + + :param endpoint_type: Network endpoing type + """ + resolved_address = None + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + clustered = is_clustered() + if clustered: + if not net_addr: + # If no net-splits defined, we expect a single vip + resolved_address = vips[0] + else: + for vip in vips: + if is_address_in_network(net_addr, vip): + resolved_address = vip + break + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr(exc_list=vips)[0] + else: + fallback_addr = unit_get(net_fallback) + + resolved_address = get_address_in_network(net_addr, fallback_addr) + + if resolved_address is None: + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration. (net_type=%s, " + "clustered=%s)" % (net_type, clustered)) + + return resolved_address + + +def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, + override=None): + """Returns the correct endpoint URL to advertise to Keystone. + + This method provides the correct endpoint URL which should be advertised to + the keystone charm for endpoint creation. This method allows for the url to + be overridden to force a keystone endpoint to have specific URL for any of + the defined scopes (admin, internal, public). + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param url_template: str format string for creating the url template. Only + two values will be passed - the scheme+hostname + returned by the canonical_url and the port. + :param endpoint_type: str endpoint type to resolve. 
+ :param override: str the name of the config option which overrides the + endpoint URL defined by the charm itself. None will + disable any overrides (default). + """ + if override: + # Return any user-defined overrides for the keystone endpoint URL. + user_value = config(override) + if user_value: + return user_value.strip() + + return url_template % (canonical_url(configs, endpoint_type), port) + + +public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC) + +internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL) + +admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN) diff --git a/hooks/charmhelpers-pl/contrib/openstack/neutron.py b/hooks/charmhelpers-pl/contrib/openstack/neutron.py new file mode 100644 index 0000000..fa036e4 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/neutron.py @@ -0,0 +1,337 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Various utilies for dealing with Neutron and the renaming from Quantum. 
+ +import six +from subprocess import check_output + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import os_release + + +def headers_package(): + """Ensures correct linux-headers for running kernel are installed, + for building DKMS package""" + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + return 'linux-headers-%s' % kver + +QUANTUM_CONF_DIR = '/etc/quantum' + + +def kernel_version(): + """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + kver = kver.split('.') + return (int(kver[0]), int(kver[1])) + + +def determine_dkms_package(): + """ Determine which DKMS package should be used based on kernel version """ + # NOTE: 3.13 kernels have support for GRE and VXLAN native + if kernel_version() >= (3, 13): + return [] + else: + return ['openvswitch-datapath-dkms'] + + +# legacy + + +def quantum_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' + 'OVSQuantumPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=QUANTUM_CONF_DIR)], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['quantum-plugin-openvswitch-agent']], + 'server_packages': ['quantum-server', + 'quantum-plugin-openvswitch'], + 'server_services': ['quantum-server'] + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 
+ 'QuantumPlugin.NvpPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=QUANTUM_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['quantum-server', + 'quantum-plugin-nicira'], + 'server_services': ['quantum-server'] + } + } + +NEUTRON_CONF_DIR = '/etc/neutron' + + +def neutron_plugins(): + from charmhelpers.contrib.openstack import context + release = os_release('nova-common') + plugins = { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-openvswitch-agent']], + 'server_packages': ['neutron-server', + 'neutron-plugin-openvswitch'], + 'server_services': ['neutron-server'] + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
+ 'NeutronPlugin.NvpPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-nicira'], + 'server_services': ['neutron-server'] + }, + 'nsx': { + 'config': '/etc/neutron/plugins/vmware/nsx.ini', + 'driver': 'vmware', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-vmware'], + 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata']], + 'server_packages': ['neutron-server', 'calico-control'], + 'server_services': ['neutron-server'] + }, + 'vsp': { + 'config': 
'/etc/neutron/plugins/nuage/nuage_plugin.ini', + 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], + 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['plumgrid-lxc'], + ['iovisor-dkms'], + ['plumgrid-puppet']], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] + } + } + if release >= 'icehouse': + # NOTE: patch in ml2 plugin for icehouse onwards + plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['ovs']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + # NOTE: patch in vmware renames nvp->nsx for icehouse onwards + plugins['nvp'] = plugins['nsx'] + return plugins + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log("Network manager '%s' does not support plugins." 
% (manager), + level=ERROR) + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise Exception + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatability (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise Exception + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' + + +def parse_mappings(mappings): + parsed = {} + if mappings: + mappings = mappings.split(' ') + for m in mappings: + p = m.partition(':') + if p[1] == ':': + parsed[p[0].strip()] = p[2].strip() + + return parsed + + +def parse_bridge_mappings(mappings): + """Parse bridge mappings. + + Mappings must be a space-delimited list of provider:bridge mappings. + + Returns dict of the form {provider:bridge}. + """ + return parse_mappings(mappings) + + +def parse_data_port_mappings(mappings, default_bridge='br-data'): + """Parse data port mappings. + + Mappings must be a space-delimited list of bridge:port mappings. + + Returns dict of the form {bridge:port}. + """ + _mappings = parse_mappings(mappings) + if not _mappings: + if not mappings: + return {} + + # For backwards-compatibility we need to support port-only provided in + # config. 
+ _mappings = {default_bridge: mappings.split(' ')[0]} + + bridges = _mappings.keys() + ports = _mappings.values() + if len(set(bridges)) != len(bridges): + raise Exception("It is not allowed to have more than one port " + "configured on the same bridge") + + if len(set(ports)) != len(ports): + raise Exception("It is not allowed to have the same port configured " + "on more than one bridge") + + return _mappings + + +def parse_vlan_range_mappings(mappings): + """Parse vlan range mappings. + + Mappings must be a space-delimited list of provider:start:end mappings. + + Returns dict of the form {provider: (start, end)}. + """ + _mappings = parse_mappings(mappings) + if not _mappings: + return {} + + mappings = {} + for p, r in six.iteritems(_mappings): + mappings[p] = tuple(r.split(':')) + + return mappings diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/__init__.py b/hooks/charmhelpers-pl/contrib/openstack/templates/__init__.py new file mode 100644 index 0000000..7587679 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers-pl/contrib/openstack/templates/ceph.conf new file mode 100644 index 0000000..81a9719 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/ceph.conf @@ -0,0 +1,15 @@ +############################################################################### +# [ WARNING ] +# cinder configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### +[global] +{% if auth -%} + auth_supported = {{ auth }} + keyring = /etc/ceph/$cluster.$name.keyring + mon host = {{ mon_hosts }} +{% endif -%} + log to syslog = {{ use_syslog }} + err to syslog = {{ use_syslog }} + clog to syslog = {{ use_syslog }} + diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/git.upstart b/hooks/charmhelpers-pl/contrib/openstack/templates/git.upstart new file mode 100644 index 0000000..4bed404 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/git.upstart @@ -0,0 +1,17 @@ +description "{{ service_description }}" +author "Juju {{ service_name }} Charm " + +start on runlevel [2345] +stop on runlevel [!2345] + +respawn + +exec start-stop-daemon --start --chuid {{ user_name }} \ + --chdir {{ start_dir }} --name {{ process_name }} \ + --exec {{ executable_name }} -- \ + {% for config_file in config_files -%} + --config-file={{ config_file }} \ + {% endfor -%} + {% if log_file -%} + --log-file={{ log_file }} + {% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers-pl/contrib/openstack/templates/haproxy.cfg new file mode 100644 index 0000000..ad875f1 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/haproxy.cfg @@ -0,0 +1,58 @@ +global + log {{ local_host }} local0 + log {{ local_host }} local1 notice + maxconn 20000 + user 
haproxy + group haproxy + spread-checks 0 + +defaults + log global + mode tcp + option tcplog + option dontlognull + retries 3 + timeout queue 1000 + timeout connect 1000 +{% if haproxy_client_timeout -%} + timeout client {{ haproxy_client_timeout }} +{% else -%} + timeout client 30000 +{% endif -%} + +{% if haproxy_server_timeout -%} + timeout server {{ haproxy_server_timeout }} +{% else -%} + timeout server 30000 +{% endif -%} + +listen stats {{ stat_port }} + mode http + stats enable + stats hide-version + stats realm Haproxy\ Statistics + stats uri / + stats auth admin:password + +{% if frontends -%} +{% for service, ports in service_ports.items() -%} +frontend tcp-in_{{ service }} + bind *:{{ ports[0] }} + {% if ipv6 -%} + bind :::{{ ports[0] }} + {% endif -%} + {% for frontend in frontends -%} + acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} + use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} + {% endfor -%} + default_backend {{ service }}_{{ default_backend }} + +{% for frontend in frontends -%} +backend {{ service }}_{{ frontend }} + balance leastconn + {% for unit, address in frontends[frontend]['backends'].items() -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endfor %} +{% endfor -%} +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend new file mode 100644 index 0000000..ce28fa3 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend @@ -0,0 +1,24 @@ +{% if endpoints -%} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} + ProxyPass / http://localhost:{{ int }}/ + 
ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + +{% endfor -%} + + Order deny,allow + Allow from all + + + Order allow,deny + Allow from all + +{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend.conf new file mode 100644 index 0000000..ce28fa3 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend.conf @@ -0,0 +1,24 @@ +{% if endpoints -%} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + +{% endfor -%} + + Order deny,allow + Allow from all + + + Order allow,deny + Allow from all + +{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers-pl/contrib/openstack/templates/section-keystone-authtoken new file mode 100644 index 0000000..2a37edd --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/section-keystone-authtoken @@ -0,0 +1,9 @@ +{% if auth_host -%} +[keystone_authtoken] +identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/section-rabbitmq-oslo b/hooks/charmhelpers-pl/contrib/openstack/templates/section-rabbitmq-oslo new file mode 100644 index 
0000000..b444c9c --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/section-rabbitmq-oslo @@ -0,0 +1,22 @@ +{% if rabbitmq_host or rabbitmq_hosts -%} +[oslo_messaging_rabbit] +rabbit_userid = {{ rabbitmq_user }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +rabbit_password = {{ rabbitmq_password }} +{% if rabbitmq_hosts -%} +rabbit_hosts = {{ rabbitmq_hosts }} +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +rabbit_durable_queues = False +{% endif -%} +{% else -%} +rabbit_host = {{ rabbitmq_host }} +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +rabbit_port = {{ rabbit_ssl_port }} +{% if rabbit_ssl_ca -%} +kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} +{% endif -%} +{% endif -%} +{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/section-zeromq b/hooks/charmhelpers-pl/contrib/openstack/templates/section-zeromq new file mode 100644 index 0000000..95f1a76 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templates/section-zeromq @@ -0,0 +1,14 @@ +{% if zmq_host -%} +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) +rpc_backend = zmq +rpc_zmq_host = {{ zmq_host }} +{% if zmq_redis_address -%} +rpc_zmq_matchmaker = redis +matchmaker_heartbeat_freq = 15 +matchmaker_heartbeat_ttl = 30 +[matchmaker_redis] +host = {{ zmq_redis_address }} +{% else -%} +rpc_zmq_matchmaker = ring +{% endif -%} +{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templating.py b/hooks/charmhelpers-pl/contrib/openstack/templating.py new file mode 100644 index 0000000..24cb272 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/templating.py @@ -0,0 +1,295 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os + +import six + +from charmhelpers.fetch import apt_install +from charmhelpers.core.hookenv import ( + log, + ERROR, + INFO +) +from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES + +try: + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions +except ImportError: + # python-jinja2 may not be installed yet, or we're running unittests. + FileSystemLoader = ChoiceLoader = Environment = exceptions = None + + +class OSConfigException(Exception): + pass + + +def get_loader(templates_dir, os_release): + """ + Create a jinja2.ChoiceLoader containing template dirs up to + and including os_release. If directory template directory + is missing at templates_dir, it will be omitted from the loader. + templates_dir is added to the bottom of the search list as a base + loading dir. + + A charm may also ship a templates dir with this module + and it will be appended to the bottom of the search list, eg:: + + hooks/charmhelpers/contrib/openstack/templates + + :param templates_dir (str): Base template directory containing release + sub-directories. + :param os_release (str): OpenStack release codename to construct template + loader. + :returns: jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. + """ + tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) + for rel in six.itervalues(OPENSTACK_CODENAMES)] + + if not os.path.isdir(templates_dir): + log('Templates directory not found @ %s.' 
% templates_dir, + level=ERROR) + raise OSConfigException + + # the bottom contains tempaltes_dir and possibly a common templates dir + # shipped with the helper. + loaders = [FileSystemLoader(templates_dir)] + helper_templates = os.path.join(os.path.dirname(__file__), 'templates') + if os.path.isdir(helper_templates): + loaders.append(FileSystemLoader(helper_templates)) + + for rel, tmpl_dir in tmpl_dirs: + if os.path.isdir(tmpl_dir): + loaders.insert(0, FileSystemLoader(tmpl_dir)) + if rel == os_release: + break + log('Creating choice loader with dirs: %s' % + [l.searchpath for l in loaders], level=INFO) + return ChoiceLoader(loaders) + + +class OSConfigTemplate(object): + """ + Associates a config file template with a list of context generators. + Responsible for constructing a template context based on those generators. + """ + def __init__(self, config_file, contexts): + self.config_file = config_file + + if hasattr(contexts, '__call__'): + self.contexts = [contexts] + else: + self.contexts = contexts + + self._complete_contexts = [] + + def context(self): + ctxt = {} + for context in self.contexts: + _ctxt = context() + if _ctxt: + ctxt.update(_ctxt) + # track interfaces for every complete context. + [self._complete_contexts.append(interface) + for interface in context.interfaces + if interface not in self._complete_contexts] + return ctxt + + def complete_contexts(self): + ''' + Return a list of interfaces that have atisfied contexts. + ''' + if self._complete_contexts: + return self._complete_contexts + self.context() + return self._complete_contexts + + +class OSConfigRenderer(object): + """ + This class provides a common templating system to be used by OpenStack + charms. It is intended to help charms share common code and templates, + and ease the burden of managing config templates across multiple OpenStack + releases. 
+ + Basic usage:: + + # import some common context generates from charmhelpers + from charmhelpers.contrib.openstack import context + + # Create a renderer object for a specific OS release. + configs = OSConfigRenderer(templates_dir='/tmp/templates', + openstack_release='folsom') + # register some config files with context generators. + configs.register(config_file='/etc/nova/nova.conf', + contexts=[context.SharedDBContext(), + context.AMQPContext()]) + configs.register(config_file='/etc/nova/api-paste.ini', + contexts=[context.IdentityServiceContext()]) + configs.register(config_file='/etc/haproxy/haproxy.conf', + contexts=[context.HAProxyContext()]) + # write out a single config + configs.write('/etc/nova/nova.conf') + # write out all registered configs + configs.write_all() + + **OpenStack Releases and template loading** + + When the object is instantiated, it is associated with a specific OS + release. This dictates how the template loader will be constructed. + + The constructed loader attempts to load the template from several places + in the following order: + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. + + For the example above, '/tmp/templates' contains the following structure:: + + /tmp/templates/nova.conf + /tmp/templates/api-paste.ini + /tmp/templates/grizzly/api-paste.ini + /tmp/templates/havana/api-paste.ini + + Since it was registered with the grizzly release, it first seraches + the grizzly directory for nova.conf, then the templates dir. + + When writing api-paste.ini, it will find the template in the grizzly + directory. + + If the object were created with folsom, it would fall back to the + base templates dir for its api-paste.ini template. 
+ + This system should help manage changes in config files through + openstack releases, allowing charms to fall back to the most recently + updated config template for a given release + + The haproxy.conf, since it is not shipped in the templates dir, will + be loaded from the module directory's template directory, eg + $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows + us to ship common templates (haproxy, apache) with the helpers. + + **Context generators** + + Context generators are used to generate template contexts during hook + execution. Doing so may require inspecting service relations, charm + config, etc. When registered, a config file is associated with a list + of generators. When a template is rendered and written, all context + generates are called in a chain to generate the context dictionary + passed to the jinja2 template. See context.py for more info. + """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + apt_install('python-jinja2') + + def register(self, config_file, contexts): + """ + Register a config file with a list of context generators to be called + during rendering. 
+ """ + self.templates[config_file] = OSConfigTemplate(config_file=config_file, + contexts=contexts) + log('Registered config file: %s' % config_file, level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from %s' % template.filename, level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + ctxt = self.templates[config_file].context() + + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking for it + # using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from %s by %s or %s.' % + (self.templates_dir, os.path.basename(config_file), _tmpl), + level=ERROR) + raise e + + log('Rendering from template: %s' % _tmpl, level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. + """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + + _out = self.render(config_file) + + with open(config_file, 'wb') as out: + out.write(_out) + + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. 
+ """ + [self.write(k) for k in six.iterkeys(self.templates)] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in six.itervalues(self.templates)] + return interfaces diff --git a/hooks/charmhelpers-pl/contrib/openstack/utils.py b/hooks/charmhelpers-pl/contrib/openstack/utils.py new file mode 100644 index 0000000..f90a028 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/openstack/utils.py @@ -0,0 +1,642 @@ +#!/usr/bin/python + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Common python helper functions used for OpenStack charms. 
+from collections import OrderedDict +from functools import wraps + +import subprocess +import json +import os +import sys + +import six +import yaml + +from charmhelpers.contrib.network import ip + +from charmhelpers.core import ( + unitdata, +) + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, + charm_dir, + INFO, + relation_ids, + relation_set +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr +) + +from charmhelpers.core.host import lsb_release, mounts, umount +from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.contrib.python.packages import pip_install +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), +]) + +# The ugly duckling +SWIFT_CODENAMES = OrderedDict([ + ('1.4.3', 'diablo'), + ('1.4.8', 'essex'), + ('1.7.4', 'folsom'), + ('1.8.0', 'grizzly'), + ('1.7.7', 'grizzly'), + ('1.7.6', 'grizzly'), + ('1.10.0', 'havana'), + ('1.9.1', 'havana'), + ('1.9.0', 'havana'), + ('1.13.1', 'icehouse'), + ('1.13.0', 'icehouse'), + ('1.12.0', 'icehouse'), 
+ ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), + ('2.2.1', 'kilo'), + ('2.2.2', 'kilo'), +]) + +DEFAULT_LOOPBACK_SIZE = '5G' + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in six.iteritems(OPENSTACK_CODENAMES): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(OPENSTACK_CODENAMES): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + import apt_pkg as apt + + cache = apt_cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. 
+ e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) + + +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. 
+ ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + +def import_key(keyid): + cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel == 'distro-proposed': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(DISTRO_PROPOSED % ubuntu_rel) + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. 
+ pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def config_value_changed(option): + """ + Determine if config value changed since last call to this function. + """ + hook_data = unitdata.HookData() + with hook_data(): + db = unitdata.kv() + current = config(option) + saved = db.get(option) + db.set(option, current) + if saved is None: + return False + return current != saved + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. 
+ """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in six.iteritems(env_vars) if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + + """ + + import apt_pkg as apt + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 + + +def ensure_block_device(block_device): + ''' + Confirm block_device, create as loopback if necessary. + + :param block_device: str: Full path of block device to ensure. + + :returns: str: Full path of ensured block device. + ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: block_device=%s.' + % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. 
That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def git_install_requested(): + """ + Returns true if openstack-origin-git is specified. + """ + return config('openstack-origin-git') is not None + + +requirements_dir = None + + +def git_clone_and_install(projects_yaml, core_project): + """ + Clone/install all specified OpenStack repositories. 
+ + The expected format of projects_yaml is: + repositories: + - {name: keystone, + repository: 'git://git.openstack.org/openstack/keystone.git', + branch: 'stable/icehouse'} + - {name: requirements, + repository: 'git://git.openstack.org/openstack/requirements.git', + branch: 'stable/icehouse'} + directory: /mnt/openstack-git + http_proxy: http://squid.internal:3128 + https_proxy: https://squid.internal:3128 + + The directory, http_proxy, and https_proxy keys are optional. + """ + global requirements_dir + parent_dir = '/mnt/openstack-git' + + if not projects_yaml: + return + + projects = yaml.load(projects_yaml) + _git_validate_projects_yaml(projects, core_project) + + old_environ = dict(os.environ) + + if 'http_proxy' in projects.keys(): + os.environ['http_proxy'] = projects['http_proxy'] + if 'https_proxy' in projects.keys(): + os.environ['https_proxy'] = projects['https_proxy'] + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + for p in projects['repositories']: + repo = p['repository'] + branch = p['branch'] + if p['name'] == 'requirements': + repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + update_requirements=False) + requirements_dir = repo_dir + else: + repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + update_requirements=True) + + os.environ = old_environ + + +def _git_validate_projects_yaml(projects, core_project): + """ + Validate the projects yaml. 
+ """ + _git_ensure_key_exists('repositories', projects) + + for project in projects['repositories']: + _git_ensure_key_exists('name', project.keys()) + _git_ensure_key_exists('repository', project.keys()) + _git_ensure_key_exists('branch', project.keys()) + + if projects['repositories'][0]['name'] != 'requirements': + error_out('{} git repo must be specified first'.format('requirements')) + + if projects['repositories'][-1]['name'] != core_project: + error_out('{} git repo must be specified last'.format(core_project)) + + +def _git_ensure_key_exists(key, keys): + """ + Ensure that key exists in keys. + """ + if key not in keys: + error_out('openstack-origin-git key \'{}\' is missing'.format(key)) + + +def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): + """ + Clone and install a single git repository. + """ + dest_dir = os.path.join(parent_dir, os.path.basename(repo)) + + if not os.path.exists(parent_dir): + juju_log('Directory already exists at {}. ' + 'No need to create directory.'.format(parent_dir)) + os.mkdir(parent_dir) + + if not os.path.exists(dest_dir): + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch) + else: + repo_dir = dest_dir + + if update_requirements: + if not requirements_dir: + error_out('requirements repo must be cloned before ' + 'updating from global requirements.') + _git_update_requirements(repo_dir, requirements_dir) + + juju_log('Installing git repo from dir: {}'.format(repo_dir)) + pip_install(repo_dir) + + return repo_dir + + +def _git_update_requirements(package_dir, reqs_dir): + """ + Update from global requirements. + + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt. 
+ """ + orig_dir = os.getcwd() + os.chdir(reqs_dir) + cmd = ['python', 'update.py', package_dir] + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + package = os.path.basename(package_dir) + error_out("Error updating {} from global-requirements.txt".format(package)) + os.chdir(orig_dir) + + +def git_src_dir(projects_yaml, project): + """ + Return the directory where the specified project's source is located. + """ + parent_dir = '/mnt/openstack-git' + + if not projects_yaml: + return + + projects = yaml.load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + for p in projects['repositories']: + if p['name'] == project: + return os.path.join(parent_dir, os.path.basename(p['repository'])) + + return None diff --git a/hooks/charmhelpers-pl/contrib/python/__init__.py b/hooks/charmhelpers-pl/contrib/python/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/python/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
#!/usr/bin/env python
# coding: utf-8

# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

"""Thin wrappers around the ``pip`` command-line interface.

Every helper builds an argv-style list and hands it to ``pip.main``
(aliased ``pip_execute``).  If pip itself is not importable yet, it is
installed via apt first.
"""

from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log

try:
    from pip import main as pip_execute
except ImportError:
    # pip is not on the system yet; bootstrap it through apt and retry.
    apt_update()
    apt_install('python-pip')
    from pip import main as pip_execute

__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"


def parse_options(given, available):
    """Yield ``--key=value`` argv tokens for each given option.

    :param given: dict of option name -> value supplied by the caller.
    :param available: iterable of option names the pip subcommand accepts;
        options not listed are silently ignored.
    """
    for key, value in sorted(given.items()):
        if key in available:
            yield "--{0}={1}".format(key, value)


def pip_install_requirements(requirements, **options):
    """Install packages listed in a pip requirements file.

    :param requirements: path to the requirements file.
    :param options: optional pip flags; only ``proxy``, ``src`` and
        ``log`` are honoured.
    """
    command = ["install"]

    available_options = ('proxy', 'src', 'log', )
    for option in parse_options(options, available_options):
        command.append(option)

    # Pass the flag and its argument as two argv elements.  A single
    # "-r <path>" token is parsed by pip's option parser as short option
    # "-r" with the value " <path>" (leading space included), so the
    # requirements file is never found.
    command.append("-r")
    command.append(requirements)
    log("Installing from file: {} with options: {}".format(requirements,
                                                           command))
    pip_execute(command)


def pip_install(package, fatal=False, upgrade=False, **options):
    """Install a python package, or a list of packages, with pip.

    :param package: package name (str) or list of package names.
    :param fatal: accepted for API compatibility; pip failures are not
        converted into exceptions here.
    :param upgrade: when True, pass ``--upgrade`` to pip.
    :param options: optional pip flags; ``proxy``, ``src``, ``log`` and
        ``index-url`` are honoured.
    """
    command = ["install"]

    available_options = ('proxy', 'src', 'log', "index-url", )
    for option in parse_options(options, available_options):
        command.append(option)

    if upgrade:
        command.append('--upgrade')

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Installing {} package with options: {}".format(package,
                                                        command))
    pip_execute(command)


def pip_uninstall(package, **options):
    """Quietly uninstall a python package, or a list of packages.

    :param package: package name (str) or list of package names.
    :param options: optional pip flags; ``proxy`` and ``log`` are honoured.
    """
    command = ["uninstall", "-q", "-y"]

    available_options = ('proxy', 'log', )
    for option in parse_options(options, available_options):
        command.append(option)

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Uninstalling {} package with options: {}".format(package,
                                                          command))
    pip_execute(command)


def pip_list():
    """Return the output of ``pip list`` (currently installed packages)."""
    return pip_execute(["list"])


# --- hooks/charmhelpers-pl/contrib/storage/__init__.py (package marker) ---
# This companion file introduced in the same change contains only the
# standard charm-helpers license header:
#
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/__init__.py b/hooks/charmhelpers-pl/contrib/storage/linux/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/storage/linux/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/ceph.py b/hooks/charmhelpers-pl/contrib/storage/linux/ceph.py new file mode 100644 index 0000000..31ea7f9 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/storage/linux/ceph.py @@ -0,0 +1,444 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os +import shutil +import json +import time + +from subprocess import ( + check_call, + check_output, + CalledProcessError, +) +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) +from charmhelpers.fetch import ( + apt_install, +) + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] + auth supported = {auth} + keyring = {keyring} + mon host = {mon_hosts} + log to syslog = {use_syslog} + err to syslog = {use_syslog} + clog to syslog = {use_syslog} +""" + + +def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + +def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def get_osds(service): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. 
+ """ + version = ceph_version() + if version and version >= '0.56': + return json.loads(check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None + + +def create_pool(service, name, replicas=3): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + # Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pgnum = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pgnum = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] + check_call(cmd) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', + str(replicas)] + check_call(cmd) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + """Create a new Ceph keyring containing key.""" + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('Ceph keyring exists at %s.' % keyring, level=WARNING) + return + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' 
% keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' % keyfile, level=INFO) + + +def get_ceph_nodes(): + """Query named relation 'ceph' to determine current nodes.""" + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']).decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block 
device %s to appear' % blk_device, + level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +# TODO: re-use +def modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. 
+ + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + Returns False if no ceph key is available in relation state. 
+ """ + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +def ceph_version(): + """Retrieve the local version of ceph.""" + if os.path.exists('/usr/bin/ceph'): + cmd = ['ceph', '-v'] + output = check_output(cmd).decode('US-ASCII') + output = output.split() + if len(output) > 3: + return output[2] + else: + return None + else: + return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/loopback.py b/hooks/charmhelpers-pl/contrib/storage/linux/loopback.py new file mode 100644 index 0000000..c296f09 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/storage/linux/loopback.py @@ -0,0 +1,78 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import re +from subprocess import ( + check_call, + check_output, +) + +import six + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + devs = [d.strip().split(' ') for d in + check_output(cmd).splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. + + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in six.iteritems(loopback_devices()): + if f == file_path: + return d + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If it a loopback device is not mapped to file, a new one will be created. + + TODO: Confirm size of found loopback device. 
+ + :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) + ''' + for d, f in six.iteritems(loopback_devices()): + if f == path: + return d + + if not os.path.exists(path): + cmd = ['truncate', '--size', size, path] + check_call(cmd) + + return create_loopback(path) diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/lvm.py b/hooks/charmhelpers-pl/contrib/storage/linux/lvm.py new file mode 100644 index 0000000..34b5f71 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/storage/linux/lvm.py @@ -0,0 +1,105 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from subprocess import ( + CalledProcessError, + check_call, + check_output, + Popen, + PIPE, +) + + +################################################## +# LVM helpers. +################################################## +def deactivate_lvm_volume_group(block_device): + ''' + Deactivate any volume gruop associated with an LVM physical volume. + + :param block_device: str: Full path to LVM physical volume + ''' + vg = list_lvm_volume_group(block_device) + if vg: + cmd = ['vgchange', '-an', vg] + check_call(cmd) + + +def is_lvm_physical_volume(block_device): + ''' + Determine whether a block device is initialized as an LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: boolean: True if block device is a PV, False if not. 
+ ''' + try: + check_output(['pvdisplay', block_device]) + return True + except CalledProcessError: + return False + + +def remove_lvm_physical_volume(block_device): + ''' + Remove LVM PV signatures from a given block device. + + :param block_device: str: Full path of block device to scrub. + ''' + p = Popen(['pvremove', '-ff', block_device], + stdin=PIPE) + p.communicate(input='y\n') + + +def list_lvm_volume_group(block_device): + ''' + List LVM volume group associated with a given block device. + + Assumes block device is a valid LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: str: Name of volume group associated with block device or None + ''' + vg = None + pvd = check_output(['pvdisplay', block_device]).splitlines() + for l in pvd: + l = l.decode('UTF-8') + if l.strip().startswith('VG Name'): + vg = ' '.join(l.strip().split()[2:]) + return vg + + +def create_lvm_physical_volume(block_device): + ''' + Initialize a block device as an LVM physical volume. + + :param block_device: str: Full path of block device to initialize. + + ''' + check_call(['pvcreate', block_device]) + + +def create_lvm_volume_group(volume_group, block_device): + ''' + Create an LVM volume group backed by a given block device. + + Assumes block device has already been initialized as an LVM PV. + + :param volume_group: str: Name of volume group to create. + :block_device: str: Full path of PV-initialized block device. + ''' + check_call(['vgcreate', volume_group, block_device]) diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/utils.py b/hooks/charmhelpers-pl/contrib/storage/linux/utils.py new file mode 100644 index 0000000..c8373b7 --- /dev/null +++ b/hooks/charmhelpers-pl/contrib/storage/linux/utils.py @@ -0,0 +1,70 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import re +from stat import S_ISBLK + +from subprocess import ( + check_call, + check_output, + call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as pat of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--mbrtogpt', + '--clear', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. 
+ ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) + out = check_output(['mount']).decode('UTF-8') + if is_partition: + return bool(re.search(device + r"\b", out)) + return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers-pl/core/__init__.py b/hooks/charmhelpers-pl/core/__init__.py new file mode 100644 index 0000000..d1400a0 --- /dev/null +++ b/hooks/charmhelpers-pl/core/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/core/decorators.py b/hooks/charmhelpers-pl/core/decorators.py new file mode 100644 index 0000000..bb05620 --- /dev/null +++ b/hooks/charmhelpers-pl/core/decorators.py @@ -0,0 +1,57 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/hooks/charmhelpers-pl/core/fstab.py b/hooks/charmhelpers-pl/core/fstab.py new file mode 100644 index 0000000..3056fba --- /dev/null +++ b/hooks/charmhelpers-pl/core/fstab.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import io +import os + +__author__ = 'Jorge Niedbalski R. 
' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + 
break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/hooks/charmhelpers-pl/core/hookenv.py b/hooks/charmhelpers-pl/core/hookenv.py new file mode 100644 index 0000000..fa6193f --- /dev/null +++ b/hooks/charmhelpers-pl/core/hookenv.py @@ -0,0 +1,667 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. 
+# +# Authors: +# Charm Helpers Developers + +from __future__ import print_function +from functools import wraps +import os +import json +import yaml +import subprocess +import sys +import errno +from subprocess import CalledProcessError + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def 
__getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. + self.data = state + + def json(self): + """Serialize the object to json""" + return json.dumps(self.data) + + def yaml(self): + """Serialize the object to yaml""" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + """Determine whether we're running in a relation hook""" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + """The scope for the current relation hook""" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + """The relation ID for the current relation hook""" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + """Local unit ID""" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + """The remote unit for the current relation hook""" + return os.environ.get('JUJU_REMOTE_UNIT', None) + + +def service_name(): + """The name service group this unit belongs to""" + return local_unit().split('/')[0] + + +def hook_name(): + """The name of the currently executing hook""" + return os.path.basename(sys.argv[0]) + + +class Config(dict): + """A dictionary representation of the charm's 
config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path): + self.load_previous() + + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + + def load_previous(self, path=None): + """Load previous copy of config from disk. 
+ + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + self._prev_dict = json.load(f) + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. + + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. 
+ + """ + if self._prev_dict: + for k, v in six.iteritems(self._prev_dict): + if k not in self: + self[k] = v + with open(self.path, 'w') as f: + json.dump(self, f) + + +@cached +def config(scope=None): + """Juju charm configuration""" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + if scope is not None: + return config_data + return Config(config_data) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + """Get relation information""" + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + except CalledProcessError as e: + if e.returncode == 2: + return None + raise + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (list(relation_settings.items()) + list(kwargs.items())): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + """A list of relation_ids""" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] + return [] + + +@cached +def 
related_units(relid=None): + """A list of related units""" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + """Get the json represenation of a unit's relation""" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + """Get relations of a specific relation ID""" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + """Get relations of a specific type""" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + +@cached +def relation_types(): + """Get a list of relation types supported by this charm""" + rel_types = [] + md = metadata() + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + return rel_types + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype 
in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def open_port(port, protocol="TCP"): + """Open a service network port""" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + """Close a service network port""" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + """Get the unit ID for the remote unit""" + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args).decode('UTF-8')) + except ValueError: + return None + + +def unit_public_ip(): + """Get this unit's public IP address""" + return unit_get('public-address') + + +def unit_private_ip(): + """Get this unit's private IP address""" + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + """Raised when an undefined hook is called""" + pass + + +class Hooks(object): + """A convenient handler for hook functions. 
+ + Example:: + + hooks = Hooks() + + # register a hook, taking its name from the function name + @hooks.hook() + def install(): + pass # your code here + + # register a hook, providing a custom hook name + @hooks.hook("config-changed") + def config_changed(): + pass # your code here + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, config_save=True): + super(Hooks, self).__init__() + self._hooks = {} + self._config_save = config_save + + def register(self, name, function): + """Register a hook""" + self._hooks[name] = function + + def execute(self, args): + """Execute a registered hook based on args[0]""" + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + """Decorator, registering them as hooks""" + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + """Return the root directory of the current charm""" + return os.environ.get('CHARM_DIR') + + +@cached +def action_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['action-get'] + if key is not None: + cmd.append(key) + cmd.append('--format=json') + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return action_data + + +def action_set(values): + """Sets the values to be returned after the action finishes""" + cmd = ['action-set'] + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +def action_fail(message): + """Sets the action status to 
failed and sets the error message. + + The results set by action_set are preserved.""" + subprocess.check_call(['action-fail', message]) + + +def status_set(workload_state, message): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message unstead. + + workload_state -- valid juju workload state. + message -- status update message + """ + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] + if workload_state not in valid_states: + raise ValueError( + '{!r} is not a valid workload state'.format(workload_state) + ) + cmd = ['status-set', workload_state, message] + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state + + If the status-set command is not found then assume this is juju < 1.23 and + return 'unknown' + """ + cmd = ['status-get'] + try: + raw_status = subprocess.check_output(cmd, universal_newlines=True) + status = raw_status.rstrip() + return status + except OSError as e: + if e.errno == errno.ENOENT: + return 'unknown' + else: + raise diff --git a/hooks/charmhelpers-pl/core/host.py b/hooks/charmhelpers-pl/core/host.py new file mode 100644 index 0000000..0d2ab4b --- /dev/null +++ b/hooks/charmhelpers-pl/core/host.py @@ -0,0 +1,450 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import os +import re +import pwd +import grp +import random +import string +import subprocess +import hashlib +from contextlib import contextmanager +from collections import OrderedDict + +import six + +from .hookenv import log +from .fstab import Fstab + + +def service_start(service_name): + """Start a system service""" + return service('start', service_name) + + +def service_stop(service_name): + """Stop a system service""" + return service('stop', service_name) + + +def service_restart(service_name): + """Restart a system service""" + return service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + """Reload a system service, optionally falling back to restart if + reload fails""" + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result + + +def service(action, service_name): + """Control a system service""" + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def service_running(service): + """Determine whether a system service is running""" + try: + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def service_available(service_name): + """Determine 
whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user to the system""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_group(group_name, system_group=False): + """Add a group to the system""" + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + except KeyError: + log('creating group {0}'.format(group_name)) + cmd = ['addgroup'] + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ 
+ 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + os.makedirs(realpath, perms) + elif not path_exists: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) + + +def write_file(path, content, owner='root', group='root', perms=0o444): + """Create or overwrite a file with the contents of a byte string.""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'wb') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def fstab_remove(mp): + """Remove the given mountpoint entry from /etc/fstab + """ + return Fstab.remove_by_mountpoint(mp) + + +def fstab_add(dev, mp, fs, options=None): + """Adds the given device entry to the /etc/fstab file + """ + return Fstab.add(dev, mp, fs, options=options) + + +def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): + """Mount a filesystem at a particular mountpoint""" + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + + if persist: + return fstab_add(device, mountpoint, filesystem, options=options) + return True + + +def umount(mountpoint, persist=False): + """Unmount a 
filesystem""" + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + + if persist: + return fstab_remove(mountpoint) + return True + + +def mounts(): + """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ + if os.path.exists(path): + h = getattr(hashlib, hash_type)() + with open(path, 'rb') as source: + h.update(source.read()) + return h.hexdigest() + else: + return None + + +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
+ :raises ChecksumError: If the file fails the checksum + + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + +def restart_on_change(restart_map, stopstart=False): + """Restart services based on configuration files changing + + This function is used a decorator, for example:: + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + pass # your code here + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. + """ + def wrap(f): + def wrapped_f(*args, **kwargs): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args, **kwargs) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + services_list = list(OrderedDict.fromkeys(restarts)) + if not stopstart: + for service_name in services_list: + service('restart', service_name) + else: + for action in ['stop', 'start']: + for service_name in services_list: + service(action, service_name) + return wrapped_f + return wrap + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def pwgen(length=None): + """Generate a random pasword.""" + if length is None: + # A random length is ok to use a weak PRNG + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.ascii_letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + # Use a crypto-friendly PRNG (e.g. 
/dev/urandom) for making the + # actual password + random_generator = random.SystemRandom() + random_chars = [ + random_generator.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) + + +def list_nics(nic_type): + '''Return a list of nics of given type(s)''' + if isinstance(nic_type, six.string_types): + int_types = [nic_type] + else: + int_types = nic_type + interfaces = [] + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + + return interfaces + + +def set_nic_mtu(nic, mtu): + '''Set MTU on a network interface''' + cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] + subprocess.check_call(cmd) + + +def get_nic_mtu(nic): + cmd = ['ip', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') + mtu = "" + for line in ip_output: + words = line.split() + if 'mtu' in words: + mtu = words[words.index("mtu") + 1] + return mtu + + +def get_nic_hwaddr(nic): + cmd = ['ip', '-o', '-0', 'addr', 'show', nic] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + hwaddr = "" + words = ip_output.split() + if 'link/ether' in words: + hwaddr = words[words.index('link/ether') + 1] + return hwaddr + + +def cmp_pkgrevno(package, revno, pkgcache=None): + '''Compare supplied revno with the revno of the installed package + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. 
Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + ''' + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group, follow_links=True): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + chown(full, uid, gid) + + +def lchownr(path, owner, group): + chownr(path, owner, group, follow_links=False) diff --git a/hooks/charmhelpers-pl/core/services/__init__.py b/hooks/charmhelpers-pl/core/services/__init__.py new file mode 100644 index 0000000..0928158 --- /dev/null +++ b/hooks/charmhelpers-pl/core/services/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hooks/charmhelpers-pl/core/services/base.py b/hooks/charmhelpers-pl/core/services/base.py new file mode 100644 index 0000000..2ff5338 --- /dev/null +++ b/hooks/charmhelpers-pl/core/services/base.py @@ -0,0 +1,329 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import re +import json +from collections import Iterable, OrderedDict + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. 
+ + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. 
+ + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = OrderedDict() + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. 
+ """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. 
+ """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. 
+ + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. 
+ """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hooks/charmhelpers-pl/core/services/helpers.py b/hooks/charmhelpers-pl/core/services/helpers.py new file mode 100644 index 0000000..3eb5fb4 --- /dev/null +++ b/hooks/charmhelpers-pl/core/services/helpers.py @@ -0,0 +1,267 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import yaml +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). 
+ + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = None + interface = None + + def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + + if name is not None: + self.name = name + if additional_required_keys: + self.required_keys.extend(additional_required_keys) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. 
+ + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + RelationContext.__init__(self, *args, **kwargs) + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. 
+ + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + RelationContext.__init__(self, *args, **kwargs) + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. 
+ Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0o600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready + action. + + :param str source: The template source file, relative to + `$CHARM_DIR/templates` + + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + """ + def __init__(self, source, target, + owner='root', group='root', perms=0o444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/hooks/charmhelpers-pl/core/strutils.py b/hooks/charmhelpers-pl/core/strutils.py new file mode 100644 index 0000000..a2a784a --- /dev/null +++ b/hooks/charmhelpers-pl/core/strutils.py @@ -0,0 
+1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't', 'on']: + return True + elif value in ['n', 'no', 'false', 'f', 'off']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/hooks/charmhelpers-pl/core/sysctl.py b/hooks/charmhelpers-pl/core/sysctl.py new file mode 100644 index 0000000..21cc8ab --- /dev/null +++ b/hooks/charmhelpers-pl/core/sysctl.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) + +__author__ = 'Jorge Niedbalski R. ' + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict_parsed.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) diff --git a/hooks/charmhelpers-pl/core/templating.py b/hooks/charmhelpers-pl/core/templating.py new file mode 100644 index 0000000..4531999 --- /dev/null +++ b/hooks/charmhelpers-pl/core/templating.py @@ -0,0 +1,68 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8'):
    """
    Render a Jinja2 template to a file.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    # This object is a Jinja2 Environment, not a loader; name it accordingly.
    env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        # (Removed the dead `source = source` statement that used to sit here.)
        template = env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    # Ensure the target's directory exists before writing the rendered file.
    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
    host.write_file(target, content.encode(encoding), owner, group, perms)
One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. 
+ >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. 
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? 
+ where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, 
revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. 
+ + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. 
+ charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', dict(data['env'])) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/hooks/charmhelpers-pl/fetch/__init__.py b/hooks/charmhelpers-pl/fetch/__init__.py new file mode 100644 index 0000000..9a1a251 --- /dev/null +++ b/hooks/charmhelpers-pl/fetch/__init__.py @@ -0,0 +1,439 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import importlib +from tempfile import NamedTemporaryFile +import time +from yaml import safe_load +from charmhelpers.core.host import ( + lsb_release +) +import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) +import os + +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 
class BaseFetchHandler(object):

    """Base class for FetchHandler implementations in fetch plugins"""

    def can_handle(self, source):
        """Returns True if the source can be handled. Otherwise returns
        a string explaining why it cannot"""
        return "Wrong source type"

    def install(self, source):
        """Try to download and unpack the source. Return the path to the
        unpacked files or raise UnhandledSource."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        # Split the url into its six URL components.
        return urlparse(url)

    def base_url(self, url):
        """Return url without querystring or fragment"""
        components = list(self.parse_url(url))
        # Blank out the query and fragment (the last two components).
        components[4:] = [''] * len(components[4:])
        return urlunparse(components)
apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_apt_command(cmd, fatal) + + +def apt_hold(packages, fatal=False): + """Hold one or more packages""" + cmd = ['apt-mark', 'hold'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Holding {}".format(packages)) + + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples:: + + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + 'distro' may be used as a noop + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automtically, so sould not be provided. + """ + if source is None: + log('Source is not present. 
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple package sources from charm configuration.

    Both lists are encoded as yaml fragments in the configuration, included
    as strings.  Sources and their corresponding keys are of the types
    supported by add_source().

    Example config:
        install_sources: |
            - "ppa:foo"
            - "http://example.com/repo precise main"
        install_keys: |
            - null
            - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.

    :param update: when True, run ``apt-get update`` after adding sources.
    :raises SourceConfigError: if sources and keys have different lengths.
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    if isinstance(sources, six.string_types):
        sources = [sources]

    if keys is None:
        # No keys at all: every source is added unsigned.
        keys = [None] * len(sources)
    else:
        if isinstance(keys, six.string_types):
            keys = [keys]
        if len(sources) != len(keys):
            raise SourceConfigError(
                'Install sources and keys lists are different lengths')

    for source, key in zip(sources, keys):
        add_source(source, key)

    if update:
        apt_update(fatal=True)
+ handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None + for handler in handlers: + try: + installed_to = handler.install(source, *args, **kwargs) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr( + importlib.import_module(package), + classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be ommitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format( + handler_name)) + return plugin_list + + +def _run_apt_command(cmd, fatal=False): + """ + Run an APT command, checking output and retrying if the fatal flag is set + to True. + + :param: cmd: str: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." 
+ "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/hooks/charmhelpers-pl/fetch/archiveurl.py b/hooks/charmhelpers-pl/fetch/archiveurl.py new file mode 100644 index 0000000..8dfce50 --- /dev/null +++ b/hooks/charmhelpers-pl/fetch/archiveurl.py @@ -0,0 +1,161 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +import hashlib +import re + +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs + + +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def 
splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ + # propogate all exceptions + # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse(source) + if proto in ('http', 'https'): + auth, barehost = splituser(netloc) + if auth is not None: + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + # Mandatory file validation via Sha1 or MD5 hashing. 
+ def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + + """ + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except URLError as e: + raise UnhandledSource(e.reason) + except OSError as e: + raise UnhandledSource(e.strerror) + options = parse_qs(url_parts.fragment) + for key, value in options.items(): + if not six.PY3: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) diff --git a/hooks/charmhelpers-pl/fetch/bzrurl.py b/hooks/charmhelpers-pl/fetch/bzrurl.py new file mode 100644 index 0000000..3531315 --- /dev/null +++ b/hooks/charmhelpers-pl/fetch/bzrurl.py @@ -0,0 +1,78 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + +try: + from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors + + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + if url_parts.scheme == "lp": + from bzrlib.plugin import load_plugins + load_plugins() + try: + local_branch = bzrdir.BzrDir.create_branch_convenience(dest) + except errors.AlreadyControlDirError: + local_branch = Branch.open(dest) + try: + remote_branch = Branch.open(source) + remote_branch.push(local_branch) + tree = workingtree.WorkingTree.open(dest) + tree.update() + except Exception as e: + raise e + + def install(self, source): + 
url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hooks/charmhelpers-pl/fetch/giturl.py b/hooks/charmhelpers-pl/fetch/giturl.py new file mode 100644 index 0000000..93aae87 --- /dev/null +++ b/hooks/charmhelpers-pl/fetch/giturl.py @@ -0,0 +1,71 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +import six +if six.PY3: + raise ImportError('GitPython does not support Python 3') + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + +from git.exc import GitCommandError # noqa E402 + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master", dest=None): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + try: + self.clone(source, dest_dir, branch) + except GitCommandError as e: + raise UnhandledSource(e.message) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hooks/charmhelpers-pl/payload/__init__.py b/hooks/charmhelpers-pl/payload/__init__.py new file mode 100644 index 0000000..e6f4249 --- /dev/null +++ b/hooks/charmhelpers-pl/payload/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +"Tools for working with files injected into a charm just before deployment." diff --git a/hooks/charmhelpers-pl/payload/execd.py b/hooks/charmhelpers-pl/payload/execd.py new file mode 100644 index 0000000..4d4d81a --- /dev/null +++ b/hooks/charmhelpers-pl/payload/execd.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_call(submodule_path, shell=True, stderr=stderr) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. 
Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hooks/config-changed b/hooks/config-changed new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/config-changed @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/install b/hooks/install new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/install @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/neutron-plugin-api-relation-broken b/hooks/neutron-plugin-api-relation-broken new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/neutron-plugin-api-relation-broken @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/neutron-plugin-api-relation-changed b/hooks/neutron-plugin-api-relation-changed new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/neutron-plugin-api-relation-changed @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/neutron-plugin-api-relation-departed b/hooks/neutron-plugin-api-relation-departed new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/neutron-plugin-api-relation-departed @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/neutron-plugin-api-relation-joined b/hooks/neutron-plugin-api-relation-joined new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/neutron-plugin-api-relation-joined @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/pg_dir_context.py b/hooks/pg_dir_context.py new file mode 100644 index 0000000..09a99f4 --- /dev/null +++ b/hooks/pg_dir_context.py @@ -0,0 +1,78 @@ +from charmhelpers.core.hookenv import ( + config, + unit_get, +) +from charmhelpers.contrib.openstack import context +from 
charmhelpers.contrib.openstack.utils import get_host_ip +from charmhelpers.contrib.network.ip import get_address_in_network + +import re +from socket import gethostname as get_unit_hostname + +''' +def _neutron_api_settings(): + neutron_settings = { + 'neutron_security_groups': False, + 'l2_population': True, + 'overlay_network_type': 'gre', + } + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if 'l2-population' not in rdata: + continue + neutron_settings = { + 'l2_population': rdata['l2-population'], + 'neutron_security_groups': rdata['neutron-security-groups'], + 'overlay_network_type': rdata['overlay-network-type'], + } + # Override with configuration if set to true + if config('disable-security-groups'): + neutron_settings['neutron_security_groups'] = False + return neutron_settings + return neutron_settings +''' + + +class PGDirContext(context.NeutronContext): + interfaces = [] + + @property + def plugin(self): + return 'plumgrid' + + @property + def network_manager(self): + return 'neutron' + + def _save_flag_file(self): + pass + + #@property + #def neutron_security_groups(self): + # neutron_api_settings = _neutron_api_settings() + # return neutron_api_settings['neutron_security_groups'] + + def pg_ctxt(self): + #Generated Config for all Plumgrid templates inside + #the templates folder + pg_ctxt = super(PGDirContext, self).pg_ctxt() + if not pg_ctxt: + return {} + + conf = config() + pg_ctxt['local_ip'] = \ + get_address_in_network(network=None, + fallback=get_host_ip(unit_get('private-address'))) + #neutron_api_settings = _neutron_api_settings() + + #TODO: Get this value from the neutron-api charm + pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip'] + pg_ctxt['pg_hostname'] = "pg-director" + pg_ctxt['interface'] = "juju-br0" + pg_ctxt['label'] = get_unit_hostname() + pg_ctxt['fabric_mode'] = 'host' + virtual_ip_array = re.split('\.', conf['plumgrid-virtual-ip']) + 
pg_ctxt['virtual_router_id'] = virtual_ip_array[3] + + return pg_ctxt diff --git a/hooks/pg_dir_hooks.py b/hooks/pg_dir_hooks.py new file mode 100755 index 0000000..cb1a495 --- /dev/null +++ b/hooks/pg_dir_hooks.py @@ -0,0 +1,58 @@ +#!/usr/bin/python + +import sys + +from charmhelpers.core.hookenv import ( + Hooks, + UnregisteredHookError, + config, + log, + relation_set, +) + +from pg_dir_utils import ( + register_configs, + ensure_files, + restart_pg, + stop_pg, +) + +hooks = Hooks() +CONFIGS = register_configs() + + +@hooks.hook() +def install(): + ensure_files() + + +@hooks.hook('plumgrid-plugin-relation-joined') +def plumgrid_dir(): + ensure_files() + CONFIGS.write_all() + restart_pg() + + +@hooks.hook('plumgrid-relation-joined') +def plumgrid_joined(relation_id=None): + #We can pass information to the edge and gateway from there. + relation_data = { + 'pg_virtual_ip': config('plumgrid-virtual-ip'), + } + relation_set(relation_id=relation_id, **relation_data) + + +@hooks.hook('stop') +def stop(): + stop_pg() + + +def main(): + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) + + +if __name__ == '__main__': + main() diff --git a/hooks/pg_dir_utils.py b/hooks/pg_dir_utils.py new file mode 100644 index 0000000..0958690 --- /dev/null +++ b/hooks/pg_dir_utils.py @@ -0,0 +1,125 @@ +from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute +from copy import deepcopy +from charmhelpers.core.hookenv import log +from charmhelpers.contrib.openstack import templating +from collections import OrderedDict +from charmhelpers.contrib.openstack.utils import ( + os_release, +) +import pg_dir_context +import subprocess +import time + +#Dont need these right now +NOVA_CONF_DIR = "/etc/nova" +NEUTRON_CONF_DIR = "/etc/neutron" +NEUTRON_CONF = '%s/neutron.conf' % NEUTRON_CONF_DIR +NEUTRON_DEFAULT = '/etc/default/neutron-server' + +#Puppet Files +P_PGKA_CONF = 
'/opt/pg/etc/puppet/modules/sal/templates/keepalived.conf.erb' +P_PG_CONF = '/opt/pg/etc/puppet/modules/plumgrid/templates/plumgrid.conf.erb' +P_PGDEF_CONF = '/opt/pg/etc/puppet/modules/sal/templates/default.conf.erb' + +#Plumgrid Files +PGKA_CONF = '/var/lib/libvirt/filesystems/plumgrid/etc/keepalived/keepalived.conf' +PG_CONF = '/var/lib/libvirt/filesystems/plumgrid/opt/pg/etc/plumgrid.conf' +PGDEF_CONF = '/var/lib/libvirt/filesystems/plumgrid/opt/pg/sal/nginx/conf.d/default.conf' +PGHN_CONF = '/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hostname' +PGHS_CONF = '/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hosts' +PGIFCS_CONF = '/var/lib/libvirt/filesystems/plumgrid-data/conf/pg/ifcs.conf' +IFCTL_CONF = '/var/run/plumgrid/lxc/ifc_list_gateway' +IFCTL_P_CONF = '/var/lib/libvirt/filesystems/plumgrid/var/run/plumgrid/lxc/ifc_list_gateway' + +BASE_RESOURCE_MAP = OrderedDict([ + (PGKA_CONF, { + 'services': ['plumgrid'], + 'contexts': [pg_dir_context.PGDirContext()], + }), + (PG_CONF, { + 'services': ['plumgrid'], + 'contexts': [pg_dir_context.PGDirContext()], + }), + (PGDEF_CONF, { + 'services': ['plumgrid'], + 'contexts': [pg_dir_context.PGDirContext()], + }), + (PGHN_CONF, { + 'services': ['plumgrid'], + 'contexts': [pg_dir_context.PGDirContext()], + }), + (PGHS_CONF, { + 'services': ['plumgrid'], + 'contexts': [pg_dir_context.PGDirContext()], + }), + (PGIFCS_CONF, { + 'services': [], + 'contexts': [pg_dir_context.PGDirContext()], + }), +]) +TEMPLATES = 'templates/' + + +def determine_packages(): + return neutron_plugin_attribute('plumgrid', 'packages', 'neutron') + + +def register_configs(release=None): + release = release or os_release('neutron-common', base='icehouse') + configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, + openstack_release=release) + for cfg, rscs in resource_map().iteritems(): + configs.register(cfg, rscs['contexts']) + return configs + + +def resource_map(): + ''' + Dynamically generate a map of resources that will be 
managed for a single + hook execution. + ''' + resource_map = deepcopy(BASE_RESOURCE_MAP) + return resource_map + + +def restart_map(): + ''' + Constructs a restart map based on charm config settings and relation + state. + ''' + return {k: v['services'] for k, v in resource_map().iteritems()} + + +def ensure_files(): + _exec_cmd(cmd=['cp', '--remove-destination', '-f', P_PGKA_CONF, PGKA_CONF]) + _exec_cmd(cmd=['cp', '--remove-destination', '-f', P_PG_CONF, PG_CONF]) + _exec_cmd(cmd=['cp', '--remove-destination', '-f', P_PGDEF_CONF, PGDEF_CONF]) + _exec_cmd(cmd=['touch', PGIFCS_CONF]) + _exec_cmd(cmd=['mkdir', '/etc/nova']) + _exec_cmd(cmd=['touch', 'neutron_plugin.conf']) + + +def restart_pg(): + _exec_cmd(cmd=['virsh', '-c', 'lxc:', 'destroy', 'plumgrid'], error_msg='ERROR Destroying PLUMgrid') + _exec_cmd(cmd=['rm', IFCTL_CONF, IFCTL_P_CONF], error_msg='ERROR Removing ifc_ctl_gateway file') + _exec_cmd(cmd=['iptables', '-F']) + _exec_cmd(cmd=['virsh', '-c', 'lxc:', 'start', 'plumgrid'], error_msg='ERROR Starting PLUMgrid') + time.sleep(5) + _exec_cmd(cmd=['service', 'plumgrid', 'start'], error_msg='ERROR starting PLUMgrid service') + time.sleep(5) + + +def stop_pg(): + _exec_cmd(cmd=['virsh', '-c', 'lxc:', 'destroy', 'plumgrid'], error_msg='ERROR Destroying PLUMgrid') + time.sleep(2) + _exec_cmd(cmd=['rm', IFCTL_CONF, IFCTL_P_CONF], error_msg='ERROR Removing ifc_ctl_gateway file') + + +def _exec_cmd(cmd=None, error_msg='Command exited with ERRORs'): + if cmd is None: + log("NO command") + else: + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError, e: + log(error_msg) diff --git a/hooks/plumgrid-plugin-relation-broken b/hooks/plumgrid-plugin-relation-broken new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-plugin-relation-broken @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-plugin-relation-changed b/hooks/plumgrid-plugin-relation-changed new file mode 120000 index 
0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-plugin-relation-changed @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-plugin-relation-departed b/hooks/plumgrid-plugin-relation-departed new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-plugin-relation-departed @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-plugin-relation-joined b/hooks/plumgrid-plugin-relation-joined new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-plugin-relation-joined @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-relation-broken b/hooks/plumgrid-relation-broken new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-relation-broken @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-relation-changed b/hooks/plumgrid-relation-changed new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-relation-changed @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-relation-departed b/hooks/plumgrid-relation-departed new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-relation-departed @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-relation-joined b/hooks/plumgrid-relation-joined new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/plumgrid-relation-joined @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/stop b/hooks/stop new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/stop @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm new file mode 120000 index 0000000..6530c5a --- /dev/null +++ b/hooks/upgrade-charm @@ -0,0 +1 @@ +pg_dir_hooks.py \ No newline at end of file diff --git a/icon.svg b/icon.svg new file mode 
100644 index 0000000..c1c3ca2 --- /dev/null +++ b/icon.svg @@ -0,0 +1,384 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/metadata.yaml b/metadata.yaml new file mode 100644 index 0000000..e78f55d --- /dev/null +++ b/metadata.yaml @@ -0,0 +1,26 @@ +name: plumgrid-director +subordinate: false +maintainer: Bilal Baqar +summary: "OpenStack Neutron OpenvSwitch Agent" +description: | + Neutron is a virtual network service for Openstack, and a part of + Netstack. Just like OpenStack Nova provides an API to dynamically + request and configure virtual servers, Neutron provides an API to + dynamically request and configure virtual networks. These networks + connect "interfaces" from other OpenStack services (e.g., virtual NICs + from Nova VMs). The Neutron API supports extensions to provide + advanced network capabilities (e.g., QoS, ACLs, network monitoring, + etc.) + . + This charm provides the Plumgrid Director. 
+tags: + - openstack +requires: + plumgrid-plugin: + interface: plumgrid-plugin + neutron-plugin-api: + interface: neutron-plugin-api +provides: + plumgrid: + interface: plumgrid + diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..72c20b2 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,5 @@ +[nosetests] +verbosity=1 +with-coverage=1 +cover-erase=1 +cover-package=hooks diff --git a/templates/icehouse/default.conf b/templates/icehouse/default.conf new file mode 100644 index 0000000..3213c30 --- /dev/null +++ b/templates/icehouse/default.conf @@ -0,0 +1,138 @@ +upstream sal { + server unix:/opt/pg/tmp/sal-web.socket; + keepalive 16; +} + +upstream websocket { + server unix:/opt/pg/tmp/sal-ws.socket; + keepalive 16; +} + +upstream pgCli { + server {{ virtual_ip }}:3000; +} + +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +lua_socket_log_errors off; +#lua_code_cache off; +lua_shared_dict rest_servers 16K; +lua_shared_dict apache_servers 16K; +lua_shared_dict tc_servers 16K; +init_by_lua 'lb = require "lb" +init_servers = { + ["{{ local_ip }}"] = true, +}'; + +# Redirect http to https +server { + listen {{ virtual_ip }}:9080; + server_name $hostname; + return 301 https://$host$request_uri; +} + +server { + listen {{ virtual_ip }}:443 ssl; + ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers AES128-SHA:AES256-SHA:RC4-SHA:DES-CBC3-SHA:RC4-MD5; + ssl_certificate /opt/pg/sal/nginx/ssl/default.crt; + ssl_certificate_key /opt/pg/sal/nginx/ssl/default.key; + #ssl_session_cache shared:SSL:10m; + #ssl_session_timeout 10m; + + server_name $hostname; + root /opt/pg/web; + index login.html; + + location /cli/ { + proxy_pass http://pgCli/; + proxy_redirect off; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + } + + location /vtap/ { + alias /opt/pg/vtap; + } + + # REST API calls start with /v[0-9]/, a keyword, or a capital letter. 
+ # Note: Regular expressions have higher precedence than prefix matches + # so don't combine with /0/... + location ~ ^/(v[0-9]/|pg/|docs|api-docs|[A-Z]) { + set $active_upstream "http://sal"; + access_by_lua 'if ngx.req.get_uri_args()["server"]~=nil then + if ngx.req.get_uri_args()["server"]~=ngx.var.host then + ngx.var.active_upstream = "https://"..ngx.req.get_uri_args()["server"]..ngx.var.request_uri + end + end'; + + proxy_pass $active_upstream; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location /0/ { + set $active_upstream "http://sal"; + access_by_lua 'if ngx.req.get_uri_args()["server"]~=nil then + if ngx.req.get_uri_args()["server"]~=ngx.var.host then + ngx.var.active_upstream = "https://"..ngx.req.get_uri_args()["server"]..ngx.var.request_uri + end + end'; + + proxy_pass $active_upstream; + proxy_http_version 1.1; + proxy_set_header Connection ""; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location /0/websocket { + set $active_upstream "http://websocket"; + access_by_lua 'if ngx.req.get_uri_args()["server"]~=nil then + if ngx.req.get_uri_args()["server"]~=ngx.var.host then + ngx.var.active_upstream = "https://"..ngx.req.get_uri_args()["server"]..ngx.var.request_uri + end + end'; + proxy_pass $active_upstream; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + } +} + +server { + listen unix:/opt/pg/tmp/sal-rest.socket; + + # debug socket + listen 127.0.0.1:9080; + + location / { + set $active_upstream ""; + access_by_lua 'ngx.var.active_upstream = find_next(ngx.shared.rest_servers, 9180)'; + proxy_pass http://$active_upstream:9180; + } + + location /_debug/rest_servers { + access_by_lua 'find_next(ngx.shared.rest_servers, 9180)'; + content_by_lua ' + for _, ip in pairs(ngx.shared.rest_servers:get_keys()) do + ngx.say(ip.."="..ngx.shared.rest_servers:get(ip)) + end + '; + } 
+ + location /_debug/tc_servers { + access_by_lua 'find_next(ngx.shared.tc_servers, 12349)'; + content_by_lua ' + for _, ip in pairs(ngx.shared.tc_servers:get_keys()) do + ngx.say(ip.."="..ngx.shared.tc_servers:get(ip)) + end + '; + } +} + diff --git a/templates/icehouse/hostname b/templates/icehouse/hostname new file mode 100644 index 0000000..1712a7b --- /dev/null +++ b/templates/icehouse/hostname @@ -0,0 +1,2 @@ +{{ pg_hostname }} + diff --git a/templates/icehouse/hosts b/templates/icehouse/hosts new file mode 100644 index 0000000..99e3be5 --- /dev/null +++ b/templates/icehouse/hosts @@ -0,0 +1,10 @@ +127.0.0.1 localhost +127.0.1.1 {{ pg_hostname }} + +# The following lines are desirable for IPv6 capable hosts +::1 ip6-localhost ip6-loopback +fe00::0 ip6-localnet +ff00::0 ip6-mcastprefix +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters + diff --git a/templates/icehouse/ifcs.conf b/templates/icehouse/ifcs.conf new file mode 100644 index 0000000..2ac1e59 --- /dev/null +++ b/templates/icehouse/ifcs.conf @@ -0,0 +1,2 @@ +{{ interface }} = fabric_core host + diff --git a/templates/icehouse/keepalived.conf b/templates/icehouse/keepalived.conf new file mode 100644 index 0000000..c5e08db --- /dev/null +++ b/templates/icehouse/keepalived.conf @@ -0,0 +1,31 @@ +global_defs { + router_id {{ pg_hostname }} +} + +vrrp_script chk_nginx { + script "killall -0 nginx" + interval 2 +} + +vrrp_instance nos { + virtual_router_id {{ virtual_router_id }} + + # for electing MASTER, highest priority wins. 
+ priority 100 + state BACKUP + nopreempt + + interface {{ interface }} + + virtual_ipaddress { + {{ virtual_ip }} dev {{ interface }} label {{ interface }}:1 + } + track_script { + chk_nginx + } + authentication { + auth_type PASS + auth_pass keepaliv + } +} + diff --git a/templates/icehouse/plumgrid.conf b/templates/icehouse/plumgrid.conf new file mode 100644 index 0000000..52a9dc8 --- /dev/null +++ b/templates/icehouse/plumgrid.conf @@ -0,0 +1,11 @@ +plumgrid_ip={{ local_ip }} +plumgrid_port=8001 +mgmt_dev={{ interface }} +label={{ label}} +plumgrid_rsync_port=2222 +plumgrid_rest_addr=0.0.0.0:9180 +fabric_mode={{ fabric_mode }} +start_plumgrid_iovisor=yes +start_plumgrid=`/opt/pg/scripts/pg_is_director.sh $plumgrid_ip` +location= + diff --git a/templates/parts/rabbitmq b/templates/parts/rabbitmq new file mode 100644 index 0000000..7aabc51 --- /dev/null +++ b/templates/parts/rabbitmq @@ -0,0 +1,21 @@ +{% if rabbitmq_host or rabbitmq_hosts -%} +rabbit_userid = {{ rabbitmq_user }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +rabbit_password = {{ rabbitmq_password }} +{% if rabbitmq_hosts -%} +rabbit_hosts = {{ rabbitmq_hosts }} +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +rabbit_durable_queues = False +{% endif -%} +{% else -%} +rabbit_host = {{ rabbitmq_host }} +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +rabbit_port = {{ rabbit_ssl_port }} +{% if rabbit_ssl_ca -%} +kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} +{% endif -%} +{% endif -%} +{% endif -%} diff --git a/tests/00-setup b/tests/00-setup new file mode 100755 index 0000000..858fe13 --- /dev/null +++ b/tests/00-setup @@ -0,0 +1,5 @@ +#!/bin/bash + +sudo add-apt-repository ppa:juju/stable -y +sudo apt-get update +sudo apt-get install amulet python3-requests juju-deployer -y diff --git a/tests/14-juno b/tests/14-juno new file mode 100755 index 0000000..c26f030 --- /dev/null +++ b/tests/14-juno @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 + +import amulet +import requests +import 
unittest + +class TestDeployment(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.deployment = amulet.Deployment(series='trusty') + cls.deployment.load_bundle_file(bundle_file='files/plumgrid-director.yaml', deployment_name='test') + try: + cls.deployment.setup(timeout=2000) + cls.deployment.sentry.wait() + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time") + except: + raise + + def test_plumgrid_director_gui(self): + plumgrid_vip = self.deployment.services['plumgrid-director']['options']['plumgrid-virtual-ip'] + if not plumgrid_vip: + amulet.raise_status(amulet.FAIL, msg='plumgrid virtual ip was not found.') + gui_req = requests.get("https://{}/".format(plumgrid_vip), verify=False) + if not gui_req.ok: + amulet.raise_status(amulet.FAIL, msg='unable to access plumgrid gui.') + + def test_plumgrid_director_started(self): + agent_state = self.deployment.sentry['plumgrid-director/0'].info['agent-state'] + if agent_state != 'started': + amulet.raise_status(amulet.FAIL, msg='plumgrid director is not in a started state') + + def test_plumgrid_director_relation(self): + relation = self.deployment.sentry['plumgrid-director/0'].relation('plumgrid-plugin', 'neutron-iovisor:plumgrid-plugin') + if not relation['private-address']: + amulet.raise_status(amulet.FAIL, msg='private address was not set in the plumgrid director relation') + +if __name__ == '__main__': + unittest.main() diff --git a/tests/files/plumgrid-director.yaml b/tests/files/plumgrid-director.yaml new file mode 100644 index 0000000..cbf504a --- /dev/null +++ b/tests/files/plumgrid-director.yaml @@ -0,0 +1,98 @@ +test: + series: 'trusty' + relations: + - - neutron-api + - neutron-iovisor + - - neutron-iovisor + - plumgrid-director + - - nova-cloud-controller + - nova-compute + - - glance + - nova-compute + - - nova-compute + - rabbitmq-server + - - mysql + - nova-compute + - - cinder + - nova-cloud-controller + - - nova-cloud-controller 
+ - rabbitmq-server + - - glance + - nova-cloud-controller + - - keystone + - nova-cloud-controller + - - mysql + - nova-cloud-controller + - - neutron-api + - nova-cloud-controller + services: + cinder: + charm: cs:trusty/cinder + num_units: 1 + options: + openstack-origin: cloud:trusty-juno + to: 'lxc:0' + glance: + charm: cs:trusty/glance + num_units: 1 + options: + openstack-origin: cloud:trusty-juno + to: 'lxc:0' + keystone: + charm: cs:trusty/keystone + num_units: 1 + options: + admin-password: plumgrid + openstack-origin: cloud:trusty-juno + to: 'lxc:0' + mysql: + charm: cs:trusty/mysql + num_units: 1 + to: 'lxc:0' + neutron-api: + charm: cs:~juliann/trusty/neutron-api + num_units: 1 + options: + install_keys: 'null' + install_sources: "deb http://10.22.24.200/debs ./" + neutron-plugin: "plumgrid" + neutron-security-groups: "true" + openstack-origin: "cloud:trusty-juno" + plumgrid-password: "plumgrid" + plumgrid-username: "plumgrid" + plumgrid-virtual-ip: "192.168.100.250" + to: 'lxc:0' + neutron-iovisor: + charm: cs:~juliann/trusty/neutron-iovisor + num_units: 1 + options: + install_keys: 'null' + install_sources: "deb http://10.22.24.200/debs ./" + to: 'nova-compute' + plumgrid-director: + charm: cs:~juliann/trusty/plumgrid-director + num_units: 1 + options: + plumgrid-virtual-ip: 192.168.100.250 + to: 'nova-compute' + nova-cloud-controller: + charm: cs:trusty/nova-cloud-controller + num_units: 1 + options: + console-access-protocol: novnc + network-manager: Neutron + openstack-origin: cloud:trusty-juno + quantum-security-groups: 'yes' + to: 'lxc:0' + nova-compute: + charm: cs:~juliann/trusty/nova-compute + num_units: 1 + options: + enable-live-migration: true + enable-resize: true + migration-auth-type: ssh + openstack-origin: cloud:trusty-juno + rabbitmq-server: + charm: cs:trusty/rabbitmq-server + num_units: 1 + to: 'lxc:0' diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py new file mode 100644 index 0000000..43aa361 --- /dev/null +++ 
b/unit_tests/__init__.py @@ -0,0 +1,4 @@ +import sys + +sys.path.append('actions/') +sys.path.append('hooks/') diff --git a/unit_tests/test_pg_dir_context.py b/unit_tests/test_pg_dir_context.py new file mode 100644 index 0000000..6c48610 --- /dev/null +++ b/unit_tests/test_pg_dir_context.py @@ -0,0 +1,87 @@ +from test_utils import CharmTestCase +from mock import patch +import pg_dir_context as context +import charmhelpers + +TO_PATCH = [ + #'resolve_address', + 'config', + 'unit_get', + 'get_host_ip', + 'get_unit_hostname', +] + + +def fake_context(settings): + def outer(): + def inner(): + return settings + return inner + return outer + + +class PGDirContextTest(CharmTestCase): + + def setUp(self): + super(PGDirContextTest, self).setUp(context, TO_PATCH) + self.config.side_effect = self.test_config.get + self.test_config.set('plumgrid-virtual-ip', '192.168.100.250') + + def tearDown(self): + super(PGDirContextTest, self).tearDown() + + @patch.object(charmhelpers.contrib.openstack.context, 'config', + lambda *args: None) + @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') + @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') + @patch.object(charmhelpers.contrib.openstack.context, 'related_units') + @patch.object(charmhelpers.contrib.openstack.context, 'config') + @patch.object(charmhelpers.contrib.openstack.context, 'unit_get') + @patch.object(charmhelpers.contrib.openstack.context, 'is_clustered') + @patch.object(charmhelpers.contrib.openstack.context, 'https') + @patch.object(context.PGDirContext, '_save_flag_file') + @patch.object(context.PGDirContext, '_ensure_packages') + @patch.object(charmhelpers.contrib.openstack.context, + 'neutron_plugin_attribute') + @patch.object(charmhelpers.contrib.openstack.context, 'unit_private_ip') + def test_neutroncc_context_api_rel(self, _unit_priv_ip, _npa, _ens_pkgs, + _save_ff, _https, _is_clus, _unit_get, + _config, _runits, _rids, _rget): + def mock_npa(plugin, section, manager): + if 
section == "driver": + return "neutron.randomdriver" + if section == "config": + return "neutron.randomconfig" + + config = {'plumgrid-virtual-ip': "192.168.100.250"} + + def mock_config(key=None): + if key: + return config.get(key) + + return config + + self.maxDiff = None + self.config.side_effect = mock_config + _npa.side_effect = mock_npa + _unit_get.return_value = '192.168.100.201' + _unit_priv_ip.return_value = '192.168.100.201' + self.get_unit_hostname.return_value = 'node0' + self.get_host_ip.return_value = '192.168.100.201' + napi_ctxt = context.PGDirContext() + expect = { + 'config': 'neutron.randomconfig', + 'core_plugin': 'neutron.randomdriver', + 'local_ip': '192.168.100.201', + 'network_manager': 'neutron', + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': None, + 'neutron_url': 'https://None:9696', + 'virtual_ip': '192.168.100.250', + 'pg_hostname': 'pg-director', + 'interface': 'juju-br0', + 'label': 'node0', + 'fabric_mode': 'host', + 'virtual_router_id': '250', + } + self.assertEquals(expect, napi_ctxt()) diff --git a/unit_tests/test_pg_dir_hooks.py b/unit_tests/test_pg_dir_hooks.py new file mode 100644 index 0000000..46a3598 --- /dev/null +++ b/unit_tests/test_pg_dir_hooks.py @@ -0,0 +1,76 @@ +from mock import MagicMock, patch + + +from test_utils import CharmTestCase + +with patch('charmhelpers.core.hookenv.config') as config: + config.return_value = 'neutron' + import pg_dir_utils as utils + +_reg = utils.register_configs +_map = utils.restart_map + +utils.register_configs = MagicMock() +utils.restart_map = MagicMock() + +import pg_dir_hooks as hooks + +utils.register_configs = _reg +utils.restart_map = _map + +TO_PATCH = [ + #'apt_update', + #'apt_install', + #'apt_purge', + 'config', + 'CONFIGS', + #'determine_packages', + #'determine_dvr_packages', + #'get_shared_secret', + #'git_install', + 'log', + #'relation_ids', + 'relation_set', + #'configure_ovs', + #'use_dvr', + 'ensure_files', + 'stop_pg', + 'restart_pg', +] 
+NEUTRON_CONF_DIR = "/etc/neutron" + +NEUTRON_CONF = '%s/neutron.conf' % NEUTRON_CONF_DIR + + +class PGDirHooksTests(CharmTestCase): + + def setUp(self): + super(PGDirHooksTests, self).setUp(hooks, TO_PATCH) + + self.config.side_effect = self.test_config.get + hooks.hooks._config_save = False + + def _call_hook(self, hookname): + hooks.hooks.execute([ + 'hooks/{}'.format(hookname)]) + + def test_install_hook(self): + self._call_hook('install') + self.ensure_files.assert_called_with() + + def test_plumgrid_edge_joined(self): + self._call_hook('plumgrid-plugin-relation-joined') + self.ensure_files.assert_called_with() + self.CONFIGS.write_all.assert_called_with() + self.restart_pg.assert_called_with() + + def test_plumgrid_joined(self): + relation_data = { + 'pg_virtual_ip': '192.168.100.250', + } + self._call_hook('plumgrid-relation-joined') + self.relation_set.assert_called_with(relation_id=None, **relation_data) + + def test_stop(self): + self._call_hook('stop') + self.stop_pg.assert_called_with() diff --git a/unit_tests/test_pg_dir_utils.py b/unit_tests/test_pg_dir_utils.py new file mode 100644 index 0000000..bf59add --- /dev/null +++ b/unit_tests/test_pg_dir_utils.py @@ -0,0 +1,82 @@ +from mock import MagicMock +from collections import OrderedDict +import charmhelpers.contrib.openstack.templating as templating + +templating.OSConfigRenderer = MagicMock() + +import pg_dir_utils as nutils + +from test_utils import ( + CharmTestCase, +) +import charmhelpers.core.hookenv as hookenv + + +TO_PATCH = [ + 'os_release', + 'neutron_plugin_attribute', +] + + +class DummyContext(): + + def __init__(self, return_value): + self.return_value = return_value + + def __call__(self): + return self.return_value + + +class TestPGDirUtils(CharmTestCase): + + def setUp(self): + super(TestPGDirUtils, self).setUp(nutils, TO_PATCH) + #self.config.side_effect = self.test_config.get + + def tearDown(self): + # Reset cached cache + hookenv.cache = {} + + def test_register_configs(self): + 
class _mock_OSConfigRenderer(): + def __init__(self, templates_dir=None, openstack_release=None): + self.configs = [] + self.ctxts = [] + + def register(self, config, ctxt): + self.configs.append(config) + self.ctxts.append(ctxt) + + self.os_release.return_value = 'trusty' + templating.OSConfigRenderer.side_effect = _mock_OSConfigRenderer + _regconfs = nutils.register_configs() + confs = ['/var/lib/libvirt/filesystems/plumgrid/etc/keepalived/keepalived.conf', + '/var/lib/libvirt/filesystems/plumgrid/opt/pg/etc/plumgrid.conf', + '/var/lib/libvirt/filesystems/plumgrid/opt/pg/sal/nginx/conf.d/default.conf', + '/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hostname', + '/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hosts', + '/var/lib/libvirt/filesystems/plumgrid-data/conf/pg/ifcs.conf'] + self.assertItemsEqual(_regconfs.configs, confs) + + def test_resource_map(self): + _map = nutils.resource_map() + svcs = ['plumgrid'] + confs = [nutils.PGKA_CONF] + [self.assertIn(q_conf, _map.keys()) for q_conf in confs] + self.assertEqual(_map[nutils.PGKA_CONF]['services'], svcs) + + def test_restart_map(self): + _restart_map = nutils.restart_map() + PGKA_CONF = '/var/lib/libvirt/filesystems/plumgrid/etc/keepalived/keepalived.conf' + expect = OrderedDict([ + (nutils.PG_CONF, ['plumgrid']), + (PGKA_CONF, ['plumgrid']), + (nutils.PGDEF_CONF, ['plumgrid']), + (nutils.PGHN_CONF, ['plumgrid']), + (nutils.PGHS_CONF, ['plumgrid']), + (nutils.PGIFCS_CONF, []), + ]) + self.assertEqual(expect, _restart_map) + for item in _restart_map: + self.assertTrue(item in _restart_map) + self.assertTrue(expect[item] == _restart_map[item]) diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py new file mode 100644 index 0000000..86ee0f7 --- /dev/null +++ b/unit_tests/test_utils.py @@ -0,0 +1,121 @@ +import logging +import unittest +import os +import yaml + +from contextlib import contextmanager +from mock import patch, MagicMock + + +def load_config(): + ''' + Walk backwords from 
__file__ looking for config.yaml, load and return the + 'options' section' + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % file) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + ''' + Load default charm config from config.yaml return as a dict. + If no default is set in config.yaml, its value is None. + ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attribute=None, unit=None, rid=None): + if attribute is None: + return self.relation_data + elif attribute in 
self.relation_data: + return self.relation_data[attribute] + return None + + +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. + + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file