From 8139e43440c1daa9674c6151ec058c250460c093 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 18 Jul 2013 19:37:30 -0700 Subject: [PATCH 01/84] Check-in initial python redux. --- config.yaml | 4 +- hooks/__init__.py | 0 hooks/amqp-relation-changed | 2 +- hooks/amqp-relation-joined | 2 +- hooks/ceph-relation-changed | 2 +- hooks/ceph-relation-joined | 2 +- hooks/charmhelpers/__init__.py | 0 hooks/charmhelpers/contrib/__init__.py | 0 .../contrib/hahelpers/__init__.py | 0 .../charmhelpers/contrib/hahelpers/apache.py | 58 ++ hooks/charmhelpers/contrib/hahelpers/ceph.py | 278 +++++++ .../charmhelpers/contrib/hahelpers/cluster.py | 180 ++++ .../contrib/openstack/__init__.py | 0 .../charmhelpers/contrib/openstack/context.py | 271 ++++++ .../contrib/openstack/templates/__init__.py | 2 + .../contrib/openstack/templates/ceph.conf | 11 + .../contrib/openstack/templates/haproxy.cfg | 37 + .../templates/openstack_https_frontend | 23 + .../contrib/openstack/templating.py | 261 ++++++ hooks/charmhelpers/contrib/openstack/utils.py | 273 ++++++ .../charmhelpers/contrib/storage/__init__.py | 0 .../contrib/storage/linux/__init__.py | 0 .../contrib/storage/linux/loopback.py | 59 ++ .../charmhelpers/contrib/storage/linux/lvm.py | 88 ++ .../contrib/storage/linux/utils.py | 25 + hooks/charmhelpers/core/__init__.py | 0 hooks/charmhelpers/core/hookenv.py | 340 ++++++++ hooks/charmhelpers/core/host.py | 269 ++++++ hooks/cloud-compute-relation-changed | 2 +- hooks/cloud-compute-relation-joined | 2 +- hooks/config-changed | 2 +- hooks/image-service-relation-changed | 2 +- hooks/image-service-relation-joined | 1 - hooks/install | 1 - hooks/lib/nova/essex | 43 - hooks/lib/nova/folsom | 135 --- hooks/lib/nova/grizzly | 97 --- hooks/lib/nova/nova-common | 148 ---- hooks/lib/openstack-common | 781 ------------------ hooks/misc_utils.py | 31 + hooks/nova-compute-common | 309 ------- hooks/nova-compute-relations | 329 -------- hooks/nova_compute_relations.py | 155 ++++ 
hooks/nova_compute_utils.py | 75 ++ hooks/shared-db-relation-changed | 2 +- hooks/shared-db-relation-joined | 2 +- hooks/start | 1 - hooks/stop | 1 - 48 files changed, 2448 insertions(+), 1858 deletions(-) create mode 100644 hooks/__init__.py create mode 100644 hooks/charmhelpers/__init__.py create mode 100644 hooks/charmhelpers/contrib/__init__.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/__init__.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/apache.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/ceph.py create mode 100644 hooks/charmhelpers/contrib/hahelpers/cluster.py create mode 100644 hooks/charmhelpers/contrib/openstack/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/context.py create mode 100644 hooks/charmhelpers/contrib/openstack/templates/__init__.py create mode 100644 hooks/charmhelpers/contrib/openstack/templates/ceph.conf create mode 100644 hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg create mode 100644 hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend create mode 100644 hooks/charmhelpers/contrib/openstack/templating.py create mode 100644 hooks/charmhelpers/contrib/openstack/utils.py create mode 100644 hooks/charmhelpers/contrib/storage/__init__.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/__init__.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/loopback.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/lvm.py create mode 100644 hooks/charmhelpers/contrib/storage/linux/utils.py create mode 100644 hooks/charmhelpers/core/__init__.py create mode 100644 hooks/charmhelpers/core/hookenv.py create mode 100644 hooks/charmhelpers/core/host.py delete mode 120000 hooks/image-service-relation-joined delete mode 120000 hooks/install delete mode 100644 hooks/lib/nova/essex delete mode 100644 hooks/lib/nova/folsom delete mode 100644 hooks/lib/nova/grizzly delete mode 100644 hooks/lib/nova/nova-common delete mode 100644 
hooks/lib/openstack-common create mode 100644 hooks/misc_utils.py delete mode 100755 hooks/nova-compute-common delete mode 100755 hooks/nova-compute-relations create mode 100755 hooks/nova_compute_relations.py create mode 100644 hooks/nova_compute_utils.py delete mode 120000 hooks/start delete mode 120000 hooks/stop diff --git a/config.yaml b/config.yaml index 3949b2c4..c0b81f3b 100644 --- a/config.yaml +++ b/config.yaml @@ -26,11 +26,11 @@ options: default: nova type: string decsription: Rabbitmq vhost - db-user: + database-user: default: nova type: string description: Username for database access - nova-db: + database: default: nova type: string description: Database name diff --git a/hooks/__init__.py b/hooks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/amqp-relation-changed b/hooks/amqp-relation-changed index 6f9ff4f5..6eb6593e 120000 --- a/hooks/amqp-relation-changed +++ b/hooks/amqp-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/amqp-relation-joined b/hooks/amqp-relation-joined index 6f9ff4f5..6eb6593e 120000 --- a/hooks/amqp-relation-joined +++ b/hooks/amqp-relation-joined @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/ceph-relation-changed b/hooks/ceph-relation-changed index 6f9ff4f5..6eb6593e 120000 --- a/hooks/ceph-relation-changed +++ b/hooks/ceph-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/ceph-relation-joined b/hooks/ceph-relation-joined index 6f9ff4f5..6eb6593e 120000 --- a/hooks/ceph-relation-joined +++ b/hooks/ceph-relation-joined @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/charmhelpers/__init__.py 
b/hooks/charmhelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 00000000..3208a85c --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,58 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(): + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get('ssl_cert', + rid=r_id, unit=unit) + if not key: + key = relation_get('ssl_key', + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = None + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not ca_cert: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def install_ca_cert(ca_cert): + if ca_cert: + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', + 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/hooks/charmhelpers/contrib/hahelpers/ceph.py 
b/hooks/charmhelpers/contrib/hahelpers/ceph.py new file mode 100644 index 00000000..fb1b8b9b --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/ceph.py @@ -0,0 +1,278 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import commands +import os +import shutil + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, +) + +from charmhelpers.core.host import ( + apt_install, + mount, + mounts, + service_start, + service_stop, + umount, +) + +KEYRING = '/etc/ceph/ceph.client.%s.keyring' +KEYFILE = '/etc/ceph/ceph.client.%s.key' + +CEPH_CONF = """[global] + auth supported = %(auth)s + keyring = %(keyring)s + mon host = %(mon_hosts)s +""" + + +def running(service): + # this local util can be dropped as soon the following branch lands + # in lp:charm-helpers + # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/ + try: + output = check_output(['service', service, 'status']) + except CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def install(): + ceph_dir = "/etc/ceph" + if not os.path.isdir(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' % + (service, pool)) + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) + return name in out + + +def create_pool(service, name): + cmd = [ + 'rados', + '--id', + service, + 'mkpool', + 
name + ] + check_call(cmd) + + +def keyfile_path(service): + return KEYFILE % service + + +def keyring_path(service): + return KEYRING % service + + +def create_keyring(service, key): + keyring = keyring_path(service) + if os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=INFO) + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.%s' % service, + '--add-key=%s' % key + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + # create a file containing the key + keyfile = keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=INFO) + fd = open(keyfile, 'w') + fd.write(key) + fd.close() + log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + +def get_ceph_nodes(): + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth): + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + mon_hosts = ",".join(map(str, hosts)) + keyring = keyring_path(service) + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF % locals()) + modprobe_kernel_module('rbd') + + +def image_mapped(image_name): + (rc, out) = commands.getstatusoutput('rbd showmapped') + return image_name in out + + +def map_block_storage(service, pool, image): + cmd = [ + 'rbd', + 'map', + '%s/%s' % (pool, image), + '--user', + service, + '--secret', + keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + return fs in [f for m, f in mounts()] + + +def make_filesystem(blk_device, fstype='ext4'): + log('ceph: Formatting block device %s as filesystem %s.' 
% + (blk_device, fstype), level=INFO) + cmd = ['mkfs', '-t', fstype, blk_device] + check_call(cmd) + + +def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): + # mount block device into /mnt + mount(blk_device, '/mnt') + + # copy data to /mnt + try: + copy_files(data_src_dst, '/mnt') + except: + pass + + # umount block device + umount('/mnt') + + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + + # re-mount where the data should originally be + mount(blk_device, data_src_dst, persist=True) + + # ensure original ownership of new mount. + cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] + check_call(cmd) + + +# TODO: re-use +def modprobe_kernel_module(module): + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + cmd = 'echo %s >> /etc/modules' % module + check_call(cmd, shell=True) + + +def copy_files(src, dst, symlinks=False, ignore=None): + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[]): + """ + To be called from the current cluster leader. + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being remounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool %s.' % pool, level=INFO) + create_pool(service, pool) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image (%s).' 
% rbd_img, level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image as a Block Device.', level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if running(svc): + log('Stopping services %s prior to migrating data.' % svc, + level=INFO) + service_stop(svc) + + place_data_on_ceph(service, blk_device, mount_point, fstype) + + for svc in system_services: + service_start(svc) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 00000000..dde6c9bb --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,180 @@ +# +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess +import os + +from socket import gethostname as get_unit_hostname + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + ERROR, +) + + +class HAIncompleteConfig(Exception): + pass + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_leader(resource): + cmd = [ + "crm", "resource", + "show", resource + ] + try: + status = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + return False + else: + if get_unit_hostname() in status: + return True + else: + return False + + +def peer_units(): + peers = [] + for r_id in (relation_ids('cluster') or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def oldest_peer(peers): + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + if is_clustered(): + if not is_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . 
+ returns: boolean + ''' + if config_get('use-https') == "yes": + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if None not in [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ssl_cert', rid=r_id, unit=unit), + relation_get('ssl_key', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ]: + return True + return False + + +def determine_api_port(public_port): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + returns: int: the correct listening port for the API service + ''' + i = 0 + if len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_haproxy_port(public_port): + ''' + Description: Determine correct proxy listening port based on public IP + + existence of HTTPS reverse proxy. + + public_port: int: standard public port for given service + + returns: int: the correct listening port for the HAProxy service + ''' + i = 0 + if https(): + i += 1 + return public_port - (i * 10) + + +def get_hacluster_config(): + ''' + Obtains all relevant configuration from charm configuration required + for initiating a relation to hacluster: + + ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr + + returns: dict: A dict containing settings keyed by setting name. + raises: HAIncompleteConfig if settings are missing. 
+ ''' + settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] + conf = {} + for setting in settings: + conf[setting] = config_get(setting) + missing = [] + [missing.append(s) for s, v in conf.iteritems() if v is None] + if missing: + log('Insufficient config data to configure hacluster.', level=ERROR) + raise HAIncompleteConfig + return conf + + +def canonical_url(configs, vip_setting='vip'): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration and hacluster. + + :configs : OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + :vip_setting: str: Setting in charm config that specifies + VIP address. + ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = get_unit_hostname() + return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py new file mode 100644 index 00000000..f146e0bc --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,271 @@ +import os + +from base64 import b64decode + +from subprocess import ( + check_call +) + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_get, + relation_ids, + related_units, + unit_get, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + determine_api_port, + determine_haproxy_port, + https, + is_clustered, + peer_units, +) + +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, +) + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' + + +class OSContextError(Exception): + pass + + +def context_complete(ctxt): + _missing = [] + for k, v in ctxt.iteritems(): + if v is None or 
v == '': + _missing.append(k) + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level='INFO') + return False + return True + + +class OSContextGenerator(object): + interfaces = [] + + def __call__(self): + raise NotImplementedError + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __call__(self): + log('Generating template context for shared-db') + conf = config() + try: + database = conf['database'] + username = conf['database-user'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' % e) + raise OSContextError + ctxt = {} + for rid in relation_ids('shared-db'): + for unit in related_units(rid): + ctxt = { + 'database_host': relation_get('db_host', rid=rid, + unit=unit), + 'database': database, + 'database_user': username, + 'database_password': relation_get('password', rid=rid, + unit=unit) + } + if not context_complete(ctxt): + return {} + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + interfaces = ['identity-service'] + + def __call__(self): + log('Generating template context for identity-service') + ctxt = {} + + for rid in relation_ids('identity-service'): + for unit in related_units(rid): + ctxt = { + 'service_port': relation_get('service_port', rid=rid, + unit=unit), + 'service_host': relation_get('service_host', rid=rid, + unit=unit), + 'auth_host': relation_get('auth_host', rid=rid, unit=unit), + 'auth_port': relation_get('auth_port', rid=rid, unit=unit), + 'admin_tenant_name': relation_get('service_tenant', + rid=rid, unit=unit), + 'admin_user': relation_get('service_username', rid=rid, + unit=unit), + 'admin_password': relation_get('service_password', rid=rid, + unit=unit), + # XXX: Hard-coded http. 
+ 'service_protocol': 'http', + 'auth_protocol': 'http', + } + if not context_complete(ctxt): + return {} + return ctxt + + +class AMQPContext(OSContextGenerator): + interfaces = ['amqp'] + + def __call__(self): + log('Generating template context for amqp') + conf = config() + try: + username = conf['rabbit-user'] + vhost = conf['rabbit-vhost'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' % e) + raise OSContextError + + ctxt = {} + for rid in relation_ids('amqp'): + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + rabbitmq_host = relation_get('vip', rid=rid, unit=unit) + else: + rabbitmq_host = relation_get('private-address', + rid=rid, unit=unit) + ctxt = { + 'rabbitmq_host': rabbitmq_host, + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + } + if not context_complete(ctxt): + return {} + return ctxt + + +class CephContext(OSContextGenerator): + interfaces = ['ceph'] + + def __call__(self): + '''This generates context for /etc/ceph/ceph.conf templates''' + log('Generating tmeplate context for ceph') + mon_hosts = [] + auth = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + mon_hosts.append(relation_get('private-address', rid=rid, + unit=unit)) + auth = relation_get('auth', rid=rid, unit=unit) + + ctxt = { + 'mon_hosts': ' '.join(mon_hosts), + 'auth': auth, + } + if not context_complete(ctxt): + return {} + return ctxt + + +class HAProxyContext(OSContextGenerator): + interfaces = ['cluster'] + + def __call__(self): + ''' + Builds half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. 
+ ''' + if not relation_ids('cluster'): + return {} + + cluster_hosts = {} + l_unit = local_unit().replace('/', '-') + cluster_hosts[l_unit] = unit_get('private-address') + + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + addr = relation_get('private-address', rid=rid, unit=unit) + cluster_hosts[_unit] = addr + + ctxt = { + 'units': cluster_hosts, + } + if len(cluster_hosts.keys()) > 1: + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.') + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + return ctxt + log('HAProxy context is incomplete, this unit has no peers.') + return {} + + +class ApacheSSLContext(OSContextGenerator): + """ + Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like: + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. 
+ external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self): + if not os.path.isdir('/etc/apache2/ssl'): + os.mkdir('/etc/apache2/ssl') + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + if not os.path.isdir(ssl_dir): + os.mkdir(ssl_dir) + cert, key = get_cert() + with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: + cert_out.write(b64decode(cert)) + with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: + key_out.write(b64decode(key)) + ca_cert = get_ca_cert() + if ca_cert: + with open(CA_CERT_PATH, 'w') as ca_out: + ca_out.write(b64decode(ca_cert)) + + def __call__(self): + if isinstance(self.external_ports, basestring): + self.external_ports = [self.external_ports] + if (not self.external_ports or not https()): + return {} + + self.configure_cert() + self.enable_modules() + + ctxt = { + 'namespace': self.service_namespace, + 'private_address': unit_get('private-address'), + 'endpoints': [] + } + for ext_port in self.external_ports: + if peer_units() or is_clustered(): + int_port = determine_haproxy_port(ext_port) + else: + int_port = determine_api_port(ext_port) + portmap = (int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 00000000..0b49ad28 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,2 @@ +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf new file mode 100644 index 00000000..1d8ca3b4 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -0,0 +1,11 @@ 
+############################################################################### +# [ WARNING ] +# cinder configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### +{% if auth %} +[global] + auth_supported = {{ auth }} + keyring = /etc/ceph/$cluster.$name.keyring + mon host = {{ mon_hosts }} +{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg new file mode 100644 index 00000000..b184cd4a --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -0,0 +1,37 @@ +global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 20000 + user haproxy + group haproxy + spread-checks 0 + +defaults + log global + mode http + option httplog + option dontlognull + retries 3 + timeout queue 1000 + timeout connect 1000 + timeout client 30000 + timeout server 30000 + +listen stats :8888 + mode http + stats enable + stats hide-version + stats realm Haproxy\ Statistics + stats uri / + stats auth admin:password + +{% if units %} +{% for service, ports in service_ports.iteritems() -%} +listen {{ service }} 0.0.0.0:{{ ports[0] }} + balance roundrobin + option tcplog + {% for unit, address in units.iteritems() -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endfor %} +{% endfor %} +{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend new file mode 100644 index 00000000..e833a715 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -0,0 +1,23 @@ +{% if endpoints %} +{% for ext, int in endpoints %} +Listen {{ ext }} +NameVirtualHost *:{{ ext }} + + ServerName {{ private_address }} + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert + SSLCertificateKeyFile /etc/apache2/ssl/{{ 
namespace }}/key + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + + + Order deny,allow + Allow from all + + + Order allow,deny + Allow from all + +{% endfor %} +{% endif %} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 00000000..c555cc6e --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,261 @@ +import os + +from charmhelpers.core.host import apt_install + +from charmhelpers.core.hookenv import ( + log, + ERROR, + INFO +) + +from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES + +try: + from jinja2 import FileSystemLoader, ChoiceLoader, Environment +except ImportError: + # python-jinja2 may not be installed yet, or we're running unittests. + FileSystemLoader = ChoiceLoader = Environment = None + + +class OSConfigException(Exception): + pass + + +def get_loader(templates_dir, os_release): + """ + Create a jinja2.ChoiceLoader containing template dirs up to + and including os_release. If directory template directory + is missing at templates_dir, it will be omitted from the loader. + templates_dir is added to the bottom of the search list as a base + loading dir. + + A charm may also ship a templates dir with this module + and it will be appended to the bottom of the search list, eg: + hooks/charmhelpers/contrib/openstack/templates. + + :param templates_dir: str: Base template directory containing release + sub-directories. + :param os_release : str: OpenStack release codename to construct template + loader. + + :returns : jinja2.ChoiceLoader constructed with a list of + jinja2.FilesystemLoaders, ordered in descending + order by OpenStack release. + """ + tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) + for rel in OPENSTACK_CODENAMES.itervalues()] + + if not os.path.isdir(templates_dir): + log('Templates directory not found @ %s.' 
% templates_dir, + level=ERROR) + raise OSConfigException + + # the bottom contains templates_dir and possibly a common templates dir + # shipped with the helper. + loaders = [FileSystemLoader(templates_dir)] + helper_templates = os.path.join(os.path.dirname(__file__), 'templates') + if os.path.isdir(helper_templates): + loaders.append(FileSystemLoader(helper_templates)) + + for rel, tmpl_dir in tmpl_dirs: + if os.path.isdir(tmpl_dir): + loaders.insert(0, FileSystemLoader(tmpl_dir)) + if rel == os_release: + break + log('Creating choice loader with dirs: %s' % + [l.searchpath for l in loaders], level=INFO) + return ChoiceLoader(loaders) + + +class OSConfigTemplate(object): + """ + Associates a config file template with a list of context generators. + Responsible for constructing a template context based on those generators. + """ + def __init__(self, config_file, contexts): + self.config_file = config_file + + if hasattr(contexts, '__call__'): + self.contexts = [contexts] + else: + self.contexts = contexts + + self._complete_contexts = [] + + def context(self): + ctxt = {} + for context in self.contexts: + _ctxt = context() + if _ctxt: + ctxt.update(_ctxt) + # track interfaces for every complete context. + [self._complete_contexts.append(interface) + for interface in context.interfaces + if interface not in self._complete_contexts] + return ctxt + + def complete_contexts(self): + ''' + Return a list of interfaces that have satisfied contexts. + ''' + if self._complete_contexts: + return self._complete_contexts + self.context() + return self._complete_contexts + + +class OSConfigRenderer(object): + """ + This class provides a common templating system to be used by OpenStack + charms. It is intended to help charms share common code and templates, + and ease the burden of managing config templates across multiple OpenStack + releases. 
+ + Basic usage: + # import some common context generators from charmhelpers + from charmhelpers.contrib.openstack import context + + # Create a renderer object for a specific OS release. + configs = OSConfigRenderer(templates_dir='/tmp/templates', + openstack_release='folsom') + # register some config files with context generators. + configs.register(config_file='/etc/nova/nova.conf', + contexts=[context.SharedDBContext(), + context.AMQPContext()]) + configs.register(config_file='/etc/nova/api-paste.ini', + contexts=[context.IdentityServiceContext()]) + configs.register(config_file='/etc/haproxy/haproxy.conf', + contexts=[context.HAProxyContext()]) + # write out a single config + configs.write('/etc/nova/nova.conf') + # write out all registered configs + configs.write_all() + + Details: + + OpenStack Releases and template loading + --------------------------------------- + When the object is instantiated, it is associated with a specific OS + release. This dictates how the template loader will be constructed. + + The constructed loader attempts to load the template from several places + in the following order: + - from the most recent OS release-specific template dir (if one exists) + - the base templates_dir + - a template directory shipped in the charm with this helper file. + + + For the example above, '/tmp/templates' contains the following structure: + /tmp/templates/nova.conf + /tmp/templates/api-paste.ini + /tmp/templates/grizzly/api-paste.ini + /tmp/templates/havana/api-paste.ini + + Since it was registered with the grizzly release, it first searches + the grizzly directory for nova.conf, then the templates dir. + + When writing api-paste.ini, it will find the template in the grizzly + directory. + + If the object were created with folsom, it would fall back to the + base templates dir for its api-paste.ini template. 
+ + This system should help manage changes in config files through + openstack releases, allowing charms to fall back to the most recently + updated config template for a given release + + The haproxy.conf, since it is not shipped in the templates dir, will + be loaded from the module directory's template directory, eg + $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows + us to ship common templates (haproxy, apache) with the helpers. + + Context generators + --------------------------------------- + Context generators are used to generate template contexts during hook + execution. Doing so may require inspecting service relations, charm + config, etc. When registered, a config file is associated with a list + of generators. When a template is rendered and written, all context + generates are called in a chain to generate the context dictionary + passed to the jinja2 template. See context.py for more info. + """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + apt_install('python-jinja2') + + def register(self, config_file, contexts): + """ + Register a config file with a list of context generators to be called + during rendering. 
+ """ + self.templates[config_file] = OSConfigTemplate(config_file=config_file, + contexts=contexts) + log('Registered config file: %s' % config_file, level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from %s' % template.filename, level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + ctxt = self.templates[config_file].context() + _tmpl = os.path.basename(config_file) + log('Rendering from template: %s' % _tmpl, level=INFO) + template = self._get_template(_tmpl) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. + """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + with open(config_file, 'wb') as out: + out.write(self.render(config_file)) + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in self.templates.iterkeys()] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. 
+ ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in self.templates.itervalues()] + return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 00000000..5da85b36 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# Common python helper functions used for OpenStack charms. + +from collections import OrderedDict + +import apt_pkg as apt +import subprocess +import os +import sys + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, + charm_dir, +) + +from charmhelpers.core.host import ( + lsb_release, + apt_install, +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), +]) + +# The ugly duckling +SWIFT_CODENAMES = { + '1.4.3': 'diablo', + '1.4.8': 'essex', + '1.7.4': 'folsom', + '1.7.6': 'grizzly', + '1.7.7': 'grizzly', + '1.8.0': 'grizzly', + '1.9.0': 'havana', + '1.9.1': 'havana', +} + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src == 'distro': + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % 
ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + apt.init() + cache = apt.Cache() + + try: + pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. 
+ e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.UpstreamVersion(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + vers = vers[:5] + return SWIFT_CODENAMES[vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in vers_map.iteritems(): + if cname == codename: + return version + #e = "Could not determine OpenStack version for package: %s" % pkg + #error_out(e) + + +def import_key(keyid): + cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. 
+ os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. + pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in env_vars.iteritems() if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. 
+ + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + + """ + + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 00000000..9fb87a2e --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,59 @@ + +import os +import re + +from subprocess import ( + check_call, + check_output, +) + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + devs = [d.strip().split(' ') for d in + check_output(cmd).splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. 
+ + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + cmd = ['losetup', '--find', file_path] + return check_output(cmd).strip() + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If a loopback device is not mapped to the file, a new one will be created. + + TODO: Confirm size of found loopback device. + + :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) + ''' + for d, f in loopback_devices().iteritems(): + if f == path: + return d + + if not os.path.exists(path): + cmd = ['truncate', '--size', size, path] + check_call(cmd) + + return create_loopback(path) diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py new file mode 100644 index 00000000..6e29181a --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -0,0 +1,88 @@ +from subprocess import ( + CalledProcessError, + check_call, + check_output, + Popen, + PIPE, +) + + +################################################## +# LVM helpers. +################################################## +def deactivate_lvm_volume_group(block_device): + ''' + Deactivate any volume group associated with an LVM physical volume. + + :param block_device: str: Full path to LVM physical volume + ''' + vg = list_lvm_volume_group(block_device) + if vg: + cmd = ['vgchange', '-an', vg] + check_call(cmd) + + +def is_lvm_physical_volume(block_device): + ''' + Determine whether a block device is initialized as an LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: boolean: True if block device is a PV, False if not. + ''' + try: + check_output(['pvdisplay', block_device]) + return True + except CalledProcessError: + return False + + +def remove_lvm_physical_volume(block_device): + ''' + Remove LVM PV signatures from a given block device. + + :param block_device: str: Full path of block device to scrub. 
+ ''' + p = Popen(['pvremove', '-ff', block_device], + stdin=PIPE) + p.communicate(input='y\n') + + +def list_lvm_volume_group(block_device): + ''' + List LVM volume group associated with a given block device. + + Assumes block device is a valid LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: str: Name of volume group associated with block device or None + ''' + vg = None + pvd = check_output(['pvdisplay', block_device]).splitlines() + for l in pvd: + if l.strip().startswith('VG Name'): + vg = ' '.join(l.split()).split(' ').pop() + return vg + + +def create_lvm_physical_volume(block_device): + ''' + Initialize a block device as an LVM physical volume. + + :param block_device: str: Full path of block device to initialize. + + ''' + check_call(['pvcreate', block_device]) + + +def create_lvm_volume_group(volume_group, block_device): + ''' + Create an LVM volume group backed by a given block device. + + Assumes block device has already been initialized as an LVM PV. + + :param volume_group: str: Name of volume group to create. + :block_device: str: Full path of PV-initialized block device. + ''' + check_call(['vgcreate', volume_group, block_device]) diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 00000000..5b9b6d47 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as pat of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. 
+ ''' + check_call(['sgdisk', '--zap-all', block_device]) diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..2b06706c --- /dev/null +++ b/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,340 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. 
+ got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. + self.data = state + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return json.loads(subprocess.check_output(config_cmd_line)) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', 
'--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): 
+ "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise 
UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py new file mode 100644 index 00000000..fee4216b --- /dev/null +++ b/hooks/charmhelpers/core/host.py @@ -0,0 +1,269 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from collections import OrderedDict + +from hookenv import log + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + 
else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, content, owner='root', group='root', perms=0444): + """Create or overwrite a file with the contents of a string""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + 
apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def mount(device, mountpoint, options=None, persist=False): + '''Mount a filesystem''' + cmd_args = ['mount'] + if options is not None: + cmd_args.extend(['-o', options]) + cmd_args.extend([device, mountpoint]) + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def umount(mountpoint, persist=False): + '''Unmount a filesystem''' + cmd_args = ['umount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError, e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + if persist: + # TODO: update fstab + pass + return True + + +def mounts(): + '''List of all mounted volumes as [[mountpoint,device],[...]]''' + with open('/proc/mounts') as f: + # [['/mount/point','/dev/path'],[...]] + system_mounts = [m[1::-1] for m in [l.strip().split() + for l in f.readlines()]] + return system_mounts + + +def file_hash(path): + ''' Generate a md5 hash of the 
contents of 'path' or None if not found ''' + if os.path.exists(path): + h = hashlib.md5() + with open(path, 'r') as source: + h.update(source.read()) # IGNORE:E1101 - it does have update + return h.hexdigest() + else: + return None + + +def restart_on_change(restart_map): + ''' Restart services based on configuration files changing + + This function is used a decorator, for example + + @restart_on_change({ + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + }) + def ceph_client_changed(): + ... + + In this example, the cinder-api and cinder-volume services + would be restarted if /etc/ceph/ceph.conf is changed by the + ceph_client_changed function. + ''' + def wrap(f): + def wrapped_f(*args): + checksums = {} + for path in restart_map: + checksums[path] = file_hash(path) + f(*args) + restarts = [] + for path in restart_map: + if checksums[path] != file_hash(path): + restarts += restart_map[path] + for service_name in list(OrderedDict.fromkeys(restarts)): + service('restart', service_name) + return wrapped_f + return wrap + + +def lsb_release(): + '''Return /etc/lsb-release in a dict''' + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d diff --git a/hooks/cloud-compute-relation-changed b/hooks/cloud-compute-relation-changed index 6f9ff4f5..6eb6593e 120000 --- a/hooks/cloud-compute-relation-changed +++ b/hooks/cloud-compute-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/cloud-compute-relation-joined b/hooks/cloud-compute-relation-joined index 6f9ff4f5..6eb6593e 120000 --- a/hooks/cloud-compute-relation-joined +++ b/hooks/cloud-compute-relation-joined @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/config-changed b/hooks/config-changed index 6f9ff4f5..6eb6593e 120000 --- 
a/hooks/config-changed +++ b/hooks/config-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/image-service-relation-changed b/hooks/image-service-relation-changed index 6f9ff4f5..6eb6593e 120000 --- a/hooks/image-service-relation-changed +++ b/hooks/image-service-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/image-service-relation-joined b/hooks/image-service-relation-joined deleted file mode 120000 index 6f9ff4f5..00000000 --- a/hooks/image-service-relation-joined +++ /dev/null @@ -1 +0,0 @@ -nova-compute-relations \ No newline at end of file diff --git a/hooks/install b/hooks/install deleted file mode 120000 index 6f9ff4f5..00000000 --- a/hooks/install +++ /dev/null @@ -1 +0,0 @@ -nova-compute-relations \ No newline at end of file diff --git a/hooks/lib/nova/essex b/hooks/lib/nova/essex deleted file mode 100644 index a82bf295..00000000 --- a/hooks/lib/nova/essex +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -e - -# Essex-specific functions - -nova_set_or_update() { - # Set a config option in nova.conf or api-paste.ini, depending - # Defaults to updating nova.conf - local key=$1 - local value=$2 - local conf_file=$3 - local pattern="" - - local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} - local api_conf=${API_CONF:-/etc/nova/api-paste.ini} - local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} - [[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1 - [[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1 - [[ -z "$conf_file" ]] && conf_file=$nova_conf - - case "$conf_file" in - "$nova_conf") match="\-\-$key=" - pattern="--$key=" - out=$pattern - ;; - "$api_conf"|"$libvirtd_conf") match="^$key = " - pattern="$match" - out="$key = " - ;; - *) error_out "ERROR: set_or_update: Invalid conf_file 
($conf_file)" - esac - - cat $conf_file | grep "$match$value" >/dev/null && - juju-log "$CHARM: $key=$value already in set in $conf_file" \ - && return 0 - if cat $conf_file | grep "$match" >/dev/null ; then - juju-log "$CHARM: Updating $conf_file, $key=$value" - sed -i "s|\($pattern\).*|\1$value|" $conf_file - else - juju-log "$CHARM: Setting new option $key=$value in $conf_file" - echo "$out$value" >>$conf_file - fi - CONFIG_CHANGED=True -} diff --git a/hooks/lib/nova/folsom b/hooks/lib/nova/folsom deleted file mode 100644 index e8194d8d..00000000 --- a/hooks/lib/nova/folsom +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash -e - -# Folsom-specific functions - -nova_set_or_update() { - # Set a config option in nova.conf or api-paste.ini, depending - # Defaults to updating nova.conf - local key="$1" - local value="$2" - local conf_file="$3" - local section="${4:-DEFAULT}" - - local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} - local api_conf=${API_CONF:-/etc/nova/api-paste.ini} - local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf} - local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini} - local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini} - local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} - - [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1 - [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1 - - [[ -z "$conf_file" ]] && conf_file=$nova_conf - - local pattern="" - case "$conf_file" in - "$nova_conf") match="^$key=" - pattern="$key=" - out=$pattern - ;; - "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \ - "$libvirtd_conf") - match="^$key = " - pattern="$match" - out="$key = " - ;; - *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)" - esac - - cat $conf_file | grep "$match$value" >/dev/null && - juju-log "$CHARM: $key=$value already in set in $conf_file" \ - 
&& return 0 - - case $conf_file in - "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf") - python -c " -import ConfigParser -config = ConfigParser.RawConfigParser() -config.read('$conf_file') -config.set('$section','$key','$value') -with open('$conf_file', 'wb') as configfile: - config.write(configfile) -" - ;; - *) - if cat $conf_file | grep "$match" >/dev/null ; then - juju-log "$CHARM: Updating $conf_file, $key=$value" - sed -i "s|\($pattern\).*|\1$value|" $conf_file - else - juju-log "$CHARM: Setting new option $key=$value in $conf_file" - echo "$out$value" >>$conf_file - fi - ;; - esac - CONFIG_CHANGED="True" -} - -# Upgrade Helpers -nova_pre_upgrade() { - # Pre-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - return 0 # Nothing to do here, yet. -} - -nova_post_upgrade() { - # Post-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - local upgrade_from="$1" - juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom." - # We only support essex -> folsom, currently. - [[ "$upgrade_from" != "essex" ]] && - error_out "Unsupported upgrade: $upgrade_from -> folsom" - - # This may be dangerous, if we are upgrading a number of units at once - # and they all begin the same migration concurrently. Migrate only from - # the cloud controller(s). - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then - juju-log "$CHARM: Migrating nova database." - /usr/bin/nova-manage db sync - - # Trigger a service restart on all other nova nodes. - trigger_remote_service_restarts - fi - - # Packaging currently takes care of converting the Essex gflags format - # to .ini, but we need to update the api-paste.ini manually. It can be - # updated directly from keystone, via the identity-service relation, - # if it exists. Only services that require keystone credentials will - # have modified api-paste.ini, and only those services will have a .dpkg-dist - # version present. 
- local r_id=$(relation-ids identity-service) - if [[ -n "$r_id" ]] && [[ -e "$CONF_DIR/api-paste.ini.dpkg-dist" ]] ; then - # Backup the last api config, update the stock packaged version - # with our current Keystone info. - mv $API_CONF $CONF_DIR/api-paste.ini.juju-last - mv $CONF_DIR/api-paste.ini.dpkg-dist $CONF_DIR/api-paste.ini - - unit=$(relation-list -r $r_id | head -n1) - # Note, this should never be called from an relation hook, only config-changed. - export JUJU_REMOTE_UNIT=$unit - service_port=$(relation-get -r $r_id service_port) - auth_port=$(relation-get -r $r_id auth_port) - service_username=$(relation-get -r $r_id service_username) - service_password=$(relation-get -r $r_id service_password) - service_tenant=$(relation-get -r $r_id service_tenant) - keystone_host=$(relation-get -r $r_id private-address) - unset JUJU_REMOTE_UNIT - - juju-log "$CHARM: Updating new api-paste.ini with keystone data from $unit:$r_id" - set_or_update "service_host" "$keystone_host" "$API_CONF" - set_or_update "service_port" "$service_port" "$API_CONF" - set_or_update "auth_host" "$keystone_host" "$API_CONF" - set_or_update "auth_port" "$auth_port" "$API_CONF" - set_or_update "auth_uri" "http://$keystone_host:$service_port/" "$API_CONF" - set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF" - set_or_update "admin_user" "$service_username" "$API_CONF" - set_or_update "admin_password" "$service_password" "$API_CONF" - fi - - # TEMPORARY - # RC3 packaging in cloud archive doesn't have this in postinst. Do it here - sed -e "s,^root_helper=.\+,rootwrap_config=/etc/nova/rootwrap.conf," -i /etc/nova/nova.conf - - juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> folsom." 
-} diff --git a/hooks/lib/nova/grizzly b/hooks/lib/nova/grizzly deleted file mode 100644 index 6904f390..00000000 --- a/hooks/lib/nova/grizzly +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -e - -# Folsom-specific functions - -nova_set_or_update() { - # TODO: This needs to be shared among folsom, grizzly and beyond. - # Set a config option in nova.conf or api-paste.ini, depending - # Defaults to updating nova.conf - local key="$1" - local value="$2" - local conf_file="$3" - local section="${4:-DEFAULT}" - - local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} - local api_conf=${API_CONF:-/etc/nova/api-paste.ini} - local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf} - local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini} - local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini} - local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} - - [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1 - [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1 - - [[ -z "$conf_file" ]] && conf_file=$nova_conf - - local pattern="" - case "$conf_file" in - "$nova_conf") match="^$key=" - pattern="$key=" - out=$pattern - ;; - "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \ - "$libvirtd_conf") - match="^$key = " - pattern="$match" - out="$key = " - ;; - *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)" - esac - - cat $conf_file | grep "$match$value" >/dev/null && - juju-log "$CHARM: $key=$value already in set in $conf_file" \ - && return 0 - - case $conf_file in - "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf") - python -c " -import ConfigParser -config = ConfigParser.RawConfigParser() -config.read('$conf_file') -config.set('$section','$key','$value') -with open('$conf_file', 'wb') as configfile: - config.write(configfile) -" - ;; - *) - if cat $conf_file | grep "$match" >/dev/null 
; then - juju-log "$CHARM: Updating $conf_file, $key=$value" - sed -i "s|\($pattern\).*|\1$value|" $conf_file - else - juju-log "$CHARM: Setting new option $key=$value in $conf_file" - echo "$out$value" >>$conf_file - fi - ;; - esac - CONFIG_CHANGED="True" -} - -# Upgrade Helpers -nova_pre_upgrade() { - # Pre-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - return 0 # Nothing to do here, yet. -} - -nova_post_upgrade() { - # Post-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - local upgrade_from="$1" - juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> grizzly." - # We only support folsom -> grizzly, currently. - [[ "$upgrade_from" != "folsom" ]] && - error_out "Unsupported upgrade: $upgrade_from -> grizzly" - - # This may be dangerous, if we are upgrading a number of units at once - # and they all begin the same migration concurrently. Migrate only from - # the cloud controller(s). - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then - juju-log "$CHARM: Migrating nova database." - /usr/bin/nova-manage db sync - - # Trigger a service restart on all other nova nodes. - trigger_remote_service_restarts - fi - - juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> grizzly." -} diff --git a/hooks/lib/nova/nova-common b/hooks/lib/nova/nova-common deleted file mode 100644 index d212a505..00000000 --- a/hooks/lib/nova/nova-common +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -e - -# Common utility functions used across all nova charms. - -CONFIG_CHANGED=False -HOOKS_DIR="$CHARM_DIR/hooks" - -# Load the common OpenStack helper library. -if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then - . $HOOKS_DIR/lib/openstack-common -else - juju-log "Couldn't load $HOOKS_DIR/lib/opentack-common." && exit 1 -fi - -set_or_update() { - # Update config flags in nova.conf or api-paste.ini. - # Config layout changed in Folsom, so this is now OpenStack release specific. 
- local rel=$(get_os_codename_package "nova-common") - . $HOOKS_DIR/lib/nova/$rel - nova_set_or_update $@ -} - -function set_config_flags() { - # Set user-defined nova.conf flags from deployment config - juju-log "$CHARM: Processing config-flags." - flags=$(config-get config-flags) - if [[ "$flags" != "None" && -n "$flags" ]] ; then - for f in $(echo $flags | sed -e 's/,/ /g') ; do - k=$(echo $f | cut -d= -f1) - v=$(echo $f | cut -d= -f2) - set_or_update "$k" "$v" - done - fi -} - -configure_volume_service() { - local svc="$1" - local cur_vers="$(get_os_codename_package "nova-common")" - case "$svc" in - "cinder") - set_or_update "volume_api_class" "nova.volume.cinder.API" ;; - "nova-volume") - # nova-volume only supported before grizzly. - [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] && - set_or_update "volume_api_class" "nova.volume.api.API" - ;; - *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc" - return 1 ;; - esac -} - -function configure_network_manager { - local manager="$1" - echo "$CHARM: configuring $manager network manager" - case $1 in - "FlatManager") - set_or_update "network_manager" "nova.network.manager.FlatManager" - ;; - "FlatDHCPManager") - set_or_update "network_manager" "nova.network.manager.FlatDHCPManager" - - if [[ "$CHARM" == "nova-compute" ]] ; then - local flat_interface=$(config-get flat-interface) - local ec2_host=$(relation-get ec2_host) - set_or_update flat_inteface "$flat_interface" - set_or_update ec2_dmz_host "$ec2_host" - - # Ensure flat_interface has link. 
- if ip link show $flat_interface >/dev/null 2>&1 ; then - ip link set $flat_interface up - fi - - # work around (LP: #1035172) - if [[ -e /dev/vhost-net ]] ; then - iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \ - --checksum-fill - fi - fi - - ;; - "Quantum") - local local_ip=$(get_ip `unit-get private-address`) - [[ -n $local_ip ]] || { - juju-log "Unable to resolve local IP address" - exit 1 - } - set_or_update "network_api_class" "nova.network.quantumv2.api.API" - set_or_update "quantum_auth_strategy" "keystone" - set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF" - set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF" - if [ "$QUANTUM_PLUGIN" == "ovs" ]; then - set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS" - set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS" - set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS" - set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS" - fi - ;; - *) juju-log "ERROR: Invalid network manager $1" && exit 1 ;; - esac -} - -function trigger_remote_service_restarts() { - # Trigger a service restart on all other nova nodes that have a relation - # via the cloud-controller interface. - - # possible relations to other nova services. - local relations="cloud-compute nova-volume-service" - - for rel in $relations; do - local r_ids=$(relation-ids $rel) - for r_id in $r_ids ; do - juju-log "$CHARM: Triggering a service restart on relation $r_id." - relation-set -r $r_id restart-trigger=$(uuid) - done - done -} - -do_openstack_upgrade() { - # update openstack components to those provided by a new installation source - # it is assumed the calling hook has confirmed that the upgrade is sane. - local rel="$1" - shift - local packages=$@ - - orig_os_rel=$(get_os_codename_package "nova-common") - new_rel=$(get_os_codename_install_source "$rel") - - # Backup the config directory. 
- local stamp=$(date +"%Y%m%d%M%S") - tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR - - # load the release helper library for pre/post upgrade hooks specific to the - # release we are upgrading to. - . $HOOKS_DIR/lib/nova/$new_rel - - # new release specific pre-upgrade hook - nova_pre_upgrade "$orig_os_rel" - - # Setup apt repository access and kick off the actual package upgrade. - configure_install_source "$rel" - apt-get update - DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \ - install --no-install-recommends $packages - - # new release sepcific post-upgrade hook - nova_post_upgrade "$orig_os_rel" - -} diff --git a/hooks/lib/openstack-common b/hooks/lib/openstack-common deleted file mode 100644 index d5b19c5d..00000000 --- a/hooks/lib/openstack-common +++ /dev/null @@ -1,781 +0,0 @@ -#!/bin/bash -e - -# Common utility functions used across all OpenStack charms. - -error_out() { - juju-log "$CHARM ERROR: $@" - exit 1 -} - -function service_ctl_status { - # Return 0 if a service is running, 1 otherwise. - local svc="$1" - local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }') - case $status in - "start") return 0 ;; - "stop") return 1 ;; - *) error_out "Unexpected status of service $svc: $status" ;; - esac -} - -function service_ctl { - # control a specific service, or all (as defined by $SERVICES) - # service restarts will only occur depending on global $CONFIG_CHANGED, - # which should be updated in charm's set_or_update(). 
- local config_changed=${CONFIG_CHANGED:-True} - if [[ $1 == "all" ]] ; then - ctl="$SERVICES" - else - ctl="$1" - fi - action="$2" - if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then - error_out "ERROR service_ctl: Not enough arguments" - fi - - for i in $ctl ; do - case $action in - "start") - service_ctl_status $i || service $i start ;; - "stop") - service_ctl_status $i && service $i stop || return 0 ;; - "restart") - if [[ "$config_changed" == "True" ]] ; then - service_ctl_status $i && service $i restart || service $i start - fi - ;; - esac - if [[ $? != 0 ]] ; then - juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action" - fi - done - # all configs should have been reloaded on restart of all services, reset - # flag if its being used. - if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] && - [[ "$ctl" == "all" ]]; then - CONFIG_CHANGED="False" - fi -} - -function configure_install_source { - # Setup and configure installation source based on a config flag. - local src="$1" - - # Default to installing from the main Ubuntu archive. - [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0 - - . /etc/lsb-release - - # standard 'ppa:someppa/name' format. - if [[ "${src:0:4}" == "ppa:" ]] ; then - juju-log "$CHARM: Configuring installation from custom src ($src)" - add-apt-repository -y "$src" || error_out "Could not configure PPA access." - return 0 - fi - - # standard 'deb http://url/ubuntu main' entries. 
gpg key ids must - # be appended to the end of url after a |, ie: - # 'deb http://url/ubuntu main|$GPGKEYID' - if [[ "${src:0:3}" == "deb" ]] ; then - juju-log "$CHARM: Configuring installation from custom src URL ($src)" - if echo "$src" | grep -q "|" ; then - # gpg key id tagged to end of url folloed by a | - url=$(echo $src | cut -d'|' -f1) - key=$(echo $src | cut -d'|' -f2) - juju-log "$CHARM: Importing repository key: $key" - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \ - juju-log "$CHARM WARN: Could not import key from keyserver: $key" - else - juju-log "$CHARM No repository key specified." - url="$src" - fi - echo "$url" > /etc/apt/sources.list.d/juju_deb.list - return 0 - fi - - # Cloud Archive - if [[ "${src:0:6}" == "cloud:" ]] ; then - - # current os releases supported by the UCA. - local cloud_archive_versions="folsom grizzly" - - local ca_rel=$(echo $src | cut -d: -f2) - local u_rel=$(echo $ca_rel | cut -d- -f1) - local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1) - - [[ "$u_rel" != "$DISTRIB_CODENAME" ]] && - error_out "Cannot install from Cloud Archive pocket $src " \ - "on this Ubuntu version ($DISTRIB_CODENAME)!" - - valid_release="" - for rel in $cloud_archive_versions ; do - if [[ "$os_rel" == "$rel" ]] ; then - valid_release=1 - juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive." - fi - done - if [[ -z "$valid_release" ]] ; then - error_out "OpenStack release ($os_rel) not supported by "\ - "the Ubuntu Cloud Archive." - fi - - # CA staging repos are standard PPAs. - if echo $ca_rel | grep -q "staging" ; then - add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging - return 0 - fi - - # the others are LP-external deb repos. 
- case "$ca_rel" in - "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; - "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; - *) error_out "Invalid Cloud Archive repo specified: $src" - esac - - apt-get -y install ubuntu-cloud-keyring - entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main" - echo "$entry" \ - >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list - return 0 - fi - - error_out "Invalid installation source specified in config: $src" - -} - -get_os_codename_install_source() { - # derive the openstack release provided by a supported installation source. - local rel="$1" - local codename="unknown" - . /etc/lsb-release - - # map ubuntu releases to the openstack version shipped with it. - if [[ "$rel" == "distro" ]] ; then - case "$DISTRIB_CODENAME" in - "oneiric") codename="diablo" ;; - "precise") codename="essex" ;; - "quantal") codename="folsom" ;; - "raring") codename="grizzly" ;; - esac - fi - - # derive version from cloud archive strings. 
- if [[ "${rel:0:6}" == "cloud:" ]] ; then - rel=$(echo $rel | cut -d: -f2) - local u_rel=$(echo $rel | cut -d- -f1) - local ca_rel=$(echo $rel | cut -d- -f2) - if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then - case "$ca_rel" in - "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging") - codename="folsom" ;; - "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging") - codename="grizzly" ;; - esac - fi - fi - - # have a guess based on the deb string provided - if [[ "${rel:0:3}" == "deb" ]] || \ - [[ "${rel:0:3}" == "ppa" ]] ; then - CODENAMES="diablo essex folsom grizzly havana" - for cname in $CODENAMES; do - if echo $rel | grep -q $cname; then - codename=$cname - fi - done - fi - echo $codename -} - -get_os_codename_package() { - local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none" - pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs - case "${pkg_vers:0:6}" in - "2011.2") echo "diablo" ;; - "2012.1") echo "essex" ;; - "2012.2") echo "folsom" ;; - "2013.1") echo "grizzly" ;; - "2013.2") echo "havana" ;; - esac -} - -get_os_version_codename() { - case "$1" in - "diablo") echo "2011.2" ;; - "essex") echo "2012.1" ;; - "folsom") echo "2012.2" ;; - "grizzly") echo "2013.1" ;; - "havana") echo "2013.2" ;; - esac -} - -get_ip() { - dpkg -l | grep -q python-dnspython || { - apt-get -y install python-dnspython 2>&1 > /dev/null - } - hostname=$1 - python -c " -import dns.resolver -import socket -try: - # Test to see if already an IPv4 address - socket.inet_aton('$hostname') - print '$hostname' -except socket.error: - try: - answers = dns.resolver.query('$hostname', 'A') - if answers: - print answers[0].address - except dns.resolver.NXDOMAIN: - pass -" -} - -# Common storage routines used by cinder, nova-volume and swift-storage. 
-clean_storage() { - # if configured to overwrite existing storage, we unmount the block-dev - # if mounted and clear any previous pv signatures - local block_dev="$1" - juju-log "Cleaining storage '$block_dev'" - if grep -q "^$block_dev" /proc/mounts ; then - mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }') - juju-log "Unmounting $block_dev from $mp" - umount "$mp" || error_out "ERROR: Could not unmount storage from $mp" - fi - if pvdisplay "$block_dev" >/dev/null 2>&1 ; then - juju-log "Removing existing LVM PV signatures from $block_dev" - - # deactivate any volgroups that may be built on this dev - vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }') - if [[ -n "$vg" ]] ; then - juju-log "Deactivating existing volume group: $vg" - vgchange -an "$vg" || - error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?" - fi - echo "yes" | pvremove -ff "$block_dev" || - error_out "Could not pvremove $block_dev" - else - juju-log "Zapping disk of all GPT and MBR structures" - sgdisk --zap-all $block_dev || - error_out "Unable to zap $block_dev" - fi -} - -function get_block_device() { - # given a string, return full path to the block device for that - # if input is not a block device, find a loopback device - local input="$1" - - case "$input" in - /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist." - echo "$input"; return 0;; - /*) :;; - *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist." - echo "/dev/$input"; return 0;; - esac - - # this represents a file - # support "/path/to/file|5G" - local fpath size oifs="$IFS" - if [ "${input#*|}" != "${input}" ]; then - size=${input##*|} - fpath=${input%|*} - else - fpath=${input} - size=5G - fi - - ## loop devices are not namespaced. This is bad for containers. - ## it means that the output of 'losetup' may have the given $fpath - ## in it, but that may not represent this containers $fpath, but - ## another containers. 
To address that, we really need to - ## allow some uniq container-id to be expanded within path. - ## TODO: find a unique container-id that will be consistent for - ## this container throughout its lifetime and expand it - ## in the fpath. - # fpath=${fpath//%{id}/$THAT_ID} - - local found="" - # parse through 'losetup -a' output, looking for this file - # output is expected to look like: - # /dev/loop0: [0807]:961814 (/tmp/my.img) - found=$(losetup -a | - awk 'BEGIN { found=0; } - $3 == f { sub(/:$/,"",$1); print $1; found=found+1; } - END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \ - f="($fpath)") - - if [ $? -ne 0 ]; then - echo "multiple devices found for $fpath: $found" 1>&2 - return 1; - fi - - [ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; } - - if [ -n "$found" ]; then - echo "confused, $found is not a block device for $fpath"; - return 1; - fi - - # no existing device was found, create one - mkdir -p "${fpath%/*}" - truncate --size "$size" "$fpath" || - { echo "failed to create $fpath of size $size"; return 1; } - - found=$(losetup --find --show "$fpath") || - { echo "failed to setup loop device for $fpath" 1>&2; return 1; } - - echo "$found" - return 0 -} - -HAPROXY_CFG=/etc/haproxy/haproxy.cfg -HAPROXY_DEFAULT=/etc/default/haproxy -########################################################################## -# Description: Configures HAProxy services for Openstack API's -# Parameters: -# Space delimited list of service:port:mode combinations for which -# haproxy service configuration should be generated for. The function -# assumes the name of the peer relation is 'cluster' and that every -# service unit in the peer relation is running the same services. -# -# Services that do not specify :mode in parameter will default to http. 
-# -# Example -# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http -########################################################################## -configure_haproxy() { - local address=`unit-get private-address` - local name=${JUJU_UNIT_NAME////-} - cat > $HAPROXY_CFG << EOF -global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice - maxconn 20000 - user haproxy - group haproxy - spread-checks 0 - -defaults - log global - mode http - option httplog - option dontlognull - retries 3 - timeout queue 1000 - timeout connect 1000 - timeout client 30000 - timeout server 30000 - -listen stats :8888 - mode http - stats enable - stats hide-version - stats realm Haproxy\ Statistics - stats uri / - stats auth admin:password - -EOF - for service in $@; do - local service_name=$(echo $service | cut -d : -f 1) - local haproxy_listen_port=$(echo $service | cut -d : -f 2) - local api_listen_port=$(echo $service | cut -d : -f 3) - local mode=$(echo $service | cut -d : -f 4) - [[ -z "$mode" ]] && mode="http" - juju-log "Adding haproxy configuration entry for $service "\ - "($haproxy_listen_port -> $api_listen_port)" - cat >> $HAPROXY_CFG << EOF -listen $service_name 0.0.0.0:$haproxy_listen_port - balance roundrobin - mode $mode - option ${mode}log - server $name $address:$api_listen_port check -EOF - local r_id="" - local unit="" - for r_id in `relation-ids cluster`; do - for unit in `relation-list -r $r_id`; do - local unit_name=${unit////-} - local unit_address=`relation-get -r $r_id private-address $unit` - if [ -n "$unit_address" ]; then - echo " server $unit_name $unit_address:$api_listen_port check" \ - >> $HAPROXY_CFG - fi - done - done - done - echo "ENABLED=1" > $HAPROXY_DEFAULT - service haproxy restart -} - -########################################################################## -# Description: Query HA interface to determine is cluster is configured -# Returns: 0 if configured, 1 if not configured 
-########################################################################## -is_clustered() { - local r_id="" - local unit="" - for r_id in $(relation-ids ha); do - if [ -n "$r_id" ]; then - for unit in $(relation-list -r $r_id); do - clustered=$(relation-get -r $r_id clustered $unit) - if [ -n "$clustered" ]; then - juju-log "Unit is haclustered" - return 0 - fi - done - fi - done - juju-log "Unit is not haclustered" - return 1 -} - -########################################################################## -# Description: Return a list of all peers in cluster relations -########################################################################## -peer_units() { - local peers="" - local r_id="" - for r_id in $(relation-ids cluster); do - peers="$peers $(relation-list -r $r_id)" - done - echo $peers -} - -########################################################################## -# Description: Determines whether the current unit is the oldest of all -# its peers - supports partial leader election -# Returns: 0 if oldest, 1 if not -########################################################################## -oldest_peer() { - peers=$1 - local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2) - for peer in $peers; do - echo "Comparing $JUJU_UNIT_NAME with peers: $peers" - local r_unit_no=$(echo $peer | cut -d / -f 2) - if (($r_unit_no<$l_unit_no)); then - juju-log "Not oldest peer; deferring" - return 1 - fi - done - juju-log "Oldest peer; might take charge?" - return 0 -} - -########################################################################## -# Description: Determines whether the current service units is the -# leader within a) a cluster of its peers or b) across a -# set of unclustered peers. -# Parameters: CRM resource to check ownership of if clustered -# Returns: 0 if leader, 1 if not -########################################################################## -eligible_leader() { - if is_clustered; then - if ! 
is_leader $1; then - juju-log 'Deferring action to CRM leader' - return 1 - fi - else - peers=$(peer_units) - if [ -n "$peers" ] && ! oldest_peer "$peers"; then - juju-log 'Deferring action to oldest service unit.' - return 1 - fi - fi - return 0 -} - -########################################################################## -# Description: Query Cluster peer interface to see if peered -# Returns: 0 if peered, 1 if not peered -########################################################################## -is_peered() { - local r_id=$(relation-ids cluster) - if [ -n "$r_id" ]; then - if [ -n "$(relation-list -r $r_id)" ]; then - juju-log "Unit peered" - return 0 - fi - fi - juju-log "Unit not peered" - return 1 -} - -########################################################################## -# Description: Determines whether host is owner of clustered services -# Parameters: Name of CRM resource to check ownership of -# Returns: 0 if leader, 1 if not leader -########################################################################## -is_leader() { - hostname=`hostname` - if [ -x /usr/sbin/crm ]; then - if crm resource show $1 | grep -q $hostname; then - juju-log "$hostname is cluster leader." - return 0 - fi - fi - juju-log "$hostname is not cluster leader." - return 1 -} - -########################################################################## -# Description: Determines whether enough data has been provided in -# configuration or relation data to configure HTTPS. -# Parameters: None -# Returns: 0 if HTTPS can be configured, 1 if not. 
-########################################################################## -https() { - local r_id="" - if [[ -n "$(config-get ssl_cert)" ]] && - [[ -n "$(config-get ssl_key)" ]] ; then - return 0 - fi - for r_id in $(relation-ids identity-service) ; do - for unit in $(relation-list -r $r_id) ; do - if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] && - [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] && - [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] && - [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then - return 0 - fi - done - done - return 1 -} - -########################################################################## -# Description: For a given number of port mappings, configures apache2 -# HTTPs local reverse proxying using certficates and keys provided in -# either configuration data (preferred) or relation data. Assumes ports -# are not in use (calling charm should ensure that). -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if reverse proxy(s) have been configured, 0 if not. -########################################################################## -enable_https() { - local port_maps="$@" - local http_restart="" - juju-log "Enabling HTTPS for port mappings: $port_maps." - - # allow overriding of keystone provided certs with those set manually - # in config. - local cert=$(config-get ssl_cert) - local key=$(config-get ssl_key) - local ca_cert="" - if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then - juju-log "Inspecting identity-service relations for SSL certificate." 
- local r_id="" - cert="" - key="" - ca_cert="" - for r_id in $(relation-ids identity-service) ; do - for unit in $(relation-list -r $r_id) ; do - [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)" - [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)" - [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)" - done - done - [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di) - [[ -n "$key" ]] && key=$(echo $key | base64 -di) - [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di) - else - juju-log "Using SSL certificate provided in service config." - fi - - [[ -z "$cert" ]] || [[ -z "$key" ]] && - juju-log "Expected but could not find SSL certificate data, not "\ - "configuring HTTPS!" && return 1 - - apt-get -y install apache2 - a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" && - http_restart=1 - - mkdir -p /etc/apache2/ssl/$CHARM - echo "$cert" >/etc/apache2/ssl/$CHARM/cert - echo "$key" >/etc/apache2/ssl/$CHARM/key - if [[ -n "$ca_cert" ]] ; then - juju-log "Installing Keystone supplied CA cert." - echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt - update-ca-certificates --fresh - - # XXX TODO: Find a better way of exporting this? - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then - [[ -e /var/www/keystone_juju_ca_cert.crt ]] && - rm -rf /var/www/keystone_juju_ca_cert.crt - ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \ - /var/www/keystone_juju_ca_cert.crt - fi - - fi - for port_map in $port_maps ; do - local ext_port=$(echo $port_map | cut -d: -f1) - local int_port=$(echo $port_map | cut -d: -f2) - juju-log "Creating apache2 reverse proxy vhost for $port_map." 
- cat >/etc/apache2/sites-available/${CHARM}_${ext_port} < - ServerName $(unit-get private-address) - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/$CHARM/cert - SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key - ProxyPass / http://localhost:$int_port/ - ProxyPassReverse / http://localhost:$int_port/ - ProxyPreserveHost on - - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -END - a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && - http_restart=1 - done - if [[ -n "$http_restart" ]] ; then - service apache2 restart - fi -} - -########################################################################## -# Description: Ensure HTTPS reverse proxying is disabled for given port -# mappings. -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error. -########################################################################## -disable_https() { - local port_maps="$@" - local http_restart="" - juju-log "Ensuring HTTPS disabled for $port_maps." - ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0 - for port_map in $port_maps ; do - local ext_port=$(echo $port_map | cut -d: -f1) - local int_port=$(echo $port_map | cut -d: -f2) - if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then - juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map." - a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && - http_restart=1 - fi - done - if [[ -n "$http_restart" ]] ; then - service apache2 restart - fi -} - - -########################################################################## -# Description: Ensures HTTPS is either enabled or disabled for given port -# mapping. -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not. 
-########################################################################## -setup_https() { - # configure https via apache reverse proxying either - # using certs provided by config or keystone. - [[ -z "$CHARM" ]] && - error_out "setup_https(): CHARM not set." - if ! https ; then - disable_https $@ - else - enable_https $@ - fi -} - -########################################################################## -# Description: Determine correct API server listening port based on -# existence of HTTPS reverse proxy and/or haproxy. -# Paremeters: The standard public port for given service. -# Returns: The correct listening port for API service. -########################################################################## -determine_api_port() { - local public_port="$1" - local i=0 - ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1] - https >/dev/null 2>&1 && i=$[$i + 1] - echo $[$public_port - $[$i * 10]] -} - -########################################################################## -# Description: Determine correct proxy listening port based on public IP + -# existence of HTTPS reverse proxy. -# Paremeters: The standard public port for given service. -# Returns: The correct listening port for haproxy service public address. -########################################################################## -determine_haproxy_port() { - local public_port="$1" - local i=0 - https >/dev/null 2>&1 && i=$[$i + 1] - echo $[$public_port - $[$i * 10]] -} - -########################################################################## -# Description: Print the value for a given config option in an OpenStack -# .ini style configuration file. -# Parameters: File path, option to retrieve, optional -# section name (default=DEFAULT) -# Returns: Prints value if set, prints nothing otherwise. -########################################################################## -local_config_get() { - # return config values set in openstack .ini config files. 
- # default placeholders starting (eg, %AUTH_HOST%) treated as - # unset values. - local file="$1" - local option="$2" - local section="$3" - [[ -z "$section" ]] && section="DEFAULT" - python -c " -import ConfigParser -config = ConfigParser.RawConfigParser() -config.read('$file') -try: - value = config.get('$section', '$option') -except: - print '' - exit(0) -if value.startswith('%'): exit(0) -print value -" -} - -########################################################################## -# Description: Creates an rc file exporting environment variables to a -# script_path local to the charm's installed directory. -# Any charm scripts run outside the juju hook environment can source this -# scriptrc to obtain updated config information necessary to perform health -# checks or service changes -# -# Parameters: -# An array of '=' delimited ENV_VAR:value combinations to export. -# If optional script_path key is not provided in the array, script_path -# defaults to scripts/scriptrc -########################################################################## -function save_script_rc { - if [ ! 
-n "$JUJU_UNIT_NAME" ]; then - echo "Error: Missing JUJU_UNIT_NAME environment variable" - exit 1 - fi - # our default unit_path - unit_path="$CHARM_DIR/scripts/scriptrc" - echo $unit_path - tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc" - - echo "#!/bin/bash" > $tmp_rc - for env_var in "${@}" - do - if `echo $env_var | grep -q script_path`; then - # well then we need to reset the new unit-local script path - unit_path="$CHARM_DIR/${env_var/script_path=/}" - else - echo "export $env_var" >> $tmp_rc - fi - done - chmod 755 $tmp_rc - mv $tmp_rc $unit_path -} diff --git a/hooks/misc_utils.py b/hooks/misc_utils.py new file mode 100644 index 00000000..c5748f13 --- /dev/null +++ b/hooks/misc_utils.py @@ -0,0 +1,31 @@ +import subprocess + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, +) + +from charmhelpers.contrib.hahelpers.ceph import ( + create_keyring as ceph_create_keyring, + keyring_path as ceph_keyring_path, +) + + +# This was pulled from cinder redux. It should go somewhere common, charmhelpers.hahelpers.ceph? + +def ensure_ceph_keyring(service): + '''Ensures a ceph keyring exists. Returns True if so, False otherwise''' + # TODO: This can be shared between cinder + glance, find a home for it. 
+ key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + if not key: + return False + ceph_create_keyring(service=service, key=key) + keyring = ceph_keyring_path(service) + subprocess.check_call(['chown', 'cinder.cinder', keyring]) + return True diff --git a/hooks/nova-compute-common b/hooks/nova-compute-common deleted file mode 100755 index eaaa4dc0..00000000 --- a/hooks/nova-compute-common +++ /dev/null @@ -1,309 +0,0 @@ -#!/bin/bash -e - -CHARM="nova-compute" -PACKAGES="nova-compute python-keystone genisoimage" -SERVICES="nova-compute" -CONF_DIR="/etc/nova" -NOVA_CONF=$(config-get nova-config) -API_CONF="/etc/nova/api-paste.ini" -QUANTUM_CONF="/etc/quantum/quantum.conf" -LIBVIRTD_CONF="/etc/libvirt/libvirtd.conf" -HOOKS_DIR="$CHARM_DIR/hooks" -MULTI_HOST=$(config-get multi-host) - -if [ -f /etc/nova/nm.conf ]; then - NET_MANAGER=$(cat /etc/nova/nm.conf) -fi -case $NET_MANAGER in - "Quantum") - QUANTUM_PLUGIN=$(cat /etc/nova/quantum_plugin.conf) - case $QUANTUM_PLUGIN in - "ovs") - SERVICES="$SERVICES quantum-plugin-openvswitch-agent" - QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini" - ;; - "nvp") - QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/nicira/nvp.ini" - ;; - *) - juju-log "Unrecognised plugin for quantum: $QUANTUM_PLUGIN" && exit 1 - ;; - esac - ;; - "FlatManager"|"FlatDHCPManager") - if [[ "$MULTI_HOST" == "yes" ]] ; then - SERVICES="$SERVICES nova-api nova-network" - fi - ;; -esac - -if [[ -e $HOOKS_DIR/lib/nova/nova-common ]] ; then - . $HOOKS_DIR/lib/nova/nova-common -else - juju-log "$CHARM: Couldn't load $HOOKS_DIR/lib/nova-common" && exit 1 -fi - -determine_compute_package() { - # determines the appropriate nova-compute package to install - # for the configured virt-type. 
- local virt_type="$1" - local compute_pkg="" - case $virt_type in - "kvm") compute_pkg="nova-compute-kvm";; - "qemu") compute_pkg="nova-compute-qemu";; - "xen") compute_pkg="nova-compute-xen";; - "uml") compute_pkg="nova-compute-uml";; - "lxc") compute_pkg="nova-compute-lxc";; - *) error_out "ERROR: Unsupported virt_type=$virt_type";; - esac - echo "$compute_pkg" -} - -function setup_bridge { - # XXX This is required by nova-network and will likely move somewhere else - # once we can split these services up into seperate formulas. - br=$1 - ip=$2 - netmask=$3 - [[ -z $br ]] && br="br100" - [[ -z $ip ]] && ip="11.0.0.1" - [[ -z $netmask ]] && netmask="255.255.255.0" - - apt-get -y install bridge-utils augeas-lenses augeas-tools - echo "Configuring bridge $br ($ip $netmask)" - context="/files/etc/network/interfaces" - augtool < /etc/nova/nm.conf - ;;& - "FlatManager") - local bridge_ip=$(config-get bridge-ip) - local bridge_netmask=$(config-get bridge-netmask) - setup_bridge $network_bridge $bridge_ip $bridge_netmask - set_or_update network_manager nova.network.manager.FlatManager - set_or_update flat_network_bridge $network_bridge - ;; - "FlatDHCPManager") - local flat_interface=$(config-get flat-interface) - local ec2_host=$(relation-get ec2_host) - [[ -z $ec2_host ]] && juju-log "nova-compute: Missing ec2_host" \ - && exit 0 - set_or_update network_manager nova.network.manager.FlatDHCPManager - # the interface on which bridge is built - set_or_update flat_interface $flat_interface - # address of API server to forward requests - set_or_update ec2_dmz_host $ec2_host - ;; - "Quantum") - local keystone_host="$(relation-get keystone_host)" - local auth_port="$(relation-get auth_port)" - local quantum_url="$(relation-get quantum_url)" - local quantum_admin_tenant_name="$(relation-get service_tenant)" - local quantum_admin_username="$(relation-get service_username)" - local quantum_admin_password="$(relation-get service_password)" - local 
quantum_security_groups="$(relation-get quantum_security_groups)" - - # might end up here before nova-c-c has processed keystone hooks - [[ -z "$keystone_host" ]] || - [[ -z "$auth_port" ]] || - [[ -z "$quantum_url" ]] || - [[ -z "$quantum_admin_tenant_name" ]] || - [[ -z "$quantum_admin_username" ]] || - [[ -z "$quantum_admin_password" ]] && - juju-log "nova-compute: Missing required data for Quantum config." && - exit 0 - - local cur=$(get_os_codename_package "nova-common") - local vers=$(get_os_version_codename $cur) - - [[ "$quantum_security_groups" == "yes" ]] && - dpkg --compare-versions $vers lt '2013.1' && - juju-log "Unable to use quantum security groups with < grizzly" && - exit 1 - - set_or_update "network_api_class" "nova.network.quantumv2.api.API" - set_or_update "quantum_auth_strategy" "keystone" - set_or_update "quantum_url" "$quantum_url" - set_or_update "quantum_admin_tenant_name" "$quantum_admin_tenant_name" - set_or_update "quantum_admin_username" "$quantum_admin_username" - set_or_update "quantum_admin_password" "$quantum_admin_password" - set_or_update "quantum_admin_auth_url" \ - "http://$keystone_host:$auth_port/v2.0" - - if dpkg --compare-versions $vers gt '2012.2'; then - # Grizzly onwards supports metadata proxy so forcing use of config - # drive is not required. 
- set_or_update "force_config_drive" "False" - else - set_or_update "force_config_drive" "True" - fi - case $quantum_plugin in - "ovs") - apt-get -y install openvswitch-datapath-dkms - apt-get -y install quantum-plugin-openvswitch-agent - local quantum_plugin_conf="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini" - set_or_update "core_plugin" "quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" "$QUANTUM_CONF" - if dpkg --compare-versions $vers gt '2012.2'; then - set_or_update "libvirt_vif_driver" "nova.virt.libvirt.vif.LibvirtGenericVIFDriver" - else - set_or_update "libvirt_vif_driver" "nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" - fi - set_or_update "libvirt_use_virtio_for_bridges" "True" - set_or_update "tenant_network_type" "gre" $quantum_plugin_conf "OVS" - set_or_update "enable_tunneling" "True" $quantum_plugin_conf "OVS" - set_or_update "tunnel_id_ranges" "1:1000" $quantum_plugin_conf "OVS" - set_or_update "local_ip" "$private_address" $quantum_plugin_conf "OVS" - if [ "$quantum_security_groups" == "yes" ]; then - set_or_update "security_group_api" "quantum" - set_or_update "firewall_driver" "nova.virt.firewall.NoopFirewallDriver" - set_or_update "firewall_driver" \ - "quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver" \ - $quantum_plugin_conf "SECURITYGROUP" - fi - SERVICES="$SERVICES quantum-plugin-openvswitch-agent" - ;; - esac - set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF" - [[ -n $net_manager ]] && echo $net_manager > /etc/nova/nm.conf - [[ -n $quantum_plugin ]] && echo $quantum_plugin > /etc/nova/quantum_plugin.conf - ;; - *) echo "ERROR: Invalid network manager $1" && exit 1 ;; - esac -} - -BR_INT="br-int" - -function configure_quantum_bridge { - if ! ovs-vsctl show | grep -q "Bridge $BR_INT"; then - ovs-vsctl add-br $BR_INT - fi -} - -function initialize_ssh_keys { - # generate ssh keypair for root if one does not exist or - # the pari is not complete. 
- local pub="/root/.ssh/id_rsa" - local priv="/root/.ssh/id_rsa.pub" - if [[ -e $pub ]] && - [[ -e $priv ]] ; then - juju-log "$CHARM: SSH credentials already exist for root." - return 0 - fi - juju-log "$CHARM: Initializing new SSH key pair for live migration." - [[ -e $pub ]] && mv $pub $pub.$(date +"%s") - [[ -e $priv ]] && mv $priv $priv.$(date +"%s") - local keyname=$(echo $JUJU_UNIT_NAME | sed -e 's,/,-,g') - echo -e "\n" | ssh-keygen -C "$keyname" -N "" -} - -function libvirt_tcp_listening { - # toggle libvirtd's tcp listening in both /etc/default/libvirt-bin - # and /etc/libvirt/libvirtd.conf. - local toggle="$1" - juju-log "$CHARM: Configuring libvirt tcp listening: $toggle." - local cur_opts=$(grep "^libvirtd_opts" /etc/default/libvirt-bin | - cut -d= -f2 | sed -e 's/\"//g') - local new_opts="" - - if [[ "$toggle" == "on" ]] ; then - if [[ -z "$cur_opts" ]] ; then - echo "libvirtd_opts=\"-d -l\"" >>/etc/default/libvirt-bin - elif ! echo "$cur_opts" | grep -q "\-l" ; then - new_opts="$cur_opts -l" - sed -i "s|\(libvirtd_opts=\).*|\1\"$new_opts\"|" /etc/default/libvirt-bin - fi - set_or_update "listen_tcp" 1 $LIBVIRTD_CONF - elif [[ "$toggle" == "off" ]] ; then - if echo "$cur_opts" | grep -q "\-l" ; then - new_opts=$(echo $cur_opts | sed -e 's/\-l//g') - fi - set_or_update "listen_tcp" 0 $LIBVIRTD_CONF - fi - - [[ -n "$new_opts" ]] && - sed -i "s|\(libvirtd_opts=\).*|\1\"$new_opts\"|" /etc/default/libvirt-bin - - return 0 -} - - -function configure_migration { - local enable_migration=$(config-get enable-live-migration) - - if [[ "$enable_migration" != "True" ]] && - [[ "$enable_migraiton" != "true" ]] ; then - libvirt_tcp_listening "off" - return $? 
- fi - - libvirt_tcp_listening "on" - - case "$(config-get migration-auth-type)" in - "none"|"None") - set_or_update "listen_tls" 0 $LIBVIRTD_CONF - set_or_update "auth_tcp" "\"none\"" $LIBVIRTD_CONF - ;; - "ssh") - set_or_update "listen_tls" 0 $LIBVIRTD_CONF - set_or_update "live_migration_uri" "qemu+ssh://%s/system" $NOVA_CONF - initialize_ssh_keys - # check in with nova-c-c and register our new key. - for id in $(relation-ids cloud-compute) ; do - compute_joined $id - done - service_ctl nova-compute restart ;; - "sasl") return 0 ;; - esac -} - -function configure_libvirt { - cat > /etc/libvirt/qemu.conf << EOF -# File installed by Juju nova-compute charm -cgroup_device_acl = [ - "/dev/null", "/dev/full", "/dev/zero", - "/dev/random", "/dev/urandom", - "/dev/ptmx", "/dev/kvm", "/dev/kqemu", - "/dev/rtc", "/dev/hpet", "/dev/net/tun", -] -EOF - configure_migration - service libvirt-bin restart -} - -function migration_enabled { - local migration="$(config-get enable-live-migration)" - [[ "$migration" == "true" ]] || [[ "$migration" == "True" ]] && return 0 - return 1 -} diff --git a/hooks/nova-compute-relations b/hooks/nova-compute-relations deleted file mode 100755 index 3fc2a8da..00000000 --- a/hooks/nova-compute-relations +++ /dev/null @@ -1,329 +0,0 @@ -#!/bin/bash -e -HOOKS_DIR="$CHARM_DIR/hooks" -ARG0=${0##*/} - -if [[ -e $HOOKS_DIR/nova-compute-common ]] ; then - . 
$HOOKS_DIR/nova-compute-common -else - juju-log "ERROR: Could not load nova-compute-common from $HOOKS_DIR" -fi - -function install_hook { - [ -d exec.d ] && ( for f in exec.d/*/charm-pre-install; do [ -x $f ] && /bin/sh -c "$f";done ) - local virt_type=$(config-get virt-type) - local compute_pkg=$(determine_compute_package "$virt_type") - apt-get -y install python-software-properties || exit 1 - configure_install_source "$(config-get openstack-origin)" - apt-get update || exit 1 - apt-get -y install $compute_pkg $PACKAGES || exit 1 - service_ctl all stop - set_or_update "auth_strategy" "keystone" - configure_libvirt -} - -function upgrade_hook { - [ -d exec.d ] && ( for f in exec.d/*/charm-pre-install; do [ -x $f ] && /bin/sh -c "$f";done ) -} - -function config_changed() { - - # Determine whether or not we should do an upgrade, based on whether or not - # the version offered in openstack-origin is greater than what is installed. - - local install_src=$(config-get openstack-origin) - local cur=$(get_os_codename_package "nova-common") - local available=$(get_os_codename_install_source "$install_src") - - if dpkg --compare-versions $(get_os_version_codename "$cur") lt \ - $(get_os_version_codename "$available") ; then - juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available." - do_openstack_upgrade "$install_src" $PACKAGES - fi - - # set this here until its fixed in grizzly packaging. (adam_g) - [[ "$cur" == "grizzly" ]] && - set_or_update "compute_driver" "libvirt.LibvirtDriver" - - configure_libvirt - set_config_flags - service_ctl all restart -} - -function amqp_joined { - # we request a username on the rabbit queue - # and store it in nova.conf. 
our response is its IP + PASSWD - # but we configure that in _changed - local rabbit_user=$(config-get rabbit-user) - local rabbit_vhost=$(config-get rabbit-vhost) - juju-log "$CHARM - amqp_joined: requesting credentials for $rabbit_user" - relation-set username=$rabbit_user - relation-set vhost=$rabbit_vhost -} - -function amqp_changed { - # server creates our credentials and tells us where - # to connect. for now, using default vhost '/' - local rabbit_host=$(relation-get private-address) - local rabbit_password=$(relation-get password) - - if [[ -z $rabbit_host ]] || \ - [[ -z $rabbit_password ]] ; then - juju-log "$CHARM - amqp_changed: rabbit_host||rabbit_password not set." - exit 0 - fi - - # if the rabbitmq service is clustered among nodes with hacluster, - # point to its vip instead of its private-address. - local clustered=$(relation-get clustered) - if [[ -n "$clustered" ]] ; then - juju-log "$CHARM - ampq_changed: Configuring for "\ - "access to haclustered rabbitmq service." - local vip=$(relation-get vip) - [[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip."\ - && exit 0 - rabbit_host="$vip" - fi - - local rabbit_user=$(config-get rabbit-user) - local rabbit_vhost=$(config-get rabbit-vhost) - juju-log "$CHARM - amqp_changed: Setting rabbit config in nova.conf: " \ - "$rabbit_user@$rabbit_host/$rabbit_vhost" - set_or_update rabbit_host $rabbit_host - set_or_update rabbit_userid $rabbit_user - set_or_update rabbit_password $rabbit_password - set_or_update rabbit_virtual_host $rabbit_vhost - - if [ "$NET_MANAGER" == "Quantum" ]; then - set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF" - set_or_update rabbit_userid "$rabbit_user" "$QUANTUM_CONF" - set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF" - set_or_update rabbit_virtual_host "$rabbit_vhost" "$QUANTUM_CONF" - fi - - service_ctl all restart -} - -function db_joined { - # tell mysql provider which database we want. 
it will create it and give us - # credentials - local nova_db=$(config-get nova-db) - local db_user=$(config-get db-user) - local hostname=$(unit-get private-address) - juju-log "$CHARM - db_joined: requesting database access to $nova_db for "\ - "$db_user@$hostname" - relation-set nova_database=$nova_db nova_username=$db_user nova_hostname=$hostname - if [ "$NET_MANAGER" == "Quantum" ]; then - relation-set quantum_database=quantum quantum_username=quantum quantum_hostname=$hostname - fi -} - -function db_changed { - local db_host=`relation-get db_host` - local db_password=`relation-get nova_password` - - if [[ -z $db_host ]] || [[ -z $db_password ]] ; then - juju-log "$CHARM - db_changed: db_host||db_password set, will retry." - exit 0 - fi - - local nova_db=$(config-get nova-db) - local db_user=$(config-get db-user) - juju-log "$CHARM - db_changed: Configuring nova.conf for access to $nova_db" - - set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$nova_db" - - if [ "$NET_MANAGER" == "Quantum" ]; then - local quantum_db_password=`relation-get quantum_password` - set_or_update sql_connection "mysql://quantum:$quantum_db_password@$db_host/quantum?charset=utf8" \ - $QUANTUM_PLUGIN_CONF "DATABASE" - fi - - service_ctl all restart -} - -function image-service_changed { - local api_server=`relation-get glance-api-server` - if [[ -z $api_server ]] ; then - echo "image-service_changed: api_server not yet set. Exit 0 and retry" - exit 0 - fi - - if [[ "$(get_os_codename_package nova-common)" == "essex" ]] ; then - # essex needs glance_api_servers urls stripped of protocol. 
- api_server="$(echo $api_server | awk '{gsub(/http:\/\/|https:\/\//,"")}1')" - fi - - set_or_update glance_api_servers $api_server - service_ctl all restart -} - -function compute_joined { - migration_enabled || return 0 - local relid="$1" - [[ -n "$relid" ]] && relid="-r $relid" - migration_auth="$(config-get migration-auth-type)" - case "$migration_auth" in - "none"|"None") return 0 ;; - "ssh") relation-set $relid ssh_public_key="$(cat /root/.ssh/id_rsa.pub)" ;; - esac - relation-set $relid migration_auth_type="$migration_auth" -} - -function compute_changed { - # nova-c-c will inform us of the configured network manager. nova-compute - # needs to configure itself accordingly. - network_manager=`relation-get network_manager` - if [[ -n "$network_manager" ]] ; then - if [ "$network_manager" == "Quantum" ]; then - configure_network_manager "$network_manager" "$(relation-get quantum_plugin)" - configure_quantum_bridge - # Quantum also needs access to the quantum database - # depending on add-relation order, this relation - # may already be present so ask it for credentials if so - r_ids="$(relation-ids shared-db)" - for id in $r_ids ; do - relation-set -r $id \ - quantum_database=quantum \ - quantum_username=quantum \ - quantum_hostname=$(unit-get private-address) - done - # Rabbit MQ relation may also already be in place - # shared vhost with nova so just grab settings and - # configure. need to be sure to use VIP if clustered. 
- local rabbit_clustered="" rabbit_vip="" rabbit_host="" rabbit_password="" - r_ids="$(relation-ids amqp)" - for id in $r_ids ; do - for unit in $(relation-list -r $id) ; do - [[ -z "$rabbit_clustered" ]] && - rabbit_clustered=$(relation-get -r $id clustered $unit) - [[ -z "$rabbit_vip" ]] && rabbit_vip=$(relation-get -r $id vip $unit) - [[ -z "$rabbit_password" ]] && - rabbit_password=$(relation-get -r $id password $unit) - rabbit_host=$(relation-get -r $id private-address $unit) - done - done - if [[ -n "$rabbit_clustered" ]] ; then - rabbit_host="$rabbit_vip" - fi - if [[ -n $rabbit_host ]] && \ - [[ -n $rabbit_password ]]; then - set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF" - set_or_update rabbit_userid "$(config-get rabbit-user)" "$QUANTUM_CONF" - set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF" - set_or_update rabbit_virtual_host "$(config-get rabbit-vhost)" "$QUANTUM_CONF" - fi - else - configure_network_manager "$network_manager" - fi - fi - - # nova-c-c informs us of what volume service has been deployed. - volume_service=`relation-get volume_service` - [[ -n "$volume_service" ]] && configure_volume_service "$volume_service" - - if migration_enabled ; then - case "$(config-get migration-auth-type)" in - "ssh") - local known_hosts="$(relation-get known_hosts)" - local authorized_keys="$(relation-get authorized_keys)" - if [[ -n "$known_hosts" ]] && - [[ -n "$authorized_keys" ]] ; then - juju-log "$CHARM: Saving new known_hosts+authorized_keys file." - echo "$known_hosts" | base64 -di >/root/.ssh/known_hosts - echo "$authorized_keys" | base64 -di >/root/.ssh/authorized_keys - fi - ;; - esac - fi - - # If Keytone is configured manage SSL certs, nova-compute needs a copy - # of its CA installed. - local ca_cert="$(relation-get ca_cert)" - if [[ -n "$ca_cert" ]] ; then - juju-log "Installing Keystone CA certificate." 
- ca_cert="$(echo $ca_cert | base64 -di)" - echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt - update-ca-certificates - fi - - # restart on all changed events. nova-c-c may send out a uuid to trigger - # remote restarts of services here (after db migrations, for instance) - service_ctl all restart -} - -function ceph_joined { - mkdir -p /etc/ceph - apt-get -y install ceph-common || exit 1 -} - -function ceph_changed { - SERVICE_NAME=`echo $JUJU_UNIT_NAME | cut -d / -f 1` - KEYRING=/etc/ceph/ceph.client.$SERVICE_NAME.keyring - KEY=`relation-get key` - if [ -n "$KEY" ]; then - # But only once - if [ ! -f $KEYRING ]; then - ceph-authtool $KEYRING \ - --create-keyring --name=client.$SERVICE_NAME \ - --add-key="$KEY" - chmod +r $KEYRING - fi - else - # No key - bail for the time being - exit 0 - fi - - MONS=`relation-list` - mon_hosts="" - for mon in $MONS; do - mon_hosts="$mon_hosts $(get_ip $(relation-get private-address $mon)):6789" - done - cat > /etc/ceph/ceph.conf << EOF -[global] - auth supported = $(relation-get auth) - keyring = /etc/ceph/\$cluster.\$name.keyring - mon host = $mon_hosts -EOF - - if [ ! -f /etc/ceph/secret.xml ]; then - # This is just a label and it must be consistent across - # nova-compute nodes to support live migration. - UUID="514c9fca-8cbe-11e2-9c52-3bc8c7819472" - cat > /etc/ceph/secret.xml << EOF - - $UUID - - client.$SERVICE_NAME secret - - -EOF - # Create secret for libvirt usage - # note that this does limit ceph usage to - # KVM only at this point in time. 
- virsh secret-define --file /etc/ceph/secret.xml - virsh secret-set-value --secret $UUID --base64 $KEY - set_or_update rbd_user $SERVICE_NAME - set_or_update rbd_secret_uuid $UUID - set_or_update rbd_pool nova - service_ctl all restart - fi -} - -case $ARG0 in - "install") install_hook ;; - "upgrade-charm") upgrade_hook ;; - "start"|"stop") exit 0 ;; - "config-changed") config_changed ;; - "amqp-relation-joined") amqp_joined ;; - "amqp-relation-changed") amqp_changed ;; - "shared-db-relation-joined") db_joined ;; - "shared-db-relation-changed") db_changed ;; - "image-service-relation-joined") exit 0 ;; - "image-service-relation-changed") image-service_changed ;; - "identity-service-relation-joined") keystone_joined ;; - "identity-service-relation-changed") exit 0 ;; - "ceph-relation-joined") ceph_joined;; - "ceph-relation-changed") ceph_changed;; - "cloud-compute-relation-joined" ) compute_joined ;; - "cloud-compute-relation-changed") compute_changed ;; -esac diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_relations.py new file mode 100755 index 00000000..7f352559 --- /dev/null +++ b/hooks/nova_compute_relations.py @@ -0,0 +1,155 @@ +#!/usr/bin/python + +import os + +from charmhelpers.core.hookenv import ( + Hooks, + config, + log, + relation_ids, + relation_set, + service_name, + unit_get, +) + +from charmhelpers.core.host import ( + apt_install, + apt_update, + restart_on_change, +) + +from charmhelpers.contrib.openstack.utils import ( + configure_installation_source, + openstack_upgrade_available, +) + +from nova_compute_utils import ( + PACKAGES, + RESTART_MAP, + import_authorized_keys, + import_keystone_ca_cert, + migration_enabled, + configure_live_migration, + configure_network_service, + configure_volume_service, + do_openstack_upgrade, + quantum_enabled, + quantum_plugin_config, + public_ssh_key, + register_configs, +) + +from misc_utils import ( + ensure_ceph_keyring, +) + +hooks = Hooks() +CONFIGS = register_configs() + + 
+@hooks.hook() +def install(): + configure_installation_source(config('openstack-origin')) + apt_update() + apt_install(PACKAGES, fatal=True) + + +@hooks.hook('config-changed') +@restart_on_change(RESTART_MAP) +def config_changed(): + if openstack_upgrade_available('nova-common'): + do_openstack_upgrade() + + configure_live_migration() + if migration_enabled() and config('migration-auth-type') == 'ssh': + # Check-in with nova-c-c and register new ssh key, if it has just been + # generated. + [compute_joined(rid) for rid in relation_ids('cloud-compute')] + + +@hooks.hook('amqp-relation-joined') +@restart_on_change(RESTART_MAP) +def amqp_joined(): + relation_set(username=config('rabbit-user'), vhost=config('rabbit-vhost')) + + +@hooks.hook('amqp-relation-changed') +@restart_on_change(RESTART_MAP) +def amqp_changed(): + if 'amqp' not in CONFIGS.complete_contexts(): + log('amqp relation incomplete. Peer not ready?') + return + CONFIGS.write('/etc/nova/nova.conf') + if quantum_enabled(): + CONFIGS.write('/etc/quantum/quantum.conf') + + +@hooks.hook('shared-db-relation-joined') +def db_joined(): + relation_set(database=config('database'), username=config('database-user'), + hostname=unit_get('private-address')) + + +@hooks.hook('shared-db-relation-changed') +@restart_on_change(RESTART_MAP) +def db_changed(): + if 'shared-db' not in CONFIGS.complete_contexts(): + log('shared-db relation incomplete. Peer not ready?') + return + CONFIGS.write('/etc/nova/nova.conf') + if quantum_enabled(): + CONFIGS.write(quantum_plugin_config()) + + +@hooks.hook('image-service-relation-changed') +@restart_on_change(RESTART_MAP) +def image_service_changed(): + if 'image-service' not in CONFIGS.complete_contexts(): + log('image-service relation incomplete. 
Peer not ready?') + return + CONFIGS.write('/etc/nova/nova.conf') + + +@hooks.hook('cloud-compute-relation-joined') +def compute_joined(rid=None): + if not migration_enabled(): + return + auth_type = config('migration-auth-type') + settings = { + 'migration_auth_type': auth_type + } + if auth_type == 'ssh': + settings['ssh_public_key'] = public_ssh_key() + relation_set(relation_id=rid, **settings) + + +@hooks.hook('cloud-compute-relation-changed') +@restart_on_change(RESTART_MAP) +def compute_changed(): + configure_network_service() + configure_volume_service() + import_authorized_keys() + import_keystone_ca_cert() + + +@hooks.hook('ceph-relation-joined') +@restart_on_change(RESTART_MAP) +def ceph_joined(): + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + apt_install('ceph-common') + + +@hooks.hook('ceph-relation-changed') +@restart_on_change(RESTART_MAP) +def ceph_changed(): + if 'ceph' not in CONFIGS.complete_contexts(): + log('ceph relation incomplete. Peer not ready?') + return + svc = service_name() + if not ensure_ceph_keyring(service=svc): + log('Could not create ceph keyring: peer not ready?') + return + CONFIGS.write('/etc/ceph/ceph.conf') + CONFIGS.write('/etc/ceph/secret.xml') + CONFIGS.write('/etc/nova/nova.conf') diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py new file mode 100644 index 00000000..86d567e9 --- /dev/null +++ b/hooks/nova_compute_utils.py @@ -0,0 +1,75 @@ +from charmhelpers.core.hookenv import ( + config, +) + +PACKAGES = [] + +RESTART_MAP = { + '/etc/libvirt/qemu.conf': ['libvirt-bin'], + '/etc/default/libvirt-bin': ['libvirt-bin'] +} + +# This is just a label and it must be consistent across +# nova-compute nodes to support live migration. 
+CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' + + +def migration_enabled(): + return config('enable-live-migration').lower() == 'true' + + +def quantum_enabled(): + return config('network-manager').lower() == 'quantum' + + +def quantum_plugin_config(): + pass + + +def public_ssh_key(user='root'): + pass + + +def initialize_ssh_keys(): + pass + + +def import_authorized_keys(): + pass + + +def configure_live_migration(configs=None): + """ + Ensure libvirt live migration is properly configured or disabled, + depending on current config setting. + """ + configs = configs or register_configs() + configs.write('/etc/libvirt/libvirtd.conf') + configs.write('/etc/default/libvirt-bin') + configs.write('/etc/nova/nova.conf') + + if not migration_enabled(): + return + + if config('migration-auth-type') == 'ssh': + initialize_ssh_keys() + + +def do_openstack_upgrade(): + pass + + +def register_configs(): + pass + + +def import_keystone_ca_cert(): + pass + + +def configure_network_service(): + pass + + +def configure_volume_service(): + pass diff --git a/hooks/shared-db-relation-changed b/hooks/shared-db-relation-changed index 6f9ff4f5..6eb6593e 120000 --- a/hooks/shared-db-relation-changed +++ b/hooks/shared-db-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/shared-db-relation-joined b/hooks/shared-db-relation-joined index 6f9ff4f5..6eb6593e 120000 --- a/hooks/shared-db-relation-joined +++ b/hooks/shared-db-relation-joined @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/start b/hooks/start deleted file mode 120000 index 6f9ff4f5..00000000 --- a/hooks/start +++ /dev/null @@ -1 +0,0 @@ -nova-compute-relations \ No newline at end of file diff --git a/hooks/stop b/hooks/stop deleted file mode 120000 index 6f9ff4f5..00000000 --- a/hooks/stop +++ /dev/null @@ -1 +0,0 @@ 
-nova-compute-relations \ No newline at end of file From 4b969f772295cb1cf873f41d3c1a56750240a71d Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 18 Jul 2013 21:41:07 -0700 Subject: [PATCH 02/84] Construct package list and restart_map dynamically. Check-in tests. --- charm-helpers.yaml | 10 ++ hooks/install | 1 + hooks/nova_compute_relations.py | 28 +-- hooks/nova_compute_utils.py | 130 +++++++++++++- tests/__init__.py | 0 tests/test_nova_compute_relations.py | 257 +++++++++++++++++++++++++++ tests/test_nova_compute_utils.py | 105 +++++++++++ tests/test_utils.py | 101 +++++++++++ 8 files changed, 610 insertions(+), 22 deletions(-) create mode 100644 charm-helpers.yaml create mode 120000 hooks/install create mode 100644 tests/__init__.py create mode 100644 tests/test_nova_compute_relations.py create mode 100644 tests/test_nova_compute_utils.py create mode 100644 tests/test_utils.py diff --git a/charm-helpers.yaml b/charm-helpers.yaml new file mode 100644 index 00000000..7d9aa62e --- /dev/null +++ b/charm-helpers.yaml @@ -0,0 +1,10 @@ +branch: lp:charm-helpers +destination: hooks/charmhelpers +include: + - core + - contrib.openstack|inc=* + - contrib.storage + - contrib.hahelpers: + - apache + - ceph + - cluster diff --git a/hooks/install b/hooks/install new file mode 120000 index 00000000..6eb6593e --- /dev/null +++ b/hooks/install @@ -0,0 +1 @@ +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_relations.py index 7f352559..67ed6f62 100755 --- a/hooks/nova_compute_relations.py +++ b/hooks/nova_compute_relations.py @@ -24,8 +24,7 @@ from charmhelpers.contrib.openstack.utils import ( ) from nova_compute_utils import ( - PACKAGES, - RESTART_MAP, + determine_packages, import_authorized_keys, import_keystone_ca_cert, migration_enabled, @@ -33,9 +32,11 @@ from nova_compute_utils import ( configure_network_service, configure_volume_service, do_openstack_upgrade, + quantum_attribute, 
quantum_enabled, - quantum_plugin_config, + quantum_plugin, public_ssh_key, + restart_map, register_configs, ) @@ -51,11 +52,11 @@ CONFIGS = register_configs() def install(): configure_installation_source(config('openstack-origin')) apt_update() - apt_install(PACKAGES, fatal=True) + apt_install(determine_packages(), fatal=True) @hooks.hook('config-changed') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def config_changed(): if openstack_upgrade_available('nova-common'): do_openstack_upgrade() @@ -68,13 +69,13 @@ def config_changed(): @hooks.hook('amqp-relation-joined') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def amqp_joined(): relation_set(username=config('rabbit-user'), vhost=config('rabbit-vhost')) @hooks.hook('amqp-relation-changed') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def amqp_changed(): if 'amqp' not in CONFIGS.complete_contexts(): log('amqp relation incomplete. Peer not ready?') @@ -91,18 +92,19 @@ def db_joined(): @hooks.hook('shared-db-relation-changed') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def db_changed(): if 'shared-db' not in CONFIGS.complete_contexts(): log('shared-db relation incomplete. Peer not ready?') return CONFIGS.write('/etc/nova/nova.conf') if quantum_enabled(): - CONFIGS.write(quantum_plugin_config()) + plugin = quantum_plugin() + CONFIGS.write(quantum_attribute(plugin, 'config')) @hooks.hook('image-service-relation-changed') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def image_service_changed(): if 'image-service' not in CONFIGS.complete_contexts(): log('image-service relation incomplete. 
Peer not ready?') @@ -124,7 +126,7 @@ def compute_joined(rid=None): @hooks.hook('cloud-compute-relation-changed') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def compute_changed(): configure_network_service() configure_volume_service() @@ -133,7 +135,7 @@ def compute_changed(): @hooks.hook('ceph-relation-joined') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def ceph_joined(): if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -141,7 +143,7 @@ def ceph_joined(): @hooks.hook('ceph-relation-changed') -@restart_on_change(RESTART_MAP) +@restart_on_change(restart_map()) def ceph_changed(): if 'ceph' not in CONFIGS.complete_contexts(): log('ceph relation incomplete. Peer not ready?') diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 86d567e9..d08b625b 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -1,12 +1,47 @@ +import copy from charmhelpers.core.hookenv import ( config, + log, + related_units, + relation_ids, + relation_get, + ERROR, ) -PACKAGES = [] +BASE_PACKAGES = [ + 'nova-compute', + 'genisoimage', # was missing as a package dependency until raring. +] -RESTART_MAP = { +BASE_RESTART_MAP = { '/etc/libvirt/qemu.conf': ['libvirt-bin'], - '/etc/default/libvirt-bin': ['libvirt-bin'] + '/etc/default/libvirt-bin': ['libvirt-bin'], + '/etc/nova/nova.conf': ['nova-compute'], + '/etc/nova/nova-compute.conf': ['nova-compute'], +} + + +QUANTUM_PLUGINS = { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini', + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': ['quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'services': [], + 'packages': ['quantum-plugin-nicira'], + } +} + +# Maps virt-type config to a compute package(s). 
+VIRT_TYPES = { + 'kvm': ['nova-compute-kvm'], + 'qemu': ['nova-compute-qemu'], + 'xen': ['nova-compute-xen'], + 'uml': ['nova-compute-uml'], + 'lxc': ['nova-compute-lxc'], } # This is just a label and it must be consistent across @@ -14,6 +49,58 @@ RESTART_MAP = { CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' +def restart_map(): + ''' + Constructs a restart map based on charm config settings and relation + state. + ''' + _restart_map = copy.copy(BASE_RESTART_MAP) + + net_manager = network_manager() + + if (net_manager in ['FlatManager', 'FlatDHCPManager'] and + config('multi-host').lower() == 'yes'): + _restart_map['/etc/nova/nova.conf'].extend( + ['nova-api', 'nova-network'] + ) + elif net_manager == 'Quantum': + plugin = quantum_plugin() + if plugin: + conf = quantum_attribute(plugin, 'config') + svcs = quantum_attribute(plugin, 'services') + _restart_map[conf] = svcs + _restart_map['/etc/quantum/quantum.conf'] = svcs + return _restart_map + + +def determine_packages(): + packages = [] + BASE_PACKAGES + + net_manager = network_manager() + if (net_manager in ['FlatManager', 'FlatDHCPManager'] and + config('multi-host').lower() == 'yes'): + packages.extend(['nova-api', 'nova-network']) + elif net_manager == 'Quantum': + plugin = quantum_plugin() + packages.extend(quantum_attribute(plugin, 'packages')) + + if relation_ids('ceph'): + packages.append('ceph-common') + + virt_type = config('virt-type') + try: + packages.extend(VIRT_TYPES[virt_type]) + except KeyError: + log('Unsupported virt-type configured: %s' % virt_type) + + raise + return packages + + +def register_configs(): + pass + + def migration_enabled(): return config('enable-live-migration').lower() == 'true' @@ -22,8 +109,37 @@ def quantum_enabled(): return config('network-manager').lower() == 'quantum' -def quantum_plugin_config(): - pass +def _network_config(): + ''' + Obtain all relevant network configuration settings from nova-c-c via + cloud-compute interface. 
+ ''' + settings = ['network_manager', 'quantum_plugin'] + net_config = {} + for rid in relation_ids('cloud-compute'): + for unit in related_units(rid): + for setting in settings: + value = relation_get(setting, rid=rid, unit=unit) + if value: + net_config[setting] = value + return net_config + + +def quantum_plugin(): + return _network_config().get('quantum_plugin') + + +def network_manager(): + return _network_config().get('network_manager') + + +def quantum_attribute(plugin, attr): + try: + _plugin = QUANTUM_PLUGINS[plugin] + except KeyError: + log('Unrecognised plugin for quantum: %s' % plugin, level=ERROR) + raise + return _plugin[attr] def public_ssh_key(user='root'): @@ -59,10 +175,6 @@ def do_openstack_upgrade(): pass -def register_configs(): - pass - - def import_keystone_ca_cert(): pass diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_nova_compute_relations.py b/tests/test_nova_compute_relations.py new file mode 100644 index 00000000..67dcc1c8 --- /dev/null +++ b/tests/test_nova_compute_relations.py @@ -0,0 +1,257 @@ +from mock import call, patch, MagicMock + +from tests.test_utils import CharmTestCase + +import hooks.nova_compute_utils as utils + +_reg = utils.register_configs +_map = utils.restart_map + +utils.register_configs = MagicMock() +utils.restart_map = MagicMock() + +import hooks.nova_compute_relations as relations + +utils.register_configs = _reg +utils.restart_map = _map + +TO_PATCH = [ + # charmhelpers.core.hookenv + 'Hooks', + 'config', + 'log', + 'relation_ids', + 'relation_set', + 'service_name', + 'unit_get', + # charmhelpers.core.host + 'apt_install', + 'apt_update', + 'restart_on_change', + #charmhelpers.contrib.openstack.utils + 'configure_installation_source', + 'openstack_upgrade_available', + # nova_compute_utils + #'PACKAGES', + 'restart_map', + 'determine_packages', + 'import_authorized_keys', + 'import_keystone_ca_cert', + 'migration_enabled', + 
'configure_live_migration', + 'configure_network_service', + 'configure_volume_service', + 'do_openstack_upgrade', + 'quantum_attribute', + 'quantum_enabled', + 'quantum_plugin', + 'public_ssh_key', + 'register_configs', + # misc_utils + 'ensure_ceph_keyring', +] + + +class NovaComputeRelationsTests(CharmTestCase): + def setUp(self): + super(NovaComputeRelationsTests, self).setUp(relations, + TO_PATCH) + self.config.side_effect = self.test_config.get + + def test_install_hook(self): + repo = 'cloud:precise-grizzly' + self.test_config.set('openstack-origin', repo) + self.determine_packages.return_value = ['foo', 'bar'] + relations.install() + self.configure_installation_source.assert_called_with(repo) + self.assertTrue(self.apt_update.called) + self.apt_install.assert_called_with(['foo', 'bar'], fatal=True) + + def test_config_changed_with_upgrade(self): + self.openstack_upgrade_available.return_value = True + relations.config_changed() + self.assertTrue(self.do_openstack_upgrade.called) + + @patch.object(relations, 'compute_joined') + def test_config_changed_with_migration(self, compute_joined): + self.migration_enabled.return_value = True + self.test_config.set('migration-auth-type', 'ssh') + self.relation_ids.return_value = [ + 'cloud-compute:0', + 'cloud-compute:1' + ] + relations.config_changed() + ex = [ + call('cloud-compute:0'), + call('cloud-compute:1'), + ] + self.assertEquals(ex, compute_joined.call_args_list) + + @patch.object(relations, 'compute_joined') + def test_config_changed_no_upgrade_no_migration(self, compute_joined): + self.openstack_upgrade_available.return_value = False + self.migration_enabled.return_value = False + relations.config_changed() + self.assertFalse(self.do_openstack_upgrade.called) + self.assertTrue(self.configure_live_migration) + self.assertFalse(compute_joined.called) + + def test_amqp_joined(self): + relations.amqp_joined() + self.relation_set.assert_called_with(username='nova', vhost='nova') + + @patch.object(relations, 
'CONFIGS') + def test_amqp_changed_missing_relation_data(self, configs): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = [] + relations.amqp_changed() + self.log.assert_called_with( + 'amqp relation incomplete. Peer not ready?' + ) + + def _amqp_test(self, configs, quantum=False): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = ['amqp'] + configs.write = MagicMock() + self.quantum_enabled.return_value = quantum + relations.amqp_changed() + + @patch.object(relations, 'CONFIGS') + def test_amqp_changed_with_data_no_quantum(self, configs): + self._amqp_test(configs, quantum=False) + self.assertEquals([call('/etc/nova/nova.conf')], + configs.write.call_args_list) + + @patch.object(relations, 'CONFIGS') + def test_amqp_changed_with_data_and_quantum(self, configs): + self._amqp_test(configs, quantum=True) + self.assertEquals([call('/etc/nova/nova.conf'), + call('/etc/quantum/quantum.conf')], + configs.write.call_args_list) + + def test_db_joined(self): + self.unit_get.return_value = 'nova.foohost.com' + relations.db_joined() + self.relation_set.assert_called_with(database='nova', username='nova', + hostname='nova.foohost.com') + self.unit_get.assert_called_with('private-address') + + @patch.object(relations, 'CONFIGS') + def test_db_changed_missing_relation_data(self, configs): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = [] + relations.db_changed() + self.log.assert_called_with( + 'shared-db relation incomplete. Peer not ready?' 
+ ) + + def _shared_db_test(self, configs, quantum=False): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = ['shared-db'] + configs.write = MagicMock() + self.quantum_enabled.return_value = quantum + relations.db_changed() + + @patch.object(relations, 'CONFIGS') + def test_db_changed_with_data_no_quantum(self, configs): + self._shared_db_test(configs, quantum=False) + self.assertEquals([call('/etc/nova/nova.conf')], + configs.write.call_args_list) + + @patch.object(relations, 'CONFIGS') + def test_db_changed_with_data_and_quantum(self, configs): + self.quantum_attribute.return_value = '/etc/quantum/plugin.conf' + self._shared_db_test(configs, quantum=True) + ex = [call('/etc/nova/nova.conf'), call('/etc/quantum/plugin.conf')] + self.assertEquals(ex, configs.write.call_args_list) + + @patch.object(relations, 'CONFIGS') + def test_image_service_missing_relation_data(self, configs): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = [] + relations.image_service_changed() + self.log.assert_called_with( + 'image-service relation incomplete. Peer not ready?' 
+ ) + + @patch.object(relations, 'CONFIGS') + def test_image_service_with_relation_data(self, configs): + configs.complete_contexts = MagicMock() + configs.write = MagicMock() + configs.complete_contexts.return_value = ['image-service'] + relations.image_service_changed() + configs.write.assert_called_with('/etc/nova/nova.conf') + + def test_compute_joined_no_migration(self): + self.migration_enabled.return_value = False + relations.compute_joined() + self.assertFalse(self.relation_set.called) + + def test_compute_joined_with_ssh_migration(self): + self.migration_enabled.return_value = True + self.test_config.set('migration-auth-type', 'ssh') + self.public_ssh_key.return_value = 'foo' + relations.compute_joined() + self.relation_set.assert_called_with( + relation_id=None, + ssh_public_key='foo', + migration_auth_type='ssh' + ) + relations.compute_joined(rid='cloud-compute:2') + self.relation_set.assert_called_with( + relation_id='cloud-compute:2', + ssh_public_key='foo', + migration_auth_type='ssh' + ) + + def test_compute_changed(self): + relations.compute_changed() + expected_funcs = [ + self.configure_network_service, + self.configure_volume_service, + self.import_authorized_keys, + self.import_keystone_ca_cert, + ] + for func in expected_funcs: + self.assertTrue(func.called) + + @patch('os.mkdir') + @patch('os.path.isdir') + def test_ceph_joined(self, isdir, mkdir): + isdir.return_value = False + relations.ceph_joined() + mkdir.assert_called_with('/etc/ceph') + self.apt_install.assert_called_with('ceph-common') + + @patch.object(relations, 'CONFIGS') + def test_ceph_changed_missing_relation_data(self, configs): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = [] + relations.ceph_changed() + self.log.assert_called_with( + 'ceph relation incomplete. Peer not ready?' 
+ ) + + @patch.object(relations, 'CONFIGS') + def test_ceph_changed_no_keyring(self, configs): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = ['ceph'] + self.ensure_ceph_keyring.return_value = False + relations.ceph_changed() + self.log.assert_called_with( + 'Could not create ceph keyring: peer not ready?' + ) + + @patch.object(relations, 'CONFIGS') + def test_ceph_changed_with_key_and_relation_data(self, configs): + configs.complete_contexts = MagicMock() + configs.complete_contexts.return_value = ['ceph'] + configs.write = MagicMock() + self.ensure_ceph_keyring.return_value = True + relations.ceph_changed() + ex = [ + call('/etc/ceph/ceph.conf'), + call('/etc/ceph/secret.xml'), + call('/etc/nova/nova.conf'), + ] + self.assertEquals(ex, configs.write.call_args_list) diff --git a/tests/test_nova_compute_utils.py b/tests/test_nova_compute_utils.py new file mode 100644 index 00000000..3bae58ac --- /dev/null +++ b/tests/test_nova_compute_utils.py @@ -0,0 +1,105 @@ +from mock import patch + +from tests.test_utils import CharmTestCase + + +import hooks.nova_compute_utils as utils + +TO_PATCH = [ + 'config', + 'log', + 'related_units', + 'relation_ids', + 'relation_get', +] + + +class NovaComputeUtilsTests(CharmTestCase): + def setUp(self): + super(NovaComputeUtilsTests, self).setUp(utils, TO_PATCH) + self.config.side_effect = self.test_config.get + + @patch.object(utils, 'network_manager') + def test_determine_packages_nova_network(self, net_man): + net_man.return_value = 'FlatDHCPManager' + self.relation_ids.return_value = [] + result = utils.determine_packages() + ex = utils.BASE_PACKAGES + [ + 'nova-api', + 'nova-network', + 'nova-compute-kvm' + ] + self.assertEquals(ex, result) + + @patch.object(utils, 'quantum_plugin') + @patch.object(utils, 'network_manager') + def test_determine_packages_quantum(self, net_man, q_plugin): + net_man.return_value = 'Quantum' + q_plugin.return_value = 'ovs' + self.relation_ids.return_value = [] + 
result = utils.determine_packages() + ex = utils.BASE_PACKAGES + [ + 'quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms', + 'nova-compute-kvm' + ] + self.assertEquals(ex, result) + + @patch.object(utils, 'quantum_plugin') + @patch.object(utils, 'network_manager') + def test_determine_packages_quantum_ceph(self, net_man, q_plugin): + net_man.return_value = 'Quantum' + q_plugin.return_value = 'ovs' + self.relation_ids.return_value = ['ceph:0'] + result = utils.determine_packages() + ex = utils.BASE_PACKAGES + [ + 'quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms', + 'ceph-common', + 'nova-compute-kvm' + ] + self.assertEquals(ex, result) + + # NOTE: These tests faill if run together, something is holding + # a reference to BASE_RESOURCE_MAP ? + @patch.object(utils, 'network_manager') + def test_resource_map_nova_network_no_multihost(self, net_man): + self.test_config.set('multi-host', 'no') + net_man.return_value = 'FlatDHCPManager' + result = utils.restart_map() + ex = { + '/etc/default/libvirt-bin': ['libvirt-bin'], + '/etc/libvirt/qemu.conf': ['libvirt-bin'], + '/etc/nova/nova-compute.conf': ['nova-compute'], + '/etc/nova/nova.conf': ['nova-compute'] + } + self.assertEquals(ex, result) + + @patch.object(utils, 'network_manager') + def test_resource_map_nova_network(self, net_man): + net_man.return_value = 'FlatDHCPManager' + result = utils.restart_map() + ex = { + '/etc/default/libvirt-bin': ['libvirt-bin'], + '/etc/libvirt/qemu.conf': ['libvirt-bin'], + '/etc/nova/nova-compute.conf': ['nova-compute'], + '/etc/nova/nova.conf': ['nova-compute', 'nova-api', 'nova-network'] + } + self.assertEquals(ex, result) + + @patch.object(utils, 'quantum_plugin') + @patch.object(utils, 'network_manager') + def test_resource_map_quantum_ovs(self, net_man, _plugin): + net_man.return_value = 'Quantum' + _plugin.return_value = 'ovs' + result = utils.restart_map() + ex = { + '/etc/default/libvirt-bin': ['libvirt-bin'], + '/etc/libvirt/qemu.conf': 
['libvirt-bin'], + '/etc/nova/nova-compute.conf': ['nova-compute'], + '/etc/nova/nova.conf': ['nova-compute'], + '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini': + ['quantum-plugin-openvswitch-agent'], + '/etc/quantum/quantum.conf': ['quantum-plugin-openvswitch-agent'] + } + self.assertEquals(ex, result) diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 00000000..c75c739a --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,101 @@ +import logging +import unittest +import os +import yaml + +from mock import patch + + +def load_config(): + ''' + Walk backwards from __file__ looking for config.yaml, load and return the + 'options' section + ''' + config = None + f = __file__ + while config is None: + d = os.path.dirname(f) + if os.path.isfile(os.path.join(d, 'config.yaml')): + config = os.path.join(d, 'config.yaml') + break + f = d + + if not config: + logging.error('Could not find config.yaml in any parent directory ' + 'of %s. ' % file) + raise Exception + + return yaml.safe_load(open(config).read())['options'] + + +def get_default_config(): + ''' + Load default charm config from config.yaml return as a dict. + If no default is set in config.yaml, its value is None. 
+ ''' + default_config = {} + config = load_config() + for k, v in config.iteritems(): + if 'default' in v: + default_config[k] = v['default'] + else: + default_config[k] = None + return default_config + + +class CharmTestCase(unittest.TestCase): + def setUp(self, obj, patches): + super(CharmTestCase, self).setUp() + self.patches = patches + self.obj = obj + self.test_config = TestConfig() + self.test_relation = TestRelation() + self.patch_all() + + def patch(self, method): + _m = patch.object(self.obj, method) + mock = _m.start() + self.addCleanup(_m.stop) + return mock + + def patch_all(self): + for method in self.patches: + setattr(self, method, self.patch(method)) + + +class TestConfig(object): + def __init__(self): + self.config = get_default_config() + + def get(self, attr=None): + if not attr: + return self.get_all() + try: + return self.config[attr] + except KeyError: + return None + + def get_all(self): + return self.config + + def set(self, attr, value): + if attr not in self.config: + raise KeyError + self.config[attr] = value + + +class TestRelation(object): + def __init__(self, relation_data={}): + self.relation_data = relation_data + + def set(self, relation_data): + self.relation_data = relation_data + + def get(self, attr=None, unit=None, rid=None): + if attr == None: + return self.relation_data + elif attr in self.relation_data: + return self.relation_data[attr] + return None + + From 09296221484075e7242874a17ce89d565afd2726 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 26 Jul 2013 14:22:44 -0700 Subject: [PATCH 03/84] Update tests. convert restart_map generation to a broader resource_map. 
--- hooks/nova_compute_utils.py | 82 +++++++++++++++++++------- tests/__init__.py | 3 + tests/test_nova_compute_utils.py | 98 +++++++++++++++++++++++++------- tests/test_utils.py | 19 ++++++- 4 files changed, 157 insertions(+), 45 deletions(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index d08b625b..54b624eb 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -1,4 +1,8 @@ -import copy + +from copy import copy, deepcopy +import os +import pwd + from charmhelpers.core.hookenv import ( config, log, @@ -13,11 +17,23 @@ BASE_PACKAGES = [ 'genisoimage', # was missing as a package dependency until raring. ] -BASE_RESTART_MAP = { - '/etc/libvirt/qemu.conf': ['libvirt-bin'], - '/etc/default/libvirt-bin': ['libvirt-bin'], - '/etc/nova/nova.conf': ['nova-compute'], - '/etc/nova/nova-compute.conf': ['nova-compute'], +BASE_RESOURCE_MAP = { + '/etc/libvirt/qemu.conf': { + 'services': ['libvirt-bin'], + 'contexts': [], + }, + '/etc/default/libvirt-bin': { + 'services': ['libvirt-bin'], + 'contexts': [], + }, + '/etc/nova/nova.conf': { + 'services': ['nova-compute'], + 'contexts': [], + }, + '/etc/nova/nova-compute.conf': { + 'services': ['nova-compute'], + 'contexts': [], + }, } @@ -49,18 +65,18 @@ VIRT_TYPES = { CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' -def restart_map(): +def resource_map(): ''' - Constructs a restart map based on charm config settings and relation - state. + Dynamically generate a map of resources that will be managed for a single + hook execution. ''' - _restart_map = copy.copy(BASE_RESTART_MAP) - + # TODO: Cache this on first call? 
+ resource_map = deepcopy(BASE_RESOURCE_MAP) net_manager = network_manager() if (net_manager in ['FlatManager', 'FlatDHCPManager'] and config('multi-host').lower() == 'yes'): - _restart_map['/etc/nova/nova.conf'].extend( + resource_map['/etc/nova/nova.conf']['services'].extend( ['nova-api', 'nova-network'] ) elif net_manager == 'Quantum': @@ -68,9 +84,28 @@ def restart_map(): if plugin: conf = quantum_attribute(plugin, 'config') svcs = quantum_attribute(plugin, 'services') - _restart_map[conf] = svcs - _restart_map['/etc/quantum/quantum.conf'] = svcs - return _restart_map + ctxts = quantum_attribute(plugin, 'contexts') or [] + resource_map[conf] = {} + resource_map[conf]['services'] = svcs + resource_map[conf]['contexts'] = ctxts + resource_map['/etc/quantum/quantum.conf'] = { + 'services': svcs, + 'contexts': ctxts + } + return resource_map + +def restart_map(): + ''' + Constructs a restart map based on charm config settings and relation + state. + ''' + return {k: v['services'] for k, v in resource_map().iteritems()} + +def register_configs(): + ''' + Registers config files with their correpsonding context generators. 
+ ''' + pass def determine_packages(): @@ -97,10 +132,6 @@ def determine_packages(): return packages -def register_configs(): - pass - - def migration_enabled(): return config('enable-live-migration').lower() == 'true' @@ -139,11 +170,18 @@ def quantum_attribute(plugin, attr): except KeyError: log('Unrecognised plugin for quantum: %s' % plugin, level=ERROR) raise - return _plugin[attr] - + try: + return _plugin[attr] + except KeyError: + return None def public_ssh_key(user='root'): - pass + home = pwd.getpwnam(user).pw_dir + try: + with open(os.path.join(home, '.ssh', 'id_rsa')) as key: + return key.read().strip() + except: + return None def initialize_ssh_keys(): diff --git a/tests/__init__.py b/tests/__init__.py index e69de29b..afaed60c 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1,3 @@ +import sys + +sys.path.append('hooks/') diff --git a/tests/test_nova_compute_utils.py b/tests/test_nova_compute_utils.py index 3bae58ac..309aaf18 100644 --- a/tests/test_nova_compute_utils.py +++ b/tests/test_nova_compute_utils.py @@ -1,6 +1,6 @@ from mock import patch -from tests.test_utils import CharmTestCase +from tests.test_utils import CharmTestCase, patch_open import hooks.nova_compute_utils as utils @@ -60,30 +60,51 @@ class NovaComputeUtilsTests(CharmTestCase): ] self.assertEquals(ex, result) - # NOTE: These tests faill if run together, something is holding - # a reference to BASE_RESOURCE_MAP ? 
@patch.object(utils, 'network_manager') def test_resource_map_nova_network_no_multihost(self, net_man): self.test_config.set('multi-host', 'no') net_man.return_value = 'FlatDHCPManager' - result = utils.restart_map() + result = utils.resource_map() ex = { - '/etc/default/libvirt-bin': ['libvirt-bin'], - '/etc/libvirt/qemu.conf': ['libvirt-bin'], - '/etc/nova/nova-compute.conf': ['nova-compute'], - '/etc/nova/nova.conf': ['nova-compute'] + '/etc/default/libvirt-bin': { + 'contexts': [], + 'services': ['libvirt-bin'] + }, + '/etc/libvirt/qemu.conf': { + 'contexts': [], + 'services': ['libvirt-bin'] + }, + '/etc/nova/nova-compute.conf': { + 'contexts': [], + 'services': ['nova-compute'] + }, + '/etc/nova/nova.conf': { + 'contexts': [], + 'services': ['nova-compute'] + }, } self.assertEquals(ex, result) @patch.object(utils, 'network_manager') def test_resource_map_nova_network(self, net_man): net_man.return_value = 'FlatDHCPManager' - result = utils.restart_map() + result = utils.resource_map() ex = { - '/etc/default/libvirt-bin': ['libvirt-bin'], - '/etc/libvirt/qemu.conf': ['libvirt-bin'], - '/etc/nova/nova-compute.conf': ['nova-compute'], - '/etc/nova/nova.conf': ['nova-compute', 'nova-api', 'nova-network'] + '/etc/default/libvirt-bin': { + 'contexts': [], 'services': ['libvirt-bin'] + }, + '/etc/libvirt/qemu.conf': { + 'contexts': [], + 'services': ['libvirt-bin'] + }, + '/etc/nova/nova-compute.conf': { + 'contexts': [], + 'services': ['nova-compute'] + }, + '/etc/nova/nova.conf': { + 'contexts': [], + 'services': ['nova-compute', 'nova-api', 'nova-network'] + } } self.assertEquals(ex, result) @@ -92,14 +113,47 @@ class NovaComputeUtilsTests(CharmTestCase): def test_resource_map_quantum_ovs(self, net_man, _plugin): net_man.return_value = 'Quantum' _plugin.return_value = 'ovs' - result = utils.restart_map() + result = utils.resource_map() ex = { - '/etc/default/libvirt-bin': ['libvirt-bin'], - '/etc/libvirt/qemu.conf': ['libvirt-bin'], - 
'/etc/nova/nova-compute.conf': ['nova-compute'], - '/etc/nova/nova.conf': ['nova-compute'], - '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini': - ['quantum-plugin-openvswitch-agent'], - '/etc/quantum/quantum.conf': ['quantum-plugin-openvswitch-agent'] - } + '/etc/default/libvirt-bin': { + 'contexts': [], + 'services': ['libvirt-bin'] + }, + '/etc/libvirt/qemu.conf': { + 'contexts': [], + 'services': ['libvirt-bin'] + }, + '/etc/nova/nova-compute.conf': { + 'contexts': [], + 'services': ['nova-compute'] + }, + '/etc/nova/nova.conf': { + 'contexts': [], + 'services': ['nova-compute'] + }, + '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini': { + 'contexts': [], + 'services': ['quantum-plugin-openvswitch-agent'] + }, + '/etc/quantum/quantum.conf': { + 'contexts': [], + 'services': ['quantum-plugin-openvswitch-agent']} + } + self.assertEquals(ex, result) + + @patch('__builtin__.open') + @patch('pwd.getpwnam') + def test_public_ssh_key_not_found(self, getpwnam, _open): + _open.side_effect = Exception + getpwnam.pw_dir = '/home/foo' + self.assertEquals(None, utils.public_ssh_key()) + + + @patch('pwd.getpwnam') + def test_public_ssh_key(self, getpwnam): + getpwnam.pw_dir = '/home/foo' + with patch_open() as (_open, _file): + _file.read.return_value = 'mypubkey' + result = utils.public_ssh_key('foo') + self.assertEquals(result, 'mypubkey') diff --git a/tests/test_utils.py b/tests/test_utils.py index c75c739a..a981c7c4 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -3,7 +3,8 @@ import unittest import os import yaml -from mock import patch +from contextlib import contextmanager +from mock import patch, MagicMock def load_config(): @@ -99,3 +100,19 @@ class TestRelation(object): return None +@contextmanager +def patch_open(): + '''Patch open() to allow mocking both open() itself and the file that is + yielded. 
+ + Yields the mock for "open" and "file", respectively.''' + mock_open = MagicMock(spec=open) + mock_file = MagicMock(spec=file) + + @contextmanager + def stub_open(*args, **kwargs): + mock_open(*args, **kwargs) + yield mock_file + + with patch('__builtin__.open', stub_open): + yield mock_open, mock_file From 82ddd1129a16db351ddd2fa1efa23d5a27112b89 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 29 Jul 2013 12:01:44 -0700 Subject: [PATCH 04/84] Add ssh + cert methods. --- hooks/nova_compute_utils.py | 39 ++++++++++++++++++--- tests/test_nova_compute_utils.py | 60 +++++++++++++++++++++++++++++--- 2 files changed, 89 insertions(+), 10 deletions(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 54b624eb..02aed387 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -1,8 +1,10 @@ - -from copy import copy, deepcopy import os import pwd +from base64 import b64decode +from copy import copy, deepcopy +from subprocess import check_call + from charmhelpers.core.hookenv import ( config, log, @@ -12,6 +14,10 @@ from charmhelpers.core.hookenv import ( ERROR, ) +from charmhelpers.contrib.openstack.context import ( + CA_CERT_PATH, +) + BASE_PACKAGES = [ 'nova-compute', 'genisoimage', # was missing as a package dependency until raring. @@ -188,9 +194,23 @@ def initialize_ssh_keys(): pass -def import_authorized_keys(): - pass +def import_authorized_keys(user='root'): + """Import SSH authorized_keys + known_hosts from a cloud-compute relation + and store in user's $HOME/.ssh. + """ + # XXX: Should this be managed via templates + contexts? + hosts = relation_get('known_hosts') + auth_keys = relation_get('authorized_keys') + if None in [hosts, auth_keys]: + return + dest = os.path.join(pwd.getpwnam(user).pw_dir, '.ssh') + log('Saving new known_hosts and authorized_keys file to: %s.' 
% dest) + + with open(os.path.join(dest, 'authorized_keys')) as _keys: + _keys.write(b64decode(auth_keys)) + with open(os.path.join(dest, 'known_hosts')) as _hosts: + _hosts.write(b64decode(hosts)) def configure_live_migration(configs=None): """ @@ -214,7 +234,16 @@ def do_openstack_upgrade(): def import_keystone_ca_cert(): - pass + """If provided, improt the Keystone CA cert that gets forwarded + to compute nodes via the cloud-compute interface + """ + ca_cert = relation_get('ca_cert') + if not ca_cert: + return + log('Writing Keystone CA certificate to %s' % CA_CERT_PATH) + with open(CA_CERT_PATH) as out: + out.write(b64decode(ca_cert)) + check_call(['update-ca-certificates']) def configure_network_service(): diff --git a/tests/test_nova_compute_utils.py b/tests/test_nova_compute_utils.py index 309aaf18..1709da85 100644 --- a/tests/test_nova_compute_utils.py +++ b/tests/test_nova_compute_utils.py @@ -1,4 +1,4 @@ -from mock import patch +from mock import patch, MagicMock, call from tests.test_utils import CharmTestCase, patch_open @@ -142,18 +142,68 @@ class NovaComputeUtilsTests(CharmTestCase): self.assertEquals(ex, result) + def fake_user(self, username='foo'): + user = MagicMock() + user.pw_dir = '/home/' + username + return user + @patch('__builtin__.open') @patch('pwd.getpwnam') def test_public_ssh_key_not_found(self, getpwnam, _open): - _open.side_effect = Exception - getpwnam.pw_dir = '/home/foo' + _open.side_effect = Exception + getpwnam.return_value = self.fake_user('foo') self.assertEquals(None, utils.public_ssh_key()) - @patch('pwd.getpwnam') def test_public_ssh_key(self, getpwnam): - getpwnam.pw_dir = '/home/foo' + getpwnam.return_value = self.fake_user('foo') with patch_open() as (_open, _file): _file.read.return_value = 'mypubkey' result = utils.public_ssh_key('foo') self.assertEquals(result, 'mypubkey') + + def test_import_authorized_keys_missing_data(self): + self.relation_get.return_value = None + with patch_open() as (_open, _file): + 
utils.import_authorized_keys(user='foo') + self.assertFalse(_open.called) + + @patch('pwd.getpwnam') + def test_import_authorized_keys(self, getpwnam): + getpwnam.return_value = self.fake_user('foo') + self.relation_get.side_effect = [ + 'Zm9vX2tleQo=', # relation_get('known_hosts') + 'Zm9vX2hvc3QK', # relation_get('authorized_keys') + ] + + ex_open = [ + call('/home/foo/.ssh/authorized_keys'), + call('/home/foo/.ssh/known_hosts') + ] + ex_write = [ + call('foo_host\n'), + call('foo_key\n'), + ] + + with patch_open() as (_open, _file): + utils.import_authorized_keys(user='foo') + self.assertEquals(ex_open, _open.call_args_list) + self.assertEquals(ex_write, _file.write.call_args_list) + + + @patch('subprocess.check_call') + def test_import_keystone_cert_missing_data(self, check_call): + self.relation_get.return_value = None + with patch_open() as (_open, _file): + utils.import_keystone_ca_cert() + self.assertFalse(_open.called) + self.assertFalse(check_call.called) + + @patch.object(utils, 'check_call') + def test_import_keystone_cert(self, check_call): + self.relation_get.return_value = 'Zm9vX2NlcnQK' + with patch_open() as (_open, _file): + utils.import_keystone_ca_cert() + _open.assert_called_with(utils.CA_CERT_PATH) + _file.write.assert_called_with('foo_cert\n') + check_call.assert_called_with(['update-ca-certificates']) From d0c46359b4512b58aeec891fa72eccd8bc8a1eeb Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 29 Jul 2013 18:10:45 -0700 Subject: [PATCH 05/84] Add main context generators. 
--- .../charmhelpers/contrib/openstack/context.py | 23 +++ hooks/nova_compute_contexts.py | 154 ++++++++++++++++++ hooks/nova_compute_utils.py | 68 ++++++-- tests/test_nova_compute_utils.py | 32 ++++ 4 files changed, 259 insertions(+), 18 deletions(-) create mode 100644 hooks/nova_compute_contexts.py diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index f146e0bc..c4706424 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -206,6 +206,29 @@ class HAProxyContext(OSContextGenerator): return {} +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-servce'] + + def __call__(self): + ''' + Obtains the glance API server from the image-service relation. Useful + in nova and cinder (currently). + ''' + log('Generating template context for image-service.') + rids = relation_ids('image-service') + if not rids: + return {} + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + log('ImageService context is incomplete. ' + 'Missing required relation data.') + return {} + + class ApacheSSLContext(OSContextGenerator): """ Generates a context for an apache vhost configuration that configures diff --git a/hooks/nova_compute_contexts.py b/hooks/nova_compute_contexts.py new file mode 100644 index 00000000..ecded543 --- /dev/null +++ b/hooks/nova_compute_contexts.py @@ -0,0 +1,154 @@ +from charmhelpers.core.hookenv import unit_private_ip +from charmhelpers.contrib.openstack import context + +from charmhelpers.core.host import apt_install, filter_installed_packages + +# This is just a label and it must be consistent across +# nova-compute nodes to support live migration. 
+CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' + +class NovaComputeLibvirtContext(context.OSContextGenerator): + interfaces = [] + + def __call__(self): + pass + + +class NovaComputeVirtContext(context.OSContextGenerator): + interfaces = [] + def __call__(self): + return {} + +class NovaComputeCephContext(context.CephContext): + def __call__(self): + ctxt = super(NovaComputeCephContext, self).__call__() + if not ctxt: + return {} + ctxt['ceph_secret_uuid'] = CEPH_SECRET_UUID + return ctxt + +class CloudComputeContext(context.OSContextGenerator): + ''' + Generates main context for writing nova.conf and quantum.conf templates + from a cloud-compute relation changed hook + + Note: individual quantum plugin contexts are handled elsewhere. + ''' + interfaces = ['cloud-compute'] + + def flat_dhcp_context(self): + ec2_host = relation_get('ec2_host') + if not ec2_host: + return {} + return { + 'network_manager': 'nova.network.manager.FlatDHCPManager', + 'flat_interface': config_get('flat_interface'), + 'ec2_host': ec2_host, + } + + def quantum_context(self): + quantum_ctxt = { + 'quantum_auth_strategy': 'keystone', + 'keystone_host': relation_get('keystone_host'), + 'auth_port': relation_get('auth_port'), + 'quantum_url': relation_get('quantum_url'), + 'quantum_admin_tenant_name': relation_get('service_tenant'), + 'quantum_admin_username': relation_get('service_username'), + 'quantum_admin_password': relation_get('service_password'), + 'quantum_security_groups': relation_get('quantum_security_groups'), + 'quantum_plugin': relation_get('quantum_plugin'), + } + missing = [k for k, v in quantum_ctxt.iteritems() if k == None] + if missing: + log('Missing required relation settings for Quantum: ' + + ' '.join(missing)) + return {} + + ks_url = 'http://%s:%s/v2.0' % (quantum_ctxt['keystone_host'], + quantum_ctxt['auth_port']) + quantum_ctxt['quantum_admin_auth_url'] = ks_url + quantum_ctxt['network_api_class'] = 'nova.network.quantumv2.api.API' + + def 
volume_context(self): + vol_ctxt = {} + vol_service = relation_get('volume_service') + if vol_service == 'cinder': + vol_ctxt['volume_api_class'] = 'nova.volume.cinder.API' + elif vol_service == 'nova-volume': + if get_os_codename_package('nova-common') in ['essex', 'folsom']: + vol_ctxt['volume_api_class'] = 'nova.volume.api.API' + else: + log('Invalid volume service received via cloud-compute: %s' % + vol_service, level=ERROR) + raise + return vol_ctxt + + def __call__(self): + rids = relation_list('cloud-compute') + if not rids: + return {} + + ctxt = {} + + net_manager = relation_get('network_manager').lower() + if net_manager == 'flatdhcpmanager': + ctxt.update(self.flat_dhcp_context()) + elif net_manager == 'quantum': + ctxt.update(self.quantum_context()) + + vol_service = relation_get('volume_service') + if vol_service: + ctxt.update(self.volume_context()) + + +class QuantumPluginContext(context.OSContextGenerator): + interfaces = [] + + def _ensure_packages(self, packages): + '''Install but do not upgrade required plugin packages''' + apt_install(filter_installed_packages(packages)) + + def ovs_context(self): + q_driver = 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\ + 'OVSQuantumPluginV2' + q_fw_driver = 'quantum.agent.linux.iptables_firewall.'\ + 'OVSHybridIptablesFirewallDriver' + + if get_os_codename_package('nova-common') in ['essex', 'folsom']: + n_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver' + else: + n_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver' + n_fw_driver = 'nova.virt.firewall.NoopFirewallDriver' + + ovs_ctxt = { + # quantum.conf + 'core_plugin': driver, + # nova.conf + 'libvirt_vif_driver': n_driver, + 'libvirt_use_virtio_for_bridges': True, + # ovs config + 'tenant_network_type': 'gre', + 'enable_tunneling': True, + 'tunnel_id_ranges': '1:1000', + 'local_ip': unit_private_ip(), + } + + if relation_get('quantum_security_groups').lower() == 'yes': + ovs_ctxt['security_group_api'] = 'quantum' + 
ovs_ctxt['nova_firewall_driver'] = n_fw_driver + ovs_ctxt['ovs_firewall_driver'] = q_fw_driver + + return ovs_ctxt + + def __call__(self): + from nova_compute_utils import quantum_attribute + + plugin = relation_get('quantum_plugin') + if not plugin: + return {} + self._ensure_pacakges(quantum_attribute(plugin, 'packages')) + + ctxt = {} + + if plugin == 'ovs': + ctxt.update(self.ovs_context()) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 02aed387..46314091 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -14,38 +14,67 @@ from charmhelpers.core.hookenv import ( ERROR, ) -from charmhelpers.contrib.openstack.context import ( - CA_CERT_PATH, +from charmhelpers.contrib.openstack.utils import get_os_codename_package +from charmhelpers.contrib.openstack import templating, context + +from nova_compute_contexts import ( + CloudComputeContext, + NovaComputeVirtContext, + NovaComputeLibvirtContext, + NovaComputeCephContext, + QuantumPluginContext, ) +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' + +TEMPLATES='templates/' + BASE_PACKAGES = [ 'nova-compute', 'genisoimage', # was missing as a package dependency until raring. 
] BASE_RESOURCE_MAP = { + '/etc/ceph/ceph.conf': { + 'contexts': [NovaComputeCephContext()], + 'services': [], + }, + '/etc/ceph/secret.xml': { + 'contexts': [NovaComputeCephContext()], + 'services': [], + }, '/etc/libvirt/qemu.conf': { 'services': ['libvirt-bin'], 'contexts': [], }, '/etc/default/libvirt-bin': { 'services': ['libvirt-bin'], - 'contexts': [], + 'contexts': [NovaComputeLibvirtContext()], }, '/etc/nova/nova.conf': { 'services': ['nova-compute'], - 'contexts': [], - }, - '/etc/nova/nova-compute.conf': { - 'services': ['nova-compute'], - 'contexts': [], + 'contexts': [context.AMQPContext(), + context.SharedDBContext(), + context.ImageServiceContext(), + CloudComputeContext(), + NovaComputeCephContext(), + QuantumPluginContext()] }, } +QUANTUM_RESOURCES = { + '/etc/quantum/quantum.conf': { + 'services': ['quantum-server'], + 'contexts': [context.AMQPContext()], + } +} + QUANTUM_PLUGINS = { 'ovs': { 'config': '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini', + 'contexts': [context.SharedDBContext(), + QuantumPluginContext()], 'services': ['quantum-plugin-openvswitch-agent'], 'packages': ['quantum-plugin-openvswitch-agent', 'openvswitch-datapath-dkms'], @@ -66,10 +95,6 @@ VIRT_TYPES = { 'lxc': ['nova-compute-lxc'], } -# This is just a label and it must be consistent across -# nova-compute nodes to support live migration. 
-CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' - def resource_map(): ''' @@ -87,6 +112,7 @@ def resource_map(): ) elif net_manager == 'Quantum': plugin = quantum_plugin() + resource_map.update(QUANTUM_RESOURCES) if plugin: conf = quantum_attribute(plugin, 'config') svcs = quantum_attribute(plugin, 'services') @@ -94,10 +120,6 @@ def resource_map(): resource_map[conf] = {} resource_map[conf]['services'] = svcs resource_map[conf]['contexts'] = ctxts - resource_map['/etc/quantum/quantum.conf'] = { - 'services': svcs, - 'contexts': ctxts - } return resource_map def restart_map(): @@ -109,9 +131,19 @@ def restart_map(): def register_configs(): ''' - Registers config files with their correpsonding context generators. + Returns an OSTemplateRenderer object with all required configs registered. ''' - pass + _resource_map = resource_map() + if quantum_enabled(): + _resource_map.update(QUANTUM_RESOURCES) + + release = get_os_codename_package('nova-common', fatal=False) or 'essex' + configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, + openstack_release=release) + + for cfg, d in _resource_map.iteritems(): + configs.register(cfg, d['contexts']) + return configs def determine_packages(): diff --git a/tests/test_nova_compute_utils.py b/tests/test_nova_compute_utils.py index 1709da85..92dd75f5 100644 --- a/tests/test_nova_compute_utils.py +++ b/tests/test_nova_compute_utils.py @@ -7,6 +7,7 @@ import hooks.nova_compute_utils as utils TO_PATCH = [ 'config', + 'get_os_codename_package', 'log', 'related_units', 'relation_ids', @@ -207,3 +208,34 @@ class NovaComputeUtilsTests(CharmTestCase): _open.assert_called_with(utils.CA_CERT_PATH) _file.write.assert_called_with('foo_cert\n') check_call.assert_called_with(['update-ca-certificates']) + + @patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer') + @patch.object(utils, 'quantum_enabled') + @patch.object(utils, 'resource_map') + def test_register_configs(self, resource_map, quantum, renderer): + 
quantum.return_value = False + self.get_os_codename_package.return_value = 'havana' + fake_renderer = MagicMock() + fake_renderer.register = MagicMock() + renderer.return_value = fake_renderer + ctxt1 = MagicMock() + ctxt2 = MagicMock() + rsc_map = { + '/etc/nova/nova.conf': { + 'services': ['nova-compute'], + 'contexts': [ctxt1], + }, + '/etc/nova/nova-compute.conf': { + 'services': ['nova-compute'], + 'contexts': [ctxt2], + }, + } + resource_map.return_value = rsc_map + utils.register_configs() + self.OSConfigRenderer.assert_called_with(openstack_release='havana', + templates_dir='templates/') + ex_reg = [ + call('/etc/nova/nova-compute.conf', [ctxt2]), + call('/etc/nova/nova.conf', [ctxt1]) + ] + self.assertEquals(fake_renderer.register.call_args_list, ex_reg) From 205ba9e1859ee156fcdb8b60fa0d35d98f492698 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 29 Jul 2013 18:31:44 -0700 Subject: [PATCH 06/84] Begin local context tests. --- ...te_contexts.py => nova_compute_context.py} | 20 +++++++++++++++---- tests/test_nova_compute_utils.py | 8 ++++++-- 2 files changed, 22 insertions(+), 6 deletions(-) rename hooks/{nova_compute_contexts.py => nova_compute_context.py} (92%) diff --git a/hooks/nova_compute_contexts.py b/hooks/nova_compute_context.py similarity index 92% rename from hooks/nova_compute_contexts.py rename to hooks/nova_compute_context.py index ecded543..53b347ab 100644 --- a/hooks/nova_compute_contexts.py +++ b/hooks/nova_compute_context.py @@ -1,8 +1,19 @@ -from charmhelpers.core.hookenv import unit_private_ip from charmhelpers.contrib.openstack import context from charmhelpers.core.host import apt_install, filter_installed_packages +from charmhelpers.core.hookenv import ( + config, + log, + relation_get, + relation_ids, + unit_private_ip, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import get_os_codename_package + + # This is just a label and it must be consistent across # nova-compute nodes to support live migration. 
CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' @@ -42,7 +53,7 @@ class CloudComputeContext(context.OSContextGenerator): return {} return { 'network_manager': 'nova.network.manager.FlatDHCPManager', - 'flat_interface': config_get('flat_interface'), + 'flat_interface': config('flat_interface'), 'ec2_host': ec2_host, } @@ -84,13 +95,14 @@ class CloudComputeContext(context.OSContextGenerator): return vol_ctxt def __call__(self): - rids = relation_list('cloud-compute') + rids = relation_ids('cloud-compute') if not rids: return {} ctxt = {} net_manager = relation_get('network_manager').lower() + import ipdb; ipdb.set_trace() ############################## Breakpoint ############################## if net_manager == 'flatdhcpmanager': ctxt.update(self.flat_dhcp_context()) elif net_manager == 'quantum': @@ -122,7 +134,7 @@ class QuantumPluginContext(context.OSContextGenerator): ovs_ctxt = { # quantum.conf - 'core_plugin': driver, + 'core_plugin': q_driver, # nova.conf 'libvirt_vif_driver': n_driver, 'libvirt_use_virtio_for_bridges': True, diff --git a/tests/test_nova_compute_utils.py b/tests/test_nova_compute_utils.py index 92dd75f5..ca08bb04 100644 --- a/tests/test_nova_compute_utils.py +++ b/tests/test_nova_compute_utils.py @@ -63,6 +63,7 @@ class NovaComputeUtilsTests(CharmTestCase): @patch.object(utils, 'network_manager') def test_resource_map_nova_network_no_multihost(self, net_man): + self.skipTest('skipped until contexts are properly mocked') self.test_config.set('multi-host', 'no') net_man.return_value = 'FlatDHCPManager' result = utils.resource_map() @@ -88,6 +89,8 @@ class NovaComputeUtilsTests(CharmTestCase): @patch.object(utils, 'network_manager') def test_resource_map_nova_network(self, net_man): + + self.skipTest('skipped until contexts are properly mocked') net_man.return_value = 'FlatDHCPManager' result = utils.resource_map() ex = { @@ -112,6 +115,7 @@ class NovaComputeUtilsTests(CharmTestCase): @patch.object(utils, 'quantum_plugin') 
@patch.object(utils, 'network_manager') def test_resource_map_quantum_ovs(self, net_man, _plugin): + self.skipTest('skipped until contexts are properly mocked.') net_man.return_value = 'Quantum' _plugin.return_value = 'ovs' result = utils.resource_map() @@ -209,7 +213,7 @@ class NovaComputeUtilsTests(CharmTestCase): _file.write.assert_called_with('foo_cert\n') check_call.assert_called_with(['update-ca-certificates']) - @patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer') + @patch('hooks.charmhelpers.contrib.openstack.templating.OSConfigRenderer') @patch.object(utils, 'quantum_enabled') @patch.object(utils, 'resource_map') def test_register_configs(self, resource_map, quantum, renderer): @@ -232,7 +236,7 @@ class NovaComputeUtilsTests(CharmTestCase): } resource_map.return_value = rsc_map utils.register_configs() - self.OSConfigRenderer.assert_called_with(openstack_release='havana', + renderer.assert_called_with(openstack_release='havana', templates_dir='templates/') ex_reg = [ call('/etc/nova/nova-compute.conf', [ctxt2]), From 8ce0817d58fad07ca2fd69fa37a73527d0df79b9 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 30 Jul 2013 19:03:39 -0700 Subject: [PATCH 07/84] Finish local context tests. 
--- hooks/nova_compute_context.py | 44 +++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 53b347ab..0f7e0056 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -47,13 +47,21 @@ class CloudComputeContext(context.OSContextGenerator): ''' interfaces = ['cloud-compute'] + def _ensure_packages(self, packages): + '''Install but do not upgrade required packages''' + apt_install(filter_installed_packages(packages)) + def flat_dhcp_context(self): ec2_host = relation_get('ec2_host') if not ec2_host: return {} + + if config('multi-host').lower() == 'yes': + self._ensure_packages(['nova-api', 'nova-network']) + return { 'network_manager': 'nova.network.manager.FlatDHCPManager', - 'flat_interface': config('flat_interface'), + 'flat_interface': config('flat-interface'), 'ec2_host': ec2_host, } @@ -69,7 +77,7 @@ class CloudComputeContext(context.OSContextGenerator): 'quantum_security_groups': relation_get('quantum_security_groups'), 'quantum_plugin': relation_get('quantum_plugin'), } - missing = [k for k, v in quantum_ctxt.iteritems() if k == None] + missing = [k for k, v in quantum_ctxt.iteritems() if v == None] if missing: log('Missing required relation settings for Quantum: ' + ' '.join(missing)) @@ -79,10 +87,13 @@ class CloudComputeContext(context.OSContextGenerator): quantum_ctxt['auth_port']) quantum_ctxt['quantum_admin_auth_url'] = ks_url quantum_ctxt['network_api_class'] = 'nova.network.quantumv2.api.API' + return quantum_ctxt def volume_context(self): - vol_ctxt = {} vol_service = relation_get('volume_service') + if not vol_service: + return {} + vol_ctxt = {} if vol_service == 'cinder': vol_ctxt['volume_api_class'] = 'nova.volume.cinder.API' elif vol_service == 'nova-volume': @@ -101,17 +112,16 @@ class CloudComputeContext(context.OSContextGenerator): ctxt = {} - net_manager = relation_get('network_manager').lower() - import 
ipdb; ipdb.set_trace() ############################## Breakpoint ############################## - if net_manager == 'flatdhcpmanager': - ctxt.update(self.flat_dhcp_context()) - elif net_manager == 'quantum': - ctxt.update(self.quantum_context()) - - vol_service = relation_get('volume_service') - if vol_service: - ctxt.update(self.volume_context()) + net_manager = relation_get('network_manager') + if net_manager: + ctxt['network_manager'] = net_manager + if net_manager.lower() == 'flatdhcpmanager': + ctxt.update(self.flat_dhcp_context()) + elif net_manager.lower() == 'quantum': + ctxt.update(self.quantum_context()) + ctxt.update(self.volume_context()) + return ctxt class QuantumPluginContext(context.OSContextGenerator): interfaces = [] @@ -145,7 +155,8 @@ class QuantumPluginContext(context.OSContextGenerator): 'local_ip': unit_private_ip(), } - if relation_get('quantum_security_groups').lower() == 'yes': + q_sec_groups = relation_get('quantum_security_groups') + if q_sec_groups and q_sec_groups.lower() == 'yes': ovs_ctxt['security_group_api'] = 'quantum' ovs_ctxt['nova_firewall_driver'] = n_fw_driver ovs_ctxt['ovs_firewall_driver'] = q_fw_driver @@ -158,9 +169,12 @@ class QuantumPluginContext(context.OSContextGenerator): plugin = relation_get('quantum_plugin') if not plugin: return {} - self._ensure_pacakges(quantum_attribute(plugin, 'packages')) + + self._ensure_packages(quantum_attribute(plugin, 'packages')) ctxt = {} if plugin == 'ovs': ctxt.update(self.ovs_context()) + + return ctxt From b85deb5b612d7892788451016a306f21153683c8 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 30 Jul 2013 19:12:53 -0700 Subject: [PATCH 08/84] Save local flag files describing net manager and q plugin. 
--- hooks/nova_compute_context.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 0f7e0056..7c8b5f18 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -18,6 +18,16 @@ from charmhelpers.contrib.openstack.utils import get_os_codename_package # nova-compute nodes to support live migration. CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' + +def _save_flag_file(path, data): + ''' + Saves local state about plugin or manager to specified file. + ''' + # Wonder if we can move away from this now? + with open(path, 'wb') as out: + out.write(data) + + class NovaComputeLibvirtContext(context.OSContextGenerator): interfaces = [] @@ -119,6 +129,7 @@ class CloudComputeContext(context.OSContextGenerator): ctxt.update(self.flat_dhcp_context()) elif net_manager.lower() == 'quantum': ctxt.update(self.quantum_context()) + _save_flag_file(path='/etc/nova/nm.conf', data=net_manager) ctxt.update(self.volume_context()) return ctxt @@ -177,4 +188,7 @@ class QuantumPluginContext(context.OSContextGenerator): if plugin == 'ovs': ctxt.update(self.ovs_context()) + _save_flag_file(path='/etc/nova/quantum_plugin.conf', data=plugin) + + return ctxt From f77750f9726ff50ae3a80e27b5670e11e8a1409e Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 30 Jul 2013 19:40:47 -0700 Subject: [PATCH 09/84] Add libvirt context to enable TCP listening when migration is configured. 
--- hooks/nova_compute_context.py | 20 ++++++++++++++++++-- hooks/nova_compute_relations.py | 9 +++++---- hooks/nova_compute_utils.py | 11 +---------- tests/test_nova_compute_relations.py | 4 ---- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 7c8b5f18..fe42cf82 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -29,10 +29,23 @@ def _save_flag_file(path, data): class NovaComputeLibvirtContext(context.OSContextGenerator): + ''' + Determines various libvirt options depending on live migration + configuration. + ''' interfaces = [] def __call__(self): - pass + + # enable tcp listening if configured for live migration. + migration = config('enable-live-migration') + if migration and migration.lower() == 'true': + opts = '-d -l' + else: + opts = '-d' + return { + 'libvirtd_opts': opts, + } class NovaComputeVirtContext(context.OSContextGenerator): @@ -40,6 +53,7 @@ class NovaComputeVirtContext(context.OSContextGenerator): def __call__(self): return {} + class NovaComputeCephContext(context.CephContext): def __call__(self): ctxt = super(NovaComputeCephContext, self).__call__() @@ -72,7 +86,7 @@ class CloudComputeContext(context.OSContextGenerator): return { 'network_manager': 'nova.network.manager.FlatDHCPManager', 'flat_interface': config('flat-interface'), - 'ec2_host': ec2_host, + 'ec2_dmz_host': ec2_host, } def quantum_context(self): @@ -168,8 +182,10 @@ class QuantumPluginContext(context.OSContextGenerator): q_sec_groups = relation_get('quantum_security_groups') if q_sec_groups and q_sec_groups.lower() == 'yes': + # nova.conf ovs_ctxt['security_group_api'] = 'quantum' ovs_ctxt['nova_firewall_driver'] = n_fw_driver + # ovs conf ovs_ctxt['ovs_firewall_driver'] = q_fw_driver return ovs_ctxt diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_relations.py index 67ed6f62..21b815cc 100755 --- a/hooks/nova_compute_relations.py +++ 
b/hooks/nova_compute_relations.py @@ -29,8 +29,6 @@ from nova_compute_utils import ( import_keystone_ca_cert, migration_enabled, configure_live_migration, - configure_network_service, - configure_volume_service, do_openstack_upgrade, quantum_attribute, quantum_enabled, @@ -67,6 +65,8 @@ def config_changed(): # generated. [compute_joined(rid) for rid in relation_ids('cloud-compute')] + CONFIGS.write_all() + @hooks.hook('amqp-relation-joined') @restart_on_change(restart_map()) @@ -128,8 +128,9 @@ def compute_joined(rid=None): @hooks.hook('cloud-compute-relation-changed') @restart_on_change(restart_map()) def compute_changed(): - configure_network_service() - configure_volume_service() + # rewriting all configs to pick up possible net or vol manager + # config advertised from controller. + CONFIGS.write_all() import_authorized_keys() import_keystone_ca_cert() diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 46314091..6c05d2b1 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -2,7 +2,7 @@ import os import pwd from base64 import b64decode -from copy import copy, deepcopy +from copy import deepcopy from subprocess import check_call from charmhelpers.core.hookenv import ( @@ -19,7 +19,6 @@ from charmhelpers.contrib.openstack import templating, context from nova_compute_contexts import ( CloudComputeContext, - NovaComputeVirtContext, NovaComputeLibvirtContext, NovaComputeCephContext, QuantumPluginContext, @@ -276,11 +275,3 @@ def import_keystone_ca_cert(): with open(CA_CERT_PATH) as out: out.write(b64decode(ca_cert)) check_call(['update-ca-certificates']) - - -def configure_network_service(): - pass - - -def configure_volume_service(): - pass diff --git a/tests/test_nova_compute_relations.py b/tests/test_nova_compute_relations.py index 67dcc1c8..0f8917ca 100644 --- a/tests/test_nova_compute_relations.py +++ b/tests/test_nova_compute_relations.py @@ -39,8 +39,6 @@ TO_PATCH = [ 'import_keystone_ca_cert', 
'migration_enabled', 'configure_live_migration', - 'configure_network_service', - 'configure_volume_service', 'do_openstack_upgrade', 'quantum_attribute', 'quantum_enabled', @@ -207,8 +205,6 @@ class NovaComputeRelationsTests(CharmTestCase): def test_compute_changed(self): relations.compute_changed() expected_funcs = [ - self.configure_network_service, - self.configure_volume_service, self.import_authorized_keys, self.import_keystone_ca_cert, ] From f239ef80f03a8c3f5b090fb4283e62b95dc2128c Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 30 Jul 2013 20:39:44 -0700 Subject: [PATCH 10/84] Checkin templates. --- hooks/nova_compute_context.py | 20 ++- hooks/nova_compute_relations.py | 17 ++- hooks/nova_compute_utils.py | 34 ++++-- revision | 2 +- templates/essex/nova.conf | 29 +++++ templates/folsom/nova.conf | 52 ++++++++ templates/libvirt-bin | 11 ++ templates/qemu.conf | 7 ++ tests/test_nova_compute_contexts.py | 174 +++++++++++++++++++++++++++ tests/test_nova_compute_relations.py | 2 - 10 files changed, 326 insertions(+), 22 deletions(-) create mode 100644 templates/essex/nova.conf create mode 100644 templates/folsom/nova.conf create mode 100644 templates/libvirt-bin create mode 100644 templates/qemu.conf create mode 100644 tests/test_nova_compute_contexts.py diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index fe42cf82..205ccb56 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -65,7 +65,9 @@ class NovaComputeCephContext(context.CephContext): class CloudComputeContext(context.OSContextGenerator): ''' Generates main context for writing nova.conf and quantum.conf templates - from a cloud-compute relation changed hook + from a cloud-compute relation changed hook. Mainly used for determinig + correct network and volume service configuration on the compute node, + as advertised by the cloud-controller. Note: individual quantum plugin contexts are handled elsewhere. 
''' @@ -138,14 +140,19 @@ class CloudComputeContext(context.OSContextGenerator): net_manager = relation_get('network_manager') if net_manager: - ctxt['network_manager'] = net_manager if net_manager.lower() == 'flatdhcpmanager': - ctxt.update(self.flat_dhcp_context()) + ctxt.update({ + 'network_manager_config': self.flat_dhcp_context() + }) elif net_manager.lower() == 'quantum': - ctxt.update(self.quantum_context()) + ctxt.update({ + 'network_manager_config': self.quantum_context() + }) _save_flag_file(path='/etc/nova/nm.conf', data=net_manager) - ctxt.update(self.volume_context()) + vol_service = self.volume_context() + if vol_service: + ctxt.update({'volume_service_config': vol_service}) return ctxt class QuantumPluginContext(context.OSContextGenerator): @@ -168,6 +175,7 @@ class QuantumPluginContext(context.OSContextGenerator): n_fw_driver = 'nova.virt.firewall.NoopFirewallDriver' ovs_ctxt = { + 'quantum_plugin': 'ovs', # quantum.conf 'core_plugin': q_driver, # nova.conf @@ -182,8 +190,8 @@ class QuantumPluginContext(context.OSContextGenerator): q_sec_groups = relation_get('quantum_security_groups') if q_sec_groups and q_sec_groups.lower() == 'yes': + ovs_ctxt['quantum_security_groups'] = True # nova.conf - ovs_ctxt['security_group_api'] = 'quantum' ovs_ctxt['nova_firewall_driver'] = n_fw_driver # ovs conf ovs_ctxt['ovs_firewall_driver'] = q_fw_driver diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_relations.py index 21b815cc..b99ccf06 100755 --- a/hooks/nova_compute_relations.py +++ b/hooks/nova_compute_relations.py @@ -1,6 +1,7 @@ #!/usr/bin/python import os +import sys from charmhelpers.core.hookenv import ( Hooks, @@ -10,6 +11,7 @@ from charmhelpers.core.hookenv import ( relation_set, service_name, unit_get, + UnregisteredHookError, ) from charmhelpers.core.host import ( @@ -27,8 +29,8 @@ from nova_compute_utils import ( determine_packages, import_authorized_keys, import_keystone_ca_cert, + initialize_ssh_keys, migration_enabled, - 
configure_live_migration, do_openstack_upgrade, quantum_attribute, quantum_enabled, @@ -59,10 +61,10 @@ def config_changed(): if openstack_upgrade_available('nova-common'): do_openstack_upgrade() - configure_live_migration() if migration_enabled() and config('migration-auth-type') == 'ssh': # Check-in with nova-c-c and register new ssh key, if it has just been # generated. + initialize_ssh_keys() [compute_joined(rid) for rid in relation_ids('cloud-compute')] CONFIGS.write_all() @@ -156,3 +158,14 @@ def ceph_changed(): CONFIGS.write('/etc/ceph/ceph.conf') CONFIGS.write('/etc/ceph/secret.xml') CONFIGS.write('/etc/nova/nova.conf') + + +def main(): + try: + hooks.execute(sys.argv) + except UnregisteredHookError as e: + log('Unknown hook {} - skipping.'.format(e)) + + +if __name__ == '__main__': + main() diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 6c05d2b1..e9d11588 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -17,7 +17,7 @@ from charmhelpers.core.hookenv import ( from charmhelpers.contrib.openstack.utils import get_os_codename_package from charmhelpers.contrib.openstack import templating, context -from nova_compute_contexts import ( +from nova_compute_context import ( CloudComputeContext, NovaComputeLibvirtContext, NovaComputeCephContext, @@ -34,14 +34,6 @@ BASE_PACKAGES = [ ] BASE_RESOURCE_MAP = { - '/etc/ceph/ceph.conf': { - 'contexts': [NovaComputeCephContext()], - 'services': [], - }, - '/etc/ceph/secret.xml': { - 'contexts': [NovaComputeCephContext()], - 'services': [], - }, '/etc/libvirt/qemu.conf': { 'services': ['libvirt-bin'], 'contexts': [], @@ -61,6 +53,16 @@ BASE_RESOURCE_MAP = { }, } +CEPH_RESOURCES = { + '/etc/ceph/ceph.conf': { + 'contexts': [NovaComputeCephContext()], + 'services': [], + }, + '/etc/ceph/secret.xml': { + 'contexts': [NovaComputeCephContext()], + 'services': [], + } +} QUANTUM_RESOURCES = { '/etc/quantum/quantum.conf': { @@ -119,6 +121,10 @@ def resource_map(): 
resource_map[conf] = {} resource_map[conf]['services'] = svcs resource_map[conf]['contexts'] = ctxts + + if relation_ids('ceph'): + resource_map.update(CEPH_RESOURCES) + return resource_map def restart_map(): @@ -170,11 +176,15 @@ def determine_packages(): def migration_enabled(): - return config('enable-live-migration').lower() == 'true' + # XXX: confirm juju-core bool behavior is the same. + return config('enable-live-migration') def quantum_enabled(): - return config('network-manager').lower() == 'quantum' + manager = config('network-manager') + if not manager: + return False + return manager.lower() == 'quantum' def _network_config(): @@ -248,6 +258,8 @@ def configure_live_migration(configs=None): Ensure libvirt live migration is properly configured or disabled, depending on current config setting. """ + # dont think we need this + return configs = configs or register_configs() configs.write('/etc/libvirt/libvirtd.conf') configs.write('/etc/default/libvirt-bin') diff --git a/revision b/revision index cd5b0252..3ad5abd0 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -92 +99 diff --git a/templates/essex/nova.conf b/templates/essex/nova.conf new file mode 100644 index 00000000..4f7eac3c --- /dev/null +++ b/templates/essex/nova.conf @@ -0,0 +1,29 @@ +--dhcpbridge_flagfile=/etc/nova/nova.conf +--dhcpbridge=/usr/bin/nova-dhcpbridge +--logdir=/var/log/nova +--state_path=/var/lib/nova +--lock_path=/var/lock/nova +--force_dhcp_release +--iscsi_helper=tgtadm +--libvirt_use_virtio_for_bridges +--connection_type=libvirt +--root_helper=sudo nova-rootwrap +--verbose +--ec2_private_dns_show_ip +{% if database_host -%} +--sql_connection=mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} +{% endif -%} +{% if rabbitmq_host -%} +--rabbit_host={{ rabbitmq_host }} +--rabbit_userid={{ rabbitmq_user }} +--rabbit_password={{ rabbitmq_password }} +--rabbit_virtual_host={{ rabbitmq_virtual_host }} +{% endif -%} +{% if glance_api_servers -%} 
+--glance_api_servers={{ glance_api_servers }} +{% endif -%} +{% if rbd_pool -%} +--rbd_pool={{ rbd_pool }} +--rbd_user={{ rbd_user }} +--rbd_secret_uuid={{ rbd_secret_uuid }} +{% endif -%} diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf new file mode 100644 index 00000000..393d749b --- /dev/null +++ b/templates/folsom/nova.conf @@ -0,0 +1,52 @@ +# juju managed +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose=True +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes +{% if database_host -%} +sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} +{% endif -%} +{% if rabbitmq_host -%} +rabbit_host = {{ rabbitmq_host }} +rabbit_userid = {{ rabbitmq_user }} +rabbit_password = {{ rabbitmq_password }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +{% endif -%} +{% if glance_api_servers -%} +glance_api_servers = {{ glance_api_servers }} +{% endif -%} +{% if rbd_pool -%} +rbd_pool = {{ rbd_pool }} +rbd_user = {{ rbd_user }} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% endif -%} +{% if quantum_plugin and quantum_plugin == 'ovs' -%} +libvirt_vif_driver = {{ libvirt_vif_driver }} +libvirt_user_virtio_for_bridges = {{ libvirt_use_virtio_for_birdges }} +{% if quantum_security_groups -%} +security_group_api = quantum +nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} +{% endif -%} +{% if network_manager_config -%} +{% for key, value in network_manager_config.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} +{% if volume_service_config -%} +{% for key, value in volume_service_config.iteritems() -%} +{{ key }} = {{ value }} 
+{% endfor -%} + +{% endif -%} diff --git a/templates/libvirt-bin b/templates/libvirt-bin new file mode 100644 index 00000000..05c67eda --- /dev/null +++ b/templates/libvirt-bin @@ -0,0 +1,11 @@ +# Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin) +# This is a POSIX shell fragment + +# Start libvirtd to handle qemu/kvm: +start_libvirtd="yes" + +# options passed to libvirtd, add "-l" to listen on tcp +libvirtd_opts="{{ libvirtd_opts }}" + +# pass in location of kerberos keytab +#export KRB5_KTNAME=/etc/libvirt/libvirt.keytab diff --git a/templates/qemu.conf b/templates/qemu.conf new file mode 100644 index 00000000..c9764eb4 --- /dev/null +++ b/templates/qemu.conf @@ -0,0 +1,7 @@ +# File installed by Juju nova-compute charm +cgroup_device_acl = [ + "/dev/null", "/dev/full", "/dev/zero", + "/dev/random", "/dev/urandom", + "/dev/ptmx", "/dev/kvm", "/dev/kqemu", + "/dev/rtc", "/dev/hpet", "/dev/net/tun", +] diff --git a/tests/test_nova_compute_contexts.py b/tests/test_nova_compute_contexts.py new file mode 100644 index 00000000..5174afd1 --- /dev/null +++ b/tests/test_nova_compute_contexts.py @@ -0,0 +1,174 @@ +from mock import MagicMock +from copy import deepcopy +from tests.test_utils import CharmTestCase + +import hooks.nova_compute_context as context + +TO_PATCH = [ + 'get_os_codename_package', + 'apt_install', + 'filter_installed_packages', + 'relation_ids', + 'relation_get', + 'config', + 'unit_private_ip', + 'log', + '_save_flag_file', +] + +QUANTUM_CONTEXT = { + 'network_manager': 'quantum', + 'quantum_auth_strategy': 'keystone', + 'keystone_host': 'keystone_host', + 'auth_port': '5000', + 'quantum_url': 'http://quantum_url', + 'service_tenant': 'admin', + 'service_username': 'admin', + 'service_password': 'openstack', + 'quantum_security_groups': 'yes', + 'quantum_plugin': 'ovs', +} + +# Context for an OVS plugin contains at least the following. Other bits +# (driver names) are dependent on OS release. 
+BASE_QUANTUM_OVS_PLUGIN_CONTEXT = { + 'core_plugin': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\ + 'OVSQuantumPluginV2', + 'enable_tunneling': True, + 'libvirt_use_virtio_for_bridges': True, + 'local_ip': '10.0.0.1', + 'nova_firewall_driver': 'nova.virt.firewall.NoopFirewallDriver', + 'ovs_firewall_driver': 'quantum.agent.linux.iptables_firewall.'\ + 'OVSHybridIptablesFirewallDriver', + 'tenant_network_type': 'gre', + 'tunnel_id_ranges': '1:1000', + 'quantum_plugin': 'ovs', + 'quantum_security_groups': True, +} + +def fake_log(msg, level=None): + level = level or 'INFO' + print '[juju test log (%s)] %s' % (level, msg) + +class NovaComputeContextTests(CharmTestCase): + def setUp(self): + super(NovaComputeContextTests, self).setUp(context, TO_PATCH) + self.relation_get.side_effect = self.test_relation.get + self.config.side_effect = self.test_config.get + self.log.side_effect = fake_log + + def test_cloud_compute_context_no_relation(self): + self.relation_ids.return_value = [] + cloud_compute = context.CloudComputeContext() + self.assertEquals({}, cloud_compute()) + + def test_cloud_compute_volume_context_cinder(self): + self.relation_ids.return_value = 'cloud-compute:0' + cloud_compute = context.CloudComputeContext() + + self.test_relation.set({'volume_service': 'cinder'}) + result = cloud_compute() + ex_ctxt = { + 'volume_service_config': { + 'volume_api_class': 'nova.volume.cinder.API' + } + } + self.assertEquals(ex_ctxt, result) + + def test_cloud_compute_volume_context_nova_vol(self): + self.relation_ids.return_value = 'cloud-compute:0' + cloud_compute = context.CloudComputeContext() + self.get_os_codename_package.return_value = 'essex' + self.test_relation.set({'volume_service': 'nova-volume'}) + result = cloud_compute() + ex_ctxt = { + 'volume_service_config': { + 'volume_api_class': 'nova.volume.api.API' + } + } + self.assertEquals(ex_ctxt, result) + + + def test_cloud_compute_volume_context_nova_vol_unsupported(self): + self.relation_ids.return_value 
= 'cloud-compute:0' + cloud_compute = context.CloudComputeContext() + # n-vol doesn't exist in grizzly + self.get_os_codename_package.return_value = 'grizzly' + self.test_relation.set({'volume_service': 'nova-volume'}) + result = cloud_compute() + self.assertEquals({}, result) + + def test_cloud_compute_flatdhcp_context(self): + self.test_relation.set({ + 'network_manager': 'FlatDHCPManager', + 'ec2_host': 'novaapihost'}) + cloud_compute = context.CloudComputeContext() + ex_ctxt = { + 'network_manager_config': { + 'network_manager': 'nova.network.manager.FlatDHCPManager', + 'ec2_dmz_host': 'novaapihost', + 'flat_interface': 'eth1' + }, + } + self.assertEquals(ex_ctxt, cloud_compute()) + + def test_cloud_compute_quantum_context(self): + self.test_relation.set(QUANTUM_CONTEXT) + cloud_compute = context.CloudComputeContext() + ex_ctxt = { 'network_manager_config': { + 'auth_port': '5000', + 'keystone_host': 'keystone_host', + 'network_api_class': 'nova.network.quantumv2.api.API', + 'quantum_admin_auth_url': 'http://keystone_host:5000/v2.0', + 'quantum_admin_password': 'openstack', + 'quantum_admin_tenant_name': 'admin', + 'quantum_admin_username': 'admin', + 'quantum_auth_strategy': 'keystone', + 'quantum_plugin': 'ovs', + 'quantum_security_groups': 'yes', + 'quantum_url': 'http://quantum_url' + } + } + self.assertEquals(ex_ctxt, cloud_compute()) + self._save_flag_file.assert_called_with( + path='/etc/nova/nm.conf', data='quantum') + + def test_quantum_plugin_context_no_setting(self): + qplugin = context.QuantumPluginContext() + self.assertEquals({}, qplugin()) + + def _test_qplugin_context(self, os_release): + self.get_os_codename_package.return_value = os_release + self.unit_private_ip.return_value = '10.0.0.1' + self.test_relation.set( + {'quantum_plugin': 'ovs', 'quantum_security_groups': 'yes'}) + qplugin = context.QuantumPluginContext() + qplugin._ensure_packages = MagicMock() + return qplugin() + + def test_quantum_plugin_context_ovs_folsom(self): + ex_ctxt = 
deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) + ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' + 'LibvirtHybridOVSBridgeDriver') + self.assertEquals(ex_ctxt, self._test_qplugin_context('folsom')) + self._save_flag_file.assert_called_with( + path='/etc/nova/quantum_plugin.conf', data='ovs') + + def test_quantum_plugin_context_ovs_grizzly_and_beyond(self): + ex_ctxt = deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) + ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' + 'LibvirtGenericVIFDriver') + self.assertEquals(ex_ctxt, self._test_qplugin_context('grizzly')) + self._save_flag_file.assert_called_with( + path='/etc/nova/quantum_plugin.conf', data='ovs') + + def test_libvirt_bin_context_no_migration(self): + self.test_config.set('enable-live-migration', 'false') + libvirt = context.NovaComputeLibvirtContext() + self.assertEquals({'libvirtd_opts': '-d'}, libvirt()) + + def test_libvirt_bin_context_migration_tcp_listen(self): + self.test_config.set('enable-live-migration', 'true') + libvirt = context.NovaComputeLibvirtContext() + self.assertEquals({'libvirtd_opts': '-d -l'}, libvirt()) + diff --git a/tests/test_nova_compute_relations.py b/tests/test_nova_compute_relations.py index 0f8917ca..3ebd0028 100644 --- a/tests/test_nova_compute_relations.py +++ b/tests/test_nova_compute_relations.py @@ -38,7 +38,6 @@ TO_PATCH = [ 'import_authorized_keys', 'import_keystone_ca_cert', 'migration_enabled', - 'configure_live_migration', 'do_openstack_upgrade', 'quantum_attribute', 'quantum_enabled', @@ -91,7 +90,6 @@ class NovaComputeRelationsTests(CharmTestCase): self.migration_enabled.return_value = False relations.config_changed() self.assertFalse(self.do_openstack_upgrade.called) - self.assertTrue(self.configure_live_migration) self.assertFalse(compute_joined.called) def test_amqp_joined(self): From 3088213f27ea2c8ab0ff1e4dc74d2c972217a707 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 12:53:09 -0700 Subject: [PATCH 11/84] Add context generator for 
handling config-flags charm config setting. --- config.yaml | 1 - .../charmhelpers/contrib/openstack/context.py | 2 +- hooks/nova_compute_context.py | 37 +++++++++++++++++-- hooks/nova_compute_relations.py | 3 +- hooks/nova_compute_utils.py | 4 +- revision | 2 +- templates/folsom/nova.conf | 12 +++++- tests/test_nova_compute_contexts.py | 24 ++++++++++++ 8 files changed, 74 insertions(+), 11 deletions(-) diff --git a/config.yaml b/config.yaml index c0b81f3b..772b3059 100644 --- a/config.yaml +++ b/config.yaml @@ -71,7 +71,6 @@ options: type: string description: Network interface on which to build bridge config-flags: - default: None type: string description: Comma separated list of key=value config flags to be set in nova.conf. nagios_context: diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index c4706424..69cc7efd 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -207,7 +207,7 @@ class HAProxyContext(OSContextGenerator): class ImageServiceContext(OSContextGenerator): - interfaces = ['image-servce'] + interfaces = ['image-service'] def __call__(self): ''' diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 205ccb56..b412ff0e 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -9,6 +9,7 @@ from charmhelpers.core.hookenv import ( relation_ids, unit_private_ip, ERROR, + WARNING, ) from charmhelpers.contrib.openstack.utils import get_os_codename_package @@ -38,8 +39,7 @@ class NovaComputeLibvirtContext(context.OSContextGenerator): def __call__(self): # enable tcp listening if configured for live migration. 
- migration = config('enable-live-migration') - if migration and migration.lower() == 'true': + if config('enable-live-migration'): opts = '-d -l' else: opts = '-d' @@ -75,7 +75,9 @@ class CloudComputeContext(context.OSContextGenerator): def _ensure_packages(self, packages): '''Install but do not upgrade required packages''' - apt_install(filter_installed_packages(packages)) + required = filter_installed_packages(packages) + if required: + apt_install(required) def flat_dhcp_context(self): ec2_host = relation_get('ec2_host') @@ -131,6 +133,7 @@ class CloudComputeContext(context.OSContextGenerator): raise return vol_ctxt + def __call__(self): rids = relation_ids('cloud-compute') if not rids: @@ -153,14 +156,40 @@ class CloudComputeContext(context.OSContextGenerator): vol_service = self.volume_context() if vol_service: ctxt.update({'volume_service_config': vol_service}) + return ctxt + +class OSConfigFlagContext(context.OSContextGenerator): + ''' + Responsible adding user-defined config-flags in charm config to a + to a template context. + ''' + # this can be moved to charm-helpers? 
+ def __call__(self): + config_flags = config('config-flags') + if not config_flags: + return {} + config_flags = config_flags.split(',') + flags = {} + for flag in config_flags: + if '=' not in flag: + log('Impoperly formatted config-flag, expected k=v ' + ' got %s' % flag, level=WARNING) + continue + k, v = flag.split('=') + flags[k.strip()] = v + ctxt = {'user_config_flags': flags} + return ctxt + class QuantumPluginContext(context.OSContextGenerator): interfaces = [] def _ensure_packages(self, packages): '''Install but do not upgrade required plugin packages''' - apt_install(filter_installed_packages(packages)) + required = filter_installed_packages(packages) + if required: + apt_install(required) def ovs_context(self): q_driver = 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\ diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_relations.py index b99ccf06..0426f91a 100755 --- a/hooks/nova_compute_relations.py +++ b/hooks/nova_compute_relations.py @@ -17,6 +17,7 @@ from charmhelpers.core.hookenv import ( from charmhelpers.core.host import ( apt_install, apt_update, + filter_installed_packages, restart_on_change, ) @@ -142,7 +143,7 @@ def compute_changed(): def ceph_joined(): if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') - apt_install('ceph-common') + apt_install(filter_installed_packages('ceph-common')) @hooks.hook('ceph-relation-changed') diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index e9d11588..4fa43e5c 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -21,6 +21,7 @@ from nova_compute_context import ( CloudComputeContext, NovaComputeLibvirtContext, NovaComputeCephContext, + OSConfigFlagContext, QuantumPluginContext, ) @@ -49,6 +50,7 @@ BASE_RESOURCE_MAP = { context.ImageServiceContext(), CloudComputeContext(), NovaComputeCephContext(), + OSConfigFlagContext(), QuantumPluginContext()] }, } @@ -225,7 +227,7 @@ def quantum_attribute(plugin, attr): def 
public_ssh_key(user='root'): home = pwd.getpwnam(user).pw_dir try: - with open(os.path.join(home, '.ssh', 'id_rsa')) as key: + with open(os.path.join(home, '.ssh', 'id_rsa.pub')) as key: return key.read().strip() except: return None diff --git a/revision b/revision index 3ad5abd0..f96ac067 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -99 +105 diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 393d749b..ae88988c 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -1,4 +1,8 @@ -# juju managed +############################################################################### +# [ WARNING ] +# cinder configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### [DEFAULT] dhcpbridge_flagfile=/etc/nova/nova.conf dhcpbridge=/usr/bin/nova-dhcpbridge @@ -48,5 +52,9 @@ nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% for key, value in volume_service_config.iteritems() -%} {{ key }} = {{ value }} {% endfor -%} - +{% endif -%} +{% if user_config_flags -%} +{% for key, value in user_config_flags.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} {% endif -%} diff --git a/tests/test_nova_compute_contexts.py b/tests/test_nova_compute_contexts.py index 5174afd1..497fd1a4 100644 --- a/tests/test_nova_compute_contexts.py +++ b/tests/test_nova_compute_contexts.py @@ -172,3 +172,27 @@ class NovaComputeContextTests(CharmTestCase): libvirt = context.NovaComputeLibvirtContext() self.assertEquals({'libvirtd_opts': '-d -l'}, libvirt()) + + def test_config_flag_context_none_set_in_config(self): + flags = context.OSConfigFlagContext() + self.assertEquals({}, flags()) + + def test_conflig_flag_context(self): + self.test_config.set('config-flags', 'one=two,three=four,five=six') + flags = context.OSConfigFlagContext() + ex = { + 'user_config_flags': { + 'one': 'two', 'three': 'four', 'five': 'six' + } + } + self.assertEquals(ex, 
flags()) + + def test_conflig_flag_context_filters_bad_input(self): + self.test_config.set('config-flags', 'one=two,threefour,five=six') + flags = context.OSConfigFlagContext() + ex = { + 'user_config_flags': { + 'one': 'two', 'five': 'six' + } + } + self.assertEquals(ex, flags()) From 9446e951af37f29d001e2483e2a55297789ced3c Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 16:21:58 -0700 Subject: [PATCH 12/84] Finish up: new templates, ssh key creation, Makefile. --- .coveragerc | 6 + Makefile | 14 + .../charmhelpers/contrib/openstack/context.py | 2 +- hooks/misc_utils.py | 8 +- hooks/nova_compute_context.py | 38 +- hooks/nova_compute_relations.py | 2 +- hooks/nova_compute_utils.py | 43 +- hooks/start | 1 + hooks/stop | 1 + metadata.yaml | 2 + revision | 2 +- templates/essex/nova.conf | 4 + templates/folsom/nova.conf | 7 +- templates/libvirt-bin | 5 + templates/libvirtd.conf | 400 ++++++++++++++++++ templates/qemu.conf | 5 + templates/secret.xml | 8 + tests/test_nova_compute_contexts.py | 42 +- tests/test_nova_compute_relations.py | 9 +- tests/test_nova_compute_utils.py | 9 +- tests/test_utils.py | 2 +- 21 files changed, 555 insertions(+), 55 deletions(-) create mode 100644 .coveragerc create mode 100644 Makefile create mode 120000 hooks/start create mode 120000 hooks/stop create mode 100644 templates/libvirtd.conf create mode 100644 templates/secret.xml diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000..caea8867 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,6 @@ +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + if __name__ == .__main__.: +include= + hooks/nova_* diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..acc7d5e6 --- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +#!/usr/bin/make +PYTHON := /usr/bin/env python + +lint: + @flake8 --exclude hooks/charmhelpers hooks + @flake8 --exclude hooks/charmhelpers tests + @charm proof + +test: + @echo Starting tests... 
+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage tests + +sync: + @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 69cc7efd..bc5991a1 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -153,7 +153,7 @@ class CephContext(OSContextGenerator): def __call__(self): '''This generates context for /etc/ceph/ceph.conf templates''' - log('Generating tmeplate context for ceph') + log('Generating template context for ceph') mon_hosts = [] auth = None for rid in relation_ids('ceph'): diff --git a/hooks/misc_utils.py b/hooks/misc_utils.py index c5748f13..7ec63938 100644 --- a/hooks/misc_utils.py +++ b/hooks/misc_utils.py @@ -12,11 +12,13 @@ from charmhelpers.contrib.hahelpers.ceph import ( ) -# This was pulled from cinder redux. It should go somewhere common, charmhelpers.hahelpers.ceph? +# This was pulled from cinder redux. It should go somewhere common, +# charmhelpers.hahelpers.ceph? def ensure_ceph_keyring(service): '''Ensures a ceph keyring exists. Returns True if so, False otherwise''' - # TODO: This can be shared between cinder + glance, find a home for it. + # TODO: This can be shared between nova + glance + cinder, find a home for + # it. 
key = None for rid in relation_ids('ceph'): for unit in related_units(rid): @@ -27,5 +29,5 @@ def ensure_ceph_keyring(service): return False ceph_create_keyring(service=service, key=key) keyring = ceph_keyring_path(service) - subprocess.check_call(['chown', 'cinder.cinder', keyring]) + subprocess.check_call(['chown', 'nova.nova', keyring]) return True diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index b412ff0e..0d30efb1 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -7,6 +7,7 @@ from charmhelpers.core.hookenv import ( log, relation_get, relation_ids, + service_name, unit_private_ip, ERROR, WARNING, @@ -37,19 +38,27 @@ class NovaComputeLibvirtContext(context.OSContextGenerator): interfaces = [] def __call__(self): + # distro defaults + ctxt = { + # /etc/default/libvirt-bin + 'libvirtd_opts': '-d', + # /etc/libvirt/libvirtd.conf ( + 'listen_tls': 1, + } # enable tcp listening if configured for live migration. if config('enable-live-migration'): - opts = '-d -l' - else: - opts = '-d' - return { - 'libvirtd_opts': opts, - } + ctxt['libvirtd_opts'] += ' -l' + + if config('migration-auth-type') in ['none', 'None', 'ssh']: + ctxt['listen_tls'] = 0 + + return ctxt class NovaComputeVirtContext(context.OSContextGenerator): interfaces = [] + def __call__(self): return {} @@ -59,9 +68,17 @@ class NovaComputeCephContext(context.CephContext): ctxt = super(NovaComputeCephContext, self).__call__() if not ctxt: return {} + svc = service_name() + # secret.xml ctxt['ceph_secret_uuid'] = CEPH_SECRET_UUID + # nova.conf + ctxt['service_name'] = svc + ctxt['rbd_user'] = svc + ctxt['rbd_secret_uuid'] = CEPH_SECRET_UUID + ctxt['rbd_pool'] = 'nova' return ctxt + class CloudComputeContext(context.OSContextGenerator): ''' Generates main context for writing nova.conf and quantum.conf templates @@ -105,7 +122,7 @@ class CloudComputeContext(context.OSContextGenerator): 'quantum_security_groups': 
relation_get('quantum_security_groups'), 'quantum_plugin': relation_get('quantum_plugin'), } - missing = [k for k, v in quantum_ctxt.iteritems() if v == None] + missing = [k for k, v in quantum_ctxt.iteritems() if v is None] if missing: log('Missing required relation settings for Quantum: ' + ' '.join(missing)) @@ -133,7 +150,6 @@ class CloudComputeContext(context.OSContextGenerator): raise return vol_ctxt - def __call__(self): rids = relation_ids('cloud-compute') if not rids: @@ -182,6 +198,7 @@ class OSConfigFlagContext(context.OSContextGenerator): ctxt = {'user_config_flags': flags} return ctxt + class QuantumPluginContext(context.OSContextGenerator): interfaces = [] @@ -194,8 +211,8 @@ class QuantumPluginContext(context.OSContextGenerator): def ovs_context(self): q_driver = 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\ 'OVSQuantumPluginV2' - q_fw_driver = 'quantum.agent.linux.iptables_firewall.'\ - 'OVSHybridIptablesFirewallDriver' + q_fw_driver = 'quantum.agent.linux.iptables_firewall.'\ + 'OVSHybridIptablesFirewallDriver' if get_os_codename_package('nova-common') in ['essex', 'folsom']: n_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver' @@ -243,5 +260,4 @@ class QuantumPluginContext(context.OSContextGenerator): _save_flag_file(path='/etc/nova/quantum_plugin.conf', data=plugin) - return ctxt diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_relations.py index 0426f91a..b44d6430 100755 --- a/hooks/nova_compute_relations.py +++ b/hooks/nova_compute_relations.py @@ -143,7 +143,7 @@ def compute_changed(): def ceph_joined(): if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') - apt_install(filter_installed_packages('ceph-common')) + apt_install(filter_installed_packages(['ceph-common'])) @hooks.hook('ceph-relation-changed') diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 4fa43e5c..5ee0da7f 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -3,7 +3,7 @@ import pwd from 
base64 import b64decode from copy import deepcopy -from subprocess import check_call +from subprocess import check_call, check_output from charmhelpers.core.hookenv import ( config, @@ -27,7 +27,7 @@ from nova_compute_context import ( CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' -TEMPLATES='templates/' +TEMPLATES = 'templates/' BASE_PACKAGES = [ 'nova-compute', @@ -39,6 +39,10 @@ BASE_RESOURCE_MAP = { 'services': ['libvirt-bin'], 'contexts': [], }, + '/etc/libvirt/libvirtd.conf': { + 'services': ['libvirt-bin'], + 'contexts': [NovaComputeLibvirtContext()], + }, '/etc/default/libvirt-bin': { 'services': ['libvirt-bin'], 'contexts': [NovaComputeLibvirtContext()], @@ -129,6 +133,7 @@ def resource_map(): return resource_map + def restart_map(): ''' Constructs a restart map based on charm config settings and relation @@ -136,6 +141,7 @@ def restart_map(): ''' return {k: v['services'] for k, v in resource_map().iteritems()} + def register_configs(): ''' Returns an OSTemplateRenderer object with all required configs registered. @@ -224,6 +230,7 @@ def quantum_attribute(plugin, attr): except KeyError: return None + def public_ssh_key(user='root'): home = pwd.getpwnam(user).pw_dir try: @@ -233,8 +240,27 @@ def public_ssh_key(user='root'): return None -def initialize_ssh_keys(): - pass +def initialize_ssh_keys(user='root'): + home_dir = pwd.getpwnam(user).pw_dir + ssh_dir = os.path.join(home_dir, '.ssh') + if not os.path.isdir(ssh_dir): + os.mkdir(ssh_dir) + + priv_key = os.path.join(ssh_dir, 'id_rsa') + if not os.path.isfile(priv_key): + log('Generating new ssh key for user %s.' % user) + cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', + '-f', priv_key] + check_output(cmd) + + pub_key = '%s.pub' % priv_key + if not os.path.isfile(pub_key): + log('Generating missing ssh public key @ %s.' 
% pub_key) + cmd = ['ssh-keygen', '-y', '-f', priv_key] + p = check_output(cmd).strip() + with open(pub_key, 'wb') as out: + out.write(p) + check_output(['chown', '-R', user, ssh_dir]) def import_authorized_keys(user='root'): @@ -244,17 +270,20 @@ def import_authorized_keys(user='root'): # XXX: Should this be managed via templates + contexts? hosts = relation_get('known_hosts') auth_keys = relation_get('authorized_keys') - if None in [hosts, auth_keys]: + # XXX: Need to fix charm-helpers to return None for empty settings, + # in all cases. + if not hosts or not auth_keys: return dest = os.path.join(pwd.getpwnam(user).pw_dir, '.ssh') log('Saving new known_hosts and authorized_keys file to: %s.' % dest) - with open(os.path.join(dest, 'authorized_keys')) as _keys: + with open(os.path.join(dest, 'authorized_keys'), 'wb') as _keys: _keys.write(b64decode(auth_keys)) - with open(os.path.join(dest, 'known_hosts')) as _hosts: + with open(os.path.join(dest, 'known_hosts'), 'wb') as _hosts: _hosts.write(b64decode(hosts)) + def configure_live_migration(configs=None): """ Ensure libvirt live migration is properly configured or disabled, diff --git a/hooks/start b/hooks/start new file mode 120000 index 00000000..6eb6593e --- /dev/null +++ b/hooks/start @@ -0,0 +1 @@ +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/stop b/hooks/stop new file mode 120000 index 00000000..6eb6593e --- /dev/null +++ b/hooks/stop @@ -0,0 +1 @@ +nova_compute_relations.py \ No newline at end of file diff --git a/metadata.yaml b/metadata.yaml index 14ab07be..6e9534d0 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -5,6 +5,8 @@ description: | OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In addition to its "native" API (the OpenStack API), it also supports the Amazon EC2 API. 
+categories: + - openstack provides: cloud-compute: interface: nova-compute diff --git a/revision b/revision index f96ac067..bc6298e8 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -105 +110 diff --git a/templates/essex/nova.conf b/templates/essex/nova.conf index 4f7eac3c..89be9c7e 100644 --- a/templates/essex/nova.conf +++ b/templates/essex/nova.conf @@ -1,3 +1,7 @@ +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. +############################################################################### --dhcpbridge_flagfile=/etc/nova/nova.conf --dhcpbridge=/usr/bin/nova-dhcpbridge --logdir=/var/log/nova diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index ae88988c..28f00e04 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -1,8 +1,7 @@ -############################################################################### +############################################################################### # [ WARNING ] -# cinder configuration file maintained by Juju -# local changes may be overwritten. -############################################################################### +# Configuration file maintained by Juju. Local changes may be overwritten. +############################################################################### [DEFAULT] dhcpbridge_flagfile=/etc/nova/nova.conf dhcpbridge=/usr/bin/nova-dhcpbridge diff --git a/templates/libvirt-bin b/templates/libvirt-bin index 05c67eda..9e65b261 100644 --- a/templates/libvirt-bin +++ b/templates/libvirt-bin @@ -1,3 +1,8 @@ +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### + # Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin) # This is a POSIX shell fragment diff --git a/templates/libvirtd.conf b/templates/libvirtd.conf new file mode 100644 index 00000000..e254542f --- /dev/null +++ b/templates/libvirtd.conf @@ -0,0 +1,400 @@ +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. +############################################################################### + +# Master libvirt daemon configuration file +# +# For further information consult http://libvirt.org/format.html +# +# NOTE: the tests/daemon-conf regression test script requires +# that each "PARAMETER = VALUE" line in this file have the parameter +# name just after a leading "#". + +################################################################# +# +# Network connectivity controls +# + +# Flag listening for secure TLS connections on the public TCP/IP port. +# NB, must pass the --listen flag to the libvirtd process for this to +# have any effect. +# +# It is necessary to setup a CA and issue server certificates before +# using this capability. +# +# This is enabled by default, uncomment this to disable it +listen_tls = {{ listen_tls }} + +# Listen for unencrypted TCP connections on the public TCP/IP port. +# NB, must pass the --listen flag to the libvirtd process for this to +# have any effect. +# +# Using the TCP socket requires SASL authentication by default. Only +# SASL mechanisms which support data encryption are allowed. This is +# DIGEST_MD5 and GSSAPI (Kerberos5) +# +# This is disabled by default, uncomment this to enable it. 
+#listen_tcp = 1 + + + +# Override the port for accepting secure TLS connections +# This can be a port number, or service name +# +#tls_port = "16514" + +# Override the port for accepting insecure TCP connections +# This can be a port number, or service name +# +#tcp_port = "16509" + + +# Override the default configuration which binds to all network +# interfaces. This can be a numeric IPv4/6 address, or hostname +# +#listen_addr = "192.168.0.1" + + +# Flag toggling mDNS advertizement of the libvirt service. +# +# Alternatively can disable for all services on a host by +# stopping the Avahi daemon +# +# This is disabled by default, uncomment this to enable it +#mdns_adv = 1 + +# Override the default mDNS advertizement name. This must be +# unique on the immediate broadcast network. +# +# The default is "Virtualization Host HOSTNAME", where HOSTNAME +# is subsituted for the short hostname of the machine (without domain) +# +#mdns_name = "Virtualization Host Joe Demo" + + +################################################################# +# +# UNIX socket access controls +# + +# Set the UNIX domain socket group ownership. This can be used to +# allow a 'trusted' set of users access to management capabilities +# without becoming root. +# +# This is restricted to 'root' by default. +unix_sock_group = "libvirtd" + +# Set the UNIX socket permissions for the R/O socket. This is used +# for monitoring VM status only +# +# Default allows any user. If setting group ownership may want to +# restrict this to: +#unix_sock_ro_perms = "0777" + +# Set the UNIX socket permissions for the R/W socket. This is used +# for full management of VMs +# +# Default allows only root. If PolicyKit is enabled on the socket, +# the default will change to allow everyone (eg, 0777) +# +# If not using PolicyKit and setting group ownership for access +# control then you may want to relax this to: +unix_sock_rw_perms = "0770" + +# Set the name of the directory in which sockets will be found/created. 
+#unix_sock_dir = "/var/run/libvirt" + +################################################################# +# +# Authentication. +# +# - none: do not perform auth checks. If you can connect to the +# socket you are allowed. This is suitable if there are +# restrictions on connecting to the socket (eg, UNIX +# socket permissions), or if there is a lower layer in +# the network providing auth (eg, TLS/x509 certificates) +# +# - sasl: use SASL infrastructure. The actual auth scheme is then +# controlled from /etc/sasl2/libvirt.conf. For the TCP +# socket only GSSAPI & DIGEST-MD5 mechanisms will be used. +# For non-TCP or TLS sockets, any scheme is allowed. +# +# - polkit: use PolicyKit to authenticate. This is only suitable +# for use on the UNIX sockets. The default policy will +# require a user to supply their own password to gain +# full read/write access (aka sudo like), while anyone +# is allowed read/only access. +# +# Set an authentication scheme for UNIX read-only sockets +# By default socket permissions allow anyone to connect +# +# To restrict monitoring of domains you may wish to enable +# an authentication mechanism here +auth_unix_ro = "none" + +# Set an authentication scheme for UNIX read-write sockets +# By default socket permissions only allow root. If PolicyKit +# support was compiled into libvirt, the default will be to +# use 'polkit' auth. +# +# If the unix_sock_rw_perms are changed you may wish to enable +# an authentication mechanism here +auth_unix_rw = "none" + +# Change the authentication scheme for TCP sockets. +# +# If you don't enable SASL, then all TCP traffic is cleartext. +# Don't do this outside of a dev/test scenario. For real world +# use, always enable SASL and use the GSSAPI or DIGEST-MD5 +# mechanism in /etc/sasl2/libvirt.conf +#auth_tcp = "sasl" + +# Change the authentication scheme for TLS sockets. 
+# +# TLS sockets already have encryption provided by the TLS +# layer, and limited authentication is done by certificates +# +# It is possible to make use of any SASL authentication +# mechanism as well, by using 'sasl' for this option +#auth_tls = "none" + + + +################################################################# +# +# TLS x509 certificate configuration +# + + +# Override the default server key file path +# +#key_file = "/etc/pki/libvirt/private/serverkey.pem" + +# Override the default server certificate file path +# +#cert_file = "/etc/pki/libvirt/servercert.pem" + +# Override the default CA certificate path +# +#ca_file = "/etc/pki/CA/cacert.pem" + +# Specify a certificate revocation list. +# +# Defaults to not using a CRL, uncomment to enable it +#crl_file = "/etc/pki/CA/crl.pem" + + + +################################################################# +# +# Authorization controls +# + + +# Flag to disable verification of our own server certificates +# +# When libvirtd starts it performs some sanity checks against +# its own certificates. +# +# Default is to always run sanity checks. Uncommenting this +# will disable sanity checks which is not a good idea +#tls_no_sanity_certificate = 1 + +# Flag to disable verification of client certificates +# +# Client certificate verification is the primary authentication mechanism. +# Any client which does not present a certificate signed by the CA +# will be rejected. +# +# Default is to always verify. Uncommenting this will disable +# verification - make sure an IP whitelist is set +#tls_no_verify_certificate = 1 + + +# A whitelist of allowed x509 Distinguished Names +# This list may contain wildcards such as +# +# "C=GB,ST=London,L=London,O=Red Hat,CN=*" +# +# See the POSIX fnmatch function for the format of the wildcards. 
+# +# NB If this is an empty list, no client can connect, so comment out +# entirely rather than using empty list to disable these checks +# +# By default, no DN's are checked +#tls_allowed_dn_list = ["DN1", "DN2"] + + +# A whitelist of allowed SASL usernames. The format for usernames +# depends on the SASL authentication mechanism. Kerberos usernames +# look like username@REALM +# +# This list may contain wildcards such as +# +# "*@EXAMPLE.COM" +# +# See the POSIX fnmatch function for the format of the wildcards. +# +# NB If this is an empty list, no client can connect, so comment out +# entirely rather than using empty list to disable these checks +# +# By default, no Username's are checked +#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ] + + + +################################################################# +# +# Processing controls +# + +# The maximum number of concurrent client connections to allow +# over all sockets combined. +#max_clients = 20 + + +# The minimum limit sets the number of workers to start up +# initially. If the number of active clients exceeds this, +# then more threads are spawned, up to max_workers limit. +# Typically you'd want max_workers to equal maximum number +# of clients allowed +#min_workers = 5 +#max_workers = 20 + + +# The number of priority workers. If all workers from above +# pool will stuck, some calls marked as high priority +# (notably domainDestroy) can be executed in this pool. +#prio_workers = 5 + +# Total global limit on concurrent RPC calls. Should be +# at least as large as max_workers. Beyond this, RPC requests +# will be read into memory and queued. This directly impact +# memory usage, currently each request requires 256 KB of +# memory. So by default up to 5 MB of memory is used +# +# XXX this isn't actually enforced yet, only the per-client +# limit is used so far +#max_requests = 20 + +# Limit on concurrent requests from a single client +# connection. 
To avoid one client monopolizing the server +# this should be a small fraction of the global max_requests +# and max_workers parameter +#max_client_requests = 5 + +################################################################# +# +# Logging controls +# + +# Logging level: 4 errors, 3 warnings, 2 information, 1 debug +# basically 1 will log everything possible +#log_level = 3 + +# Logging filters: +# A filter allows to select a different logging level for a given category +# of logs +# The format for a filter is one of: +# x:name +# x:+name +# where name is a string which is matched against source file name, +# e.g., "remote", "qemu", or "util/json", the optional "+" prefix +# tells libvirt to log stack trace for each message matching name, +# and x is the minimal level where matching messages should be logged: +# 1: DEBUG +# 2: INFO +# 3: WARNING +# 4: ERROR +# +# Multiple filter can be defined in a single @filters, they just need to be +# separated by spaces. +# +# e.g. to only get warning or errors from the remote layer and only errors +# from the event layer: +#log_filters="3:remote 4:event" + +# Logging outputs: +# An output is one of the places to save logging information +# The format for an output can be: +# x:stderr +# output goes to stderr +# x:syslog:name +# use syslog for the output and use the given name as the ident +# x:file:file_path +# output to a file, with the given filepath +# In all case the x prefix is the minimal level, acting as a filter +# 1: DEBUG +# 2: INFO +# 3: WARNING +# 4: ERROR +# +# Multiple output can be defined, they just need to be separated by spaces. +# e.g. to log all warnings and errors to syslog under the libvirtd ident: +#log_outputs="3:syslog:libvirtd" +# + +# Log debug buffer size: default 64 +# The daemon keeps an internal debug log buffer which will be dumped in case +# of crash or upon receiving a SIGUSR2 signal. This setting allows to override +# the default buffer size in kilobytes. 
+# If value is 0 or less the debug log buffer is deactivated +#log_buffer_size = 64 + + +################################################################## +# +# Auditing +# +# This setting allows usage of the auditing subsystem to be altered: +# +# audit_level == 0 -> disable all auditing +# audit_level == 1 -> enable auditing, only if enabled on host (default) +# audit_level == 2 -> enable auditing, and exit if disabled on host +# +#audit_level = 2 +# +# If set to 1, then audit messages will also be sent +# via libvirt logging infrastructure. Defaults to 0 +# +#audit_logging = 1 + +################################################################### +# UUID of the host: +# Provide the UUID of the host here in case the command +# 'dmidecode -s system-uuid' does not provide a valid uuid. In case +# 'dmidecode' does not provide a valid UUID and none is provided here, a +# temporary UUID will be generated. +# Keep the format of the example UUID below. UUID must not have all digits +# be the same. + +# NB This default all-zeros UUID will not work. Replace +# it with the output of the 'uuidgen' command and then +# uncomment this entry +#host_uuid = "00000000-0000-0000-0000-000000000000" + +################################################################### +# Keepalive protocol: +# This allows libvirtd to detect broken client connections or even +# dead client. A keepalive message is sent to a client after +# keepalive_interval seconds of inactivity to check if the client is +# still responding; keepalive_count is a maximum number of keepalive +# messages that are allowed to be sent to the client without getting +# any response before the connection is considered broken. In other +# words, the connection is automatically closed approximately after +# keepalive_interval * (keepalive_count + 1) seconds since the last +# message received from the client. 
If keepalive_interval is set to +# -1, libvirtd will never send keepalive requests; however clients +# can still send them and the deamon will send responses. When +# keepalive_count is set to 0, connections will be automatically +# closed after keepalive_interval seconds of inactivity without +# sending any keepalive messages. +# +#keepalive_interval = 5 +#keepalive_count = 5 +# +# If set to 1, libvirtd will refuse to talk to clients that do not +# support keepalive protocol. Defaults to 0. +# +#keepalive_required = 1 diff --git a/templates/qemu.conf b/templates/qemu.conf index c9764eb4..12eee334 100644 --- a/templates/qemu.conf +++ b/templates/qemu.conf @@ -1,3 +1,8 @@ +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. +############################################################################### + # File installed by Juju nova-compute charm cgroup_device_acl = [ "/dev/null", "/dev/full", "/dev/zero", diff --git a/templates/secret.xml b/templates/secret.xml new file mode 100644 index 00000000..399e6f60 --- /dev/null +++ b/templates/secret.xml @@ -0,0 +1,8 @@ +{% if ceph_secret_uuid -%} + + {{ ceph_secret_uuid }} + + client.{{ service_name }} secret + + +{% endif -%} diff --git a/tests/test_nova_compute_contexts.py b/tests/test_nova_compute_contexts.py index 497fd1a4..6a516ecd 100644 --- a/tests/test_nova_compute_contexts.py +++ b/tests/test_nova_compute_contexts.py @@ -32,13 +32,13 @@ QUANTUM_CONTEXT = { # Context for an OVS plugin contains at least the following. Other bits # (driver names) are dependent on OS release. BASE_QUANTUM_OVS_PLUGIN_CONTEXT = { - 'core_plugin': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\ + 'core_plugin': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 
'OVSQuantumPluginV2', 'enable_tunneling': True, 'libvirt_use_virtio_for_bridges': True, 'local_ip': '10.0.0.1', 'nova_firewall_driver': 'nova.virt.firewall.NoopFirewallDriver', - 'ovs_firewall_driver': 'quantum.agent.linux.iptables_firewall.'\ + 'ovs_firewall_driver': 'quantum.agent.linux.iptables_firewall.' 'OVSHybridIptablesFirewallDriver', 'tenant_network_type': 'gre', 'tunnel_id_ranges': '1:1000', @@ -46,10 +46,12 @@ BASE_QUANTUM_OVS_PLUGIN_CONTEXT = { 'quantum_security_groups': True, } + def fake_log(msg, level=None): level = level or 'INFO' print '[juju test log (%s)] %s' % (level, msg) + class NovaComputeContextTests(CharmTestCase): def setUp(self): super(NovaComputeContextTests, self).setUp(context, TO_PATCH) @@ -88,7 +90,6 @@ class NovaComputeContextTests(CharmTestCase): } self.assertEquals(ex_ctxt, result) - def test_cloud_compute_volume_context_nova_vol_unsupported(self): self.relation_ids.return_value = 'cloud-compute:0' cloud_compute = context.CloudComputeContext() @@ -115,18 +116,19 @@ class NovaComputeContextTests(CharmTestCase): def test_cloud_compute_quantum_context(self): self.test_relation.set(QUANTUM_CONTEXT) cloud_compute = context.CloudComputeContext() - ex_ctxt = { 'network_manager_config': { - 'auth_port': '5000', - 'keystone_host': 'keystone_host', - 'network_api_class': 'nova.network.quantumv2.api.API', - 'quantum_admin_auth_url': 'http://keystone_host:5000/v2.0', - 'quantum_admin_password': 'openstack', - 'quantum_admin_tenant_name': 'admin', - 'quantum_admin_username': 'admin', - 'quantum_auth_strategy': 'keystone', - 'quantum_plugin': 'ovs', - 'quantum_security_groups': 'yes', - 'quantum_url': 'http://quantum_url' + ex_ctxt = { + 'network_manager_config': { + 'auth_port': '5000', + 'keystone_host': 'keystone_host', + 'network_api_class': 'nova.network.quantumv2.api.API', + 'quantum_admin_auth_url': 'http://keystone_host:5000/v2.0', + 'quantum_admin_password': 'openstack', + 'quantum_admin_tenant_name': 'admin', + 
'quantum_admin_username': 'admin', + 'quantum_auth_strategy': 'keystone', + 'quantum_plugin': 'ovs', + 'quantum_security_groups': 'yes', + 'quantum_url': 'http://quantum_url' } } self.assertEquals(ex_ctxt, cloud_compute()) @@ -163,15 +165,15 @@ class NovaComputeContextTests(CharmTestCase): path='/etc/nova/quantum_plugin.conf', data='ovs') def test_libvirt_bin_context_no_migration(self): - self.test_config.set('enable-live-migration', 'false') + self.test_config.set('enable-live-migration', False) libvirt = context.NovaComputeLibvirtContext() - self.assertEquals({'libvirtd_opts': '-d'}, libvirt()) + self.assertEquals({'libvirtd_opts': '-d', 'listen_tls': 1}, libvirt()) def test_libvirt_bin_context_migration_tcp_listen(self): - self.test_config.set('enable-live-migration', 'true') + self.test_config.set('enable-live-migration', True) libvirt = context.NovaComputeLibvirtContext() - self.assertEquals({'libvirtd_opts': '-d -l'}, libvirt()) - + self.assertEquals( + {'libvirtd_opts': '-d -l', 'listen_tls': 1}, libvirt()) def test_config_flag_context_none_set_in_config(self): flags = context.OSConfigFlagContext() diff --git a/tests/test_nova_compute_relations.py b/tests/test_nova_compute_relations.py index 3ebd0028..b7b325cd 100644 --- a/tests/test_nova_compute_relations.py +++ b/tests/test_nova_compute_relations.py @@ -27,6 +27,7 @@ TO_PATCH = [ # charmhelpers.core.host 'apt_install', 'apt_update', + 'filter_installed_packages', 'restart_on_change', #charmhelpers.contrib.openstack.utils 'configure_installation_source', @@ -37,6 +38,7 @@ TO_PATCH = [ 'determine_packages', 'import_authorized_keys', 'import_keystone_ca_cert', + 'initialize_ssh_keys', 'migration_enabled', 'do_openstack_upgrade', 'quantum_attribute', @@ -49,11 +51,16 @@ TO_PATCH = [ ] +def fake_filter(packages): + return packages + + class NovaComputeRelationsTests(CharmTestCase): def setUp(self): super(NovaComputeRelationsTests, self).setUp(relations, TO_PATCH) self.config.side_effect = self.test_config.get + 
self.filter_installed_packages.side_effect = fake_filter def test_install_hook(self): repo = 'cloud:precise-grizzly' @@ -215,7 +222,7 @@ class NovaComputeRelationsTests(CharmTestCase): isdir.return_value = False relations.ceph_joined() mkdir.assert_called_with('/etc/ceph') - self.apt_install.assert_called_with('ceph-common') + self.apt_install.assert_called_with(['ceph-common']) @patch.object(relations, 'CONFIGS') def test_ceph_changed_missing_relation_data(self, configs): diff --git a/tests/test_nova_compute_utils.py b/tests/test_nova_compute_utils.py index ca08bb04..c2bfbab7 100644 --- a/tests/test_nova_compute_utils.py +++ b/tests/test_nova_compute_utils.py @@ -182,8 +182,8 @@ class NovaComputeUtilsTests(CharmTestCase): ] ex_open = [ - call('/home/foo/.ssh/authorized_keys'), - call('/home/foo/.ssh/known_hosts') + call('/home/foo/.ssh/authorized_keys', 'wb'), + call('/home/foo/.ssh/known_hosts', 'wb') ] ex_write = [ call('foo_host\n'), @@ -195,7 +195,6 @@ class NovaComputeUtilsTests(CharmTestCase): self.assertEquals(ex_open, _open.call_args_list) self.assertEquals(ex_write, _file.write.call_args_list) - @patch('subprocess.check_call') def test_import_keystone_cert_missing_data(self, check_call): self.relation_get.return_value = None @@ -236,8 +235,8 @@ class NovaComputeUtilsTests(CharmTestCase): } resource_map.return_value = rsc_map utils.register_configs() - renderer.assert_called_with(openstack_release='havana', - templates_dir='templates/') + renderer.assert_called_with( + openstack_release='havana', templates_dir='templates/') ex_reg = [ call('/etc/nova/nova-compute.conf', [ctxt2]), call('/etc/nova/nova.conf', [ctxt1]) diff --git a/tests/test_utils.py b/tests/test_utils.py index a981c7c4..e7ff6e22 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -93,7 +93,7 @@ class TestRelation(object): self.relation_data = relation_data def get(self, attr=None, unit=None, rid=None): - if attr == None: + if attr is None: return self.relation_data elif attr in 
self.relation_data: return self.relation_data[attr] From a6508d24e6d6c8d97e0f4200ebf850c3d4fe213b Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 16:23:29 -0700 Subject: [PATCH 13/84] tests/ -> unit_tests/ --- Makefile | 4 ++-- {tests => unit_tests}/__init__.py | 0 {tests => unit_tests}/test_nova_compute_contexts.py | 2 +- {tests => unit_tests}/test_nova_compute_relations.py | 2 +- {tests => unit_tests}/test_nova_compute_utils.py | 2 +- {tests => unit_tests}/test_utils.py | 0 6 files changed, 5 insertions(+), 5 deletions(-) rename {tests => unit_tests}/__init__.py (100%) rename {tests => unit_tests}/test_nova_compute_contexts.py (99%) rename {tests => unit_tests}/test_nova_compute_relations.py (99%) rename {tests => unit_tests}/test_nova_compute_utils.py (99%) rename {tests => unit_tests}/test_utils.py (100%) diff --git a/Makefile b/Makefile index acc7d5e6..2e2b2db6 100644 --- a/Makefile +++ b/Makefile @@ -3,12 +3,12 @@ PYTHON := /usr/bin/env python lint: @flake8 --exclude hooks/charmhelpers hooks - @flake8 --exclude hooks/charmhelpers tests + @flake8 --exclude hooks/charmhelpers unit_tests @charm proof test: @echo Starting tests... 
- @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage tests + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests sync: @charm-helper-sync -c charm-helpers-sync.yaml diff --git a/tests/__init__.py b/unit_tests/__init__.py similarity index 100% rename from tests/__init__.py rename to unit_tests/__init__.py diff --git a/tests/test_nova_compute_contexts.py b/unit_tests/test_nova_compute_contexts.py similarity index 99% rename from tests/test_nova_compute_contexts.py rename to unit_tests/test_nova_compute_contexts.py index 6a516ecd..ec11003c 100644 --- a/tests/test_nova_compute_contexts.py +++ b/unit_tests/test_nova_compute_contexts.py @@ -1,6 +1,6 @@ from mock import MagicMock from copy import deepcopy -from tests.test_utils import CharmTestCase +from unit_tests.test_utils import CharmTestCase import hooks.nova_compute_context as context diff --git a/tests/test_nova_compute_relations.py b/unit_tests/test_nova_compute_relations.py similarity index 99% rename from tests/test_nova_compute_relations.py rename to unit_tests/test_nova_compute_relations.py index b7b325cd..eda966f1 100644 --- a/tests/test_nova_compute_relations.py +++ b/unit_tests/test_nova_compute_relations.py @@ -1,6 +1,6 @@ from mock import call, patch, MagicMock -from tests.test_utils import CharmTestCase +from unit_tests.test_utils import CharmTestCase import hooks.nova_compute_utils as utils diff --git a/tests/test_nova_compute_utils.py b/unit_tests/test_nova_compute_utils.py similarity index 99% rename from tests/test_nova_compute_utils.py rename to unit_tests/test_nova_compute_utils.py index c2bfbab7..ac79ba06 100644 --- a/tests/test_nova_compute_utils.py +++ b/unit_tests/test_nova_compute_utils.py @@ -1,6 +1,6 @@ from mock import patch, MagicMock, call -from tests.test_utils import CharmTestCase, patch_open +from unit_tests.test_utils import CharmTestCase, patch_open import hooks.nova_compute_utils as utils diff --git a/tests/test_utils.py b/unit_tests/test_utils.py 
similarity index 100% rename from tests/test_utils.py rename to unit_tests/test_utils.py From 163fc562a11f7ba4407bf321509867f736859e49 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 16:24:37 -0700 Subject: [PATCH 14/84] *_relations.py -> *_hooks.py --- ...ute_relations.py => nova_compute_hooks.py} | 0 ...elations.py => test_nova_compute_hooks.py} | 70 +++++++++---------- 2 files changed, 35 insertions(+), 35 deletions(-) rename hooks/{nova_compute_relations.py => nova_compute_hooks.py} (100%) rename unit_tests/{test_nova_compute_relations.py => test_nova_compute_hooks.py} (85%) diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_hooks.py similarity index 100% rename from hooks/nova_compute_relations.py rename to hooks/nova_compute_hooks.py diff --git a/unit_tests/test_nova_compute_relations.py b/unit_tests/test_nova_compute_hooks.py similarity index 85% rename from unit_tests/test_nova_compute_relations.py rename to unit_tests/test_nova_compute_hooks.py index eda966f1..c261ae00 100644 --- a/unit_tests/test_nova_compute_relations.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -10,7 +10,7 @@ _map = utils.restart_map utils.register_configs = MagicMock() utils.restart_map = MagicMock() -import hooks.nova_compute_relations as relations +import hooks.nova_compute_hooks as hooks utils.register_configs = _reg utils.restart_map = _map @@ -57,7 +57,7 @@ def fake_filter(packages): class NovaComputeRelationsTests(CharmTestCase): def setUp(self): - super(NovaComputeRelationsTests, self).setUp(relations, + super(NovaComputeRelationsTests, self).setUp(hooks, TO_PATCH) self.config.side_effect = self.test_config.get self.filter_installed_packages.side_effect = fake_filter @@ -66,17 +66,17 @@ class NovaComputeRelationsTests(CharmTestCase): repo = 'cloud:precise-grizzly' self.test_config.set('openstack-origin', repo) self.determine_packages.return_value = ['foo', 'bar'] - relations.install() + hooks.install() 
self.configure_installation_source.assert_called_with(repo) self.assertTrue(self.apt_update.called) self.apt_install.assert_called_with(['foo', 'bar'], fatal=True) def test_config_changed_with_upgrade(self): self.openstack_upgrade_available.return_value = True - relations.config_changed() + hooks.config_changed() self.assertTrue(self.do_openstack_upgrade.called) - @patch.object(relations, 'compute_joined') + @patch.object(hooks, 'compute_joined') def test_config_changed_with_migration(self, compute_joined): self.migration_enabled.return_value = True self.test_config.set('migration-auth-type', 'ssh') @@ -84,30 +84,30 @@ class NovaComputeRelationsTests(CharmTestCase): 'cloud-compute:0', 'cloud-compute:1' ] - relations.config_changed() + hooks.config_changed() ex = [ call('cloud-compute:0'), call('cloud-compute:1'), ] self.assertEquals(ex, compute_joined.call_args_list) - @patch.object(relations, 'compute_joined') + @patch.object(hooks, 'compute_joined') def test_config_changed_no_upgrade_no_migration(self, compute_joined): self.openstack_upgrade_available.return_value = False self.migration_enabled.return_value = False - relations.config_changed() + hooks.config_changed() self.assertFalse(self.do_openstack_upgrade.called) self.assertFalse(compute_joined.called) def test_amqp_joined(self): - relations.amqp_joined() + hooks.amqp_joined() self.relation_set.assert_called_with(username='nova', vhost='nova') - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_amqp_changed_missing_relation_data(self, configs): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] - relations.amqp_changed() + hooks.amqp_changed() self.log.assert_called_with( 'amqp relation incomplete. Peer not ready?' 
) @@ -117,15 +117,15 @@ class NovaComputeRelationsTests(CharmTestCase): configs.complete_contexts.return_value = ['amqp'] configs.write = MagicMock() self.quantum_enabled.return_value = quantum - relations.amqp_changed() + hooks.amqp_changed() - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_amqp_changed_with_data_no_quantum(self, configs): self._amqp_test(configs, quantum=False) self.assertEquals([call('/etc/nova/nova.conf')], configs.write.call_args_list) - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_amqp_changed_with_data_and_quantum(self, configs): self._amqp_test(configs, quantum=True) self.assertEquals([call('/etc/nova/nova.conf'), @@ -134,16 +134,16 @@ class NovaComputeRelationsTests(CharmTestCase): def test_db_joined(self): self.unit_get.return_value = 'nova.foohost.com' - relations.db_joined() + hooks.db_joined() self.relation_set.assert_called_with(database='nova', username='nova', hostname='nova.foohost.com') self.unit_get.assert_called_with('private-address') - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_db_changed_missing_relation_data(self, configs): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] - relations.db_changed() + hooks.db_changed() self.log.assert_called_with( 'shared-db relation incomplete. Peer not ready?' 
) @@ -153,54 +153,54 @@ class NovaComputeRelationsTests(CharmTestCase): configs.complete_contexts.return_value = ['shared-db'] configs.write = MagicMock() self.quantum_enabled.return_value = quantum - relations.db_changed() + hooks.db_changed() - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_db_changed_with_data_no_quantum(self, configs): self._shared_db_test(configs, quantum=False) self.assertEquals([call('/etc/nova/nova.conf')], configs.write.call_args_list) - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_db_changed_with_data_and_quantum(self, configs): self.quantum_attribute.return_value = '/etc/quantum/plugin.conf' self._shared_db_test(configs, quantum=True) ex = [call('/etc/nova/nova.conf'), call('/etc/quantum/plugin.conf')] self.assertEquals(ex, configs.write.call_args_list) - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_image_service_missing_relation_data(self, configs): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] - relations.image_service_changed() + hooks.image_service_changed() self.log.assert_called_with( 'image-service relation incomplete. Peer not ready?' 
) - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_image_service_with_relation_data(self, configs): configs.complete_contexts = MagicMock() configs.write = MagicMock() configs.complete_contexts.return_value = ['image-service'] - relations.image_service_changed() + hooks.image_service_changed() configs.write.assert_called_with('/etc/nova/nova.conf') def test_compute_joined_no_migration(self): self.migration_enabled.return_value = False - relations.compute_joined() + hooks.compute_joined() self.assertFalse(self.relation_set.called) def test_compute_joined_with_ssh_migration(self): self.migration_enabled.return_value = True self.test_config.set('migration-auth-type', 'ssh') self.public_ssh_key.return_value = 'foo' - relations.compute_joined() + hooks.compute_joined() self.relation_set.assert_called_with( relation_id=None, ssh_public_key='foo', migration_auth_type='ssh' ) - relations.compute_joined(rid='cloud-compute:2') + hooks.compute_joined(rid='cloud-compute:2') self.relation_set.assert_called_with( relation_id='cloud-compute:2', ssh_public_key='foo', @@ -208,7 +208,7 @@ class NovaComputeRelationsTests(CharmTestCase): ) def test_compute_changed(self): - relations.compute_changed() + hooks.compute_changed() expected_funcs = [ self.import_authorized_keys, self.import_keystone_ca_cert, @@ -220,36 +220,36 @@ class NovaComputeRelationsTests(CharmTestCase): @patch('os.path.isdir') def test_ceph_joined(self, isdir, mkdir): isdir.return_value = False - relations.ceph_joined() + hooks.ceph_joined() mkdir.assert_called_with('/etc/ceph') self.apt_install.assert_called_with(['ceph-common']) - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_ceph_changed_missing_relation_data(self, configs): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = [] - relations.ceph_changed() + hooks.ceph_changed() self.log.assert_called_with( 'ceph relation incomplete. Peer not ready?' 
) - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_ceph_changed_no_keyring(self, configs): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = ['ceph'] self.ensure_ceph_keyring.return_value = False - relations.ceph_changed() + hooks.ceph_changed() self.log.assert_called_with( 'Could not create ceph keyring: peer not ready?' ) - @patch.object(relations, 'CONFIGS') + @patch.object(hooks, 'CONFIGS') def test_ceph_changed_with_key_and_relation_data(self, configs): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = ['ceph'] configs.write = MagicMock() self.ensure_ceph_keyring.return_value = True - relations.ceph_changed() + hooks.ceph_changed() ex = [ call('/etc/ceph/ceph.conf'), call('/etc/ceph/secret.xml'), From c057a9d916087ed5937a0cbf93072d6a06615752 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 16:34:15 -0700 Subject: [PATCH 15/84] Update symlinks. --- hooks/amqp-relation-changed | 2 +- hooks/amqp-relation-joined | 2 +- hooks/ceph-relation-changed | 2 +- hooks/ceph-relation-joined | 2 +- hooks/cloud-compute-relation-changed | 2 +- hooks/cloud-compute-relation-joined | 2 +- hooks/config-changed | 2 +- hooks/image-service-relation-changed | 2 +- hooks/install | 2 +- hooks/shared-db-relation-changed | 2 +- hooks/shared-db-relation-joined | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/hooks/amqp-relation-changed b/hooks/amqp-relation-changed index 6eb6593e..3ba0bdea 120000 --- a/hooks/amqp-relation-changed +++ b/hooks/amqp-relation-changed @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/amqp-relation-joined b/hooks/amqp-relation-joined index 6eb6593e..3ba0bdea 120000 --- a/hooks/amqp-relation-joined +++ b/hooks/amqp-relation-joined @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of 
file diff --git a/hooks/ceph-relation-changed b/hooks/ceph-relation-changed index 6eb6593e..3ba0bdea 120000 --- a/hooks/ceph-relation-changed +++ b/hooks/ceph-relation-changed @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/ceph-relation-joined b/hooks/ceph-relation-joined index 6eb6593e..3ba0bdea 120000 --- a/hooks/ceph-relation-joined +++ b/hooks/ceph-relation-joined @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/cloud-compute-relation-changed b/hooks/cloud-compute-relation-changed index 6eb6593e..3ba0bdea 120000 --- a/hooks/cloud-compute-relation-changed +++ b/hooks/cloud-compute-relation-changed @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/cloud-compute-relation-joined b/hooks/cloud-compute-relation-joined index 6eb6593e..3ba0bdea 120000 --- a/hooks/cloud-compute-relation-joined +++ b/hooks/cloud-compute-relation-joined @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/config-changed b/hooks/config-changed index 6eb6593e..3ba0bdea 120000 --- a/hooks/config-changed +++ b/hooks/config-changed @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/image-service-relation-changed b/hooks/image-service-relation-changed index 6eb6593e..3ba0bdea 120000 --- a/hooks/image-service-relation-changed +++ b/hooks/image-service-relation-changed @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/install b/hooks/install index 6eb6593e..3ba0bdea 120000 --- a/hooks/install +++ b/hooks/install @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file 
+nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/shared-db-relation-changed b/hooks/shared-db-relation-changed index 6eb6593e..3ba0bdea 120000 --- a/hooks/shared-db-relation-changed +++ b/hooks/shared-db-relation-changed @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/shared-db-relation-joined b/hooks/shared-db-relation-joined index 6eb6593e..3ba0bdea 120000 --- a/hooks/shared-db-relation-joined +++ b/hooks/shared-db-relation-joined @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file From de8464d021d3f278d870b221c59b62ae1c2db935 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 16:37:19 -0700 Subject: [PATCH 16/84] Ensure all apt_install calls are fatal. --- hooks/nova_compute_context.py | 4 ++-- hooks/nova_compute_hooks.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 0d30efb1..9f79555e 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -94,7 +94,7 @@ class CloudComputeContext(context.OSContextGenerator): '''Install but do not upgrade required packages''' required = filter_installed_packages(packages) if required: - apt_install(required) + apt_install(required, fatal=True) def flat_dhcp_context(self): ec2_host = relation_get('ec2_host') @@ -206,7 +206,7 @@ class QuantumPluginContext(context.OSContextGenerator): '''Install but do not upgrade required plugin packages''' required = filter_installed_packages(packages) if required: - apt_install(required) + apt_install(required, fatal=True) def ovs_context(self): q_driver = 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\ diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index b44d6430..5637fe9b 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -143,7 +143,7 
@@ def compute_changed(): def ceph_joined(): if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') - apt_install(filter_installed_packages(['ceph-common'])) + apt_install(filter_installed_packages(['ceph-common']), fatal=True) @hooks.hook('ceph-relation-changed') From 7dada25be461d5e7b568a13664bc4a6afb44cd5c Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 16:38:05 -0700 Subject: [PATCH 17/84] Update tests. --- unit_tests/test_nova_compute_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index c261ae00..c8c0339d 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -222,7 +222,7 @@ class NovaComputeRelationsTests(CharmTestCase): isdir.return_value = False hooks.ceph_joined() mkdir.assert_called_with('/etc/ceph') - self.apt_install.assert_called_with(['ceph-common']) + self.apt_install.assert_called_with(['ceph-common'], fatal=True) @patch.object(hooks, 'CONFIGS') def test_ceph_changed_missing_relation_data(self, configs): From 2cfc844c1497a7386633dcd7dac8da785dedf0de Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 1 Aug 2013 18:27:00 -0700 Subject: [PATCH 18/84] Add start/stop links. --- hooks/start | 2 +- hooks/stop | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/start b/hooks/start index 6eb6593e..3ba0bdea 120000 --- a/hooks/start +++ b/hooks/start @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/stop b/hooks/stop index 6eb6593e..3ba0bdea 120000 --- a/hooks/stop +++ b/hooks/stop @@ -1 +1 @@ -nova_compute_relations.py \ No newline at end of file +nova_compute_hooks.py \ No newline at end of file From 548c734cf86b7744e02e6b512e80901db7ee7193 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 12 Aug 2013 14:48:24 -0700 Subject: [PATCH 19/84] neutron compat. 
--- .../charmhelpers/contrib/hahelpers/cluster.py | 3 +- .../charmhelpers/contrib/openstack/context.py | 132 ++++++++++++++++-- .../contrib/openstack/templates/ceph.conf | 4 +- .../contrib/openstack/templates/haproxy.cfg | 6 +- .../templates/openstack_https_frontend | 8 +- .../contrib/openstack/templating.py | 21 ++- hooks/charmhelpers/contrib/openstack/utils.py | 19 +++ .../contrib/storage/linux/loopback.py | 7 +- hooks/charmhelpers/core/host.py | 14 ++ hooks/nova_compute_context.py | 80 +++-------- hooks/nova_compute_hooks.py | 18 ++- hooks/nova_compute_utils.py | 87 ++++++------ revision | 2 +- 13 files changed, 264 insertions(+), 137 deletions(-) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index dde6c9bb..81abac31 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -19,6 +19,7 @@ from charmhelpers.core.hookenv import ( config as config_get, INFO, ERROR, + unit_get, ) @@ -176,5 +177,5 @@ def canonical_url(configs, vip_setting='vip'): if is_clustered(): addr = config_get(vip_setting) else: - addr = get_unit_hostname() + addr = unit_get('private-address') return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index bc5991a1..2be15c54 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -6,6 +6,12 @@ from subprocess import ( check_call ) + +from charmhelpers.core.host import ( + apt_install, + filter_installed_packages, +) + from charmhelpers.core.hookenv import ( config, local_unit, @@ -14,6 +20,7 @@ from charmhelpers.core.hookenv import ( relation_ids, related_units, unit_get, + unit_private_ip, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -29,6 +36,11 @@ from charmhelpers.contrib.hahelpers.apache import ( get_ca_cert, ) +from charmhelpers.contrib.openstack.neutron 
import ( + network_manager, + neutron_plugin_attribute, +) + CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -57,26 +69,39 @@ class OSContextGenerator(object): class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] + def __init__(self, database=None, user=None, relation_prefix=None): + ''' + Allows inspecting relation for settings prefixed with relation_prefix. + This is useful for parsing access for multiple databases returned via + the shared-db interface (eg, nova_password, quantum_password) + ''' + self.relation_prefix = relation_prefix + self.database = database + self.user = user + def __call__(self): - log('Generating template context for shared-db') - conf = config() - try: - database = conf['database'] - username = conf['database-user'] - except KeyError as e: + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: log('Could not generate shared_db context. ' - 'Missing required charm config options: %s.' % e) + 'Missing required charm config options. 
' + '(database name and user)') raise OSContextError ctxt = {} + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + for rid in relation_ids('shared-db'): for unit in related_units(rid): + passwd = relation_get(password_setting, rid=rid, unit=unit) ctxt = { 'database_host': relation_get('db_host', rid=rid, unit=unit), - 'database': database, - 'database_user': username, - 'database_password': relation_get('password', rid=rid, - unit=unit) + 'database': self.database, + 'database_user': self.user, + 'database_password': passwd, } if not context_complete(ctxt): return {} @@ -153,7 +178,7 @@ class CephContext(OSContextGenerator): def __call__(self): '''This generates context for /etc/ceph/ceph.conf templates''' - log('Generating template context for ceph') + log('Generating tmeplate context for ceph') mon_hosts = [] auth = None for rid in relation_ids('ceph'): @@ -207,7 +232,7 @@ class HAProxyContext(OSContextGenerator): class ImageServiceContext(OSContextGenerator): - interfaces = ['image-service'] + interfaces = ['image-servce'] def __call__(self): ''' @@ -292,3 +317,84 @@ class ApacheSSLContext(OSContextGenerator): portmap = (int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) return ctxt + + +class NeutronContext(object): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages') + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + '''Install but do not upgrade required plugin packages''' + required = filter_installed_packages(self.packages) + if required: + apt_install(required, fatal=True) + + def _save_flag_file(self): + if self.network_manager == 'quantum': + _file = '/etc/nova/quantum_plugin.conf' + else: + _file = '/etc/nova/neutron_plugin.conf' + with open(_file, 'wb') as out: + 
out.write(self.plugin + '\n') + + def ovs_ctxt(self): + ovs_ctxt = { + 'neutron_plugin': 'ovs', + # quantum.conf + 'core_plugin': neutron_plugin_attribute(self.plugin, 'driver'), + # NOTE: network api class in template for each release. + # nova.conf + #'libvirt_vif_driver': n_driver, + #'libvirt_use_virtio_for_bridges': True, + # ovs config + 'local_ip': unit_private_ip(), + } + + if self.neutron_security_groups: + ovs_ctxt['neutron_security_groups'] = True + + fw_driver = ('%s.agent.linux.iptables_firewall.' + 'OVSHybridIptablesFirewallDriver' % + self.network_manager) + + ovs_ctxt.update({ + # IN TEMPLATE: + # - security_group_api=quantum in nova.conf for >= g + # nova_firewall_driver=nova.virt.firewall.NoopFirewallDriver' + 'neutron_firewall_driver': fw_driver, + }) + + return ovs_ctxt + + def __call__(self): + + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + self._ensure_packages() + + ctxt = {'network_manager': self.network_manager} + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + + self._save_flag_file() + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf index 1d8ca3b4..49d07c80 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -3,9 +3,9 @@ # cinder configuration file maintained by Juju # local changes may be overwritten. 
############################################################################### -{% if auth %} +{% if auth -%} [global] auth_supported = {{ auth }} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} -{% endif %} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index b184cd4a..a1694e44 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -25,7 +25,7 @@ listen stats :8888 stats uri / stats auth admin:password -{% if units %} +{% if units -%} {% for service, ports in service_ports.iteritems() -%} listen {{ service }} 0.0.0.0:{{ ports[0] }} balance roundrobin @@ -33,5 +33,5 @@ listen {{ service }} 0.0.0.0:{{ ports[0] }} {% for unit, address in units.iteritems() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} -{% endfor %} -{% endif %} +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index e833a715..e02dc751 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -1,5 +1,5 @@ -{% if endpoints %} -{% for ext, int in endpoints %} +{% if endpoints -%} +{% for ext, int in endpoints -%} Listen {{ ext }} NameVirtualHost *:{{ ext }} @@ -19,5 +19,5 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor %} -{% endif %} +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index c555cc6e..4b1f207d 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -11,10 +11,10 @@ from charmhelpers.core.hookenv import ( from 
charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: - from jinja2 import FileSystemLoader, ChoiceLoader, Environment + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: # python-jinja2 may not be installed yet, or we're running unittests. - FileSystemLoader = ChoiceLoader = Environment = None + FileSystemLoader = ChoiceLoader = Environment = exceptions = None class OSConfigException(Exception): @@ -220,9 +220,24 @@ class OSConfigRenderer(object): log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException ctxt = self.templates[config_file].context() + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking for it + # using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from %s by %s or %s.' % + (self.templates_dir, os.path.basename(config_file), _tmpl), + level=ERROR) + raise e + log('Rendering from template: %s' % _tmpl, level=INFO) - template = self._get_template(_tmpl) return template.render(ctxt) def write(self, config_file): diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 5da85b36..677fa1dd 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -163,6 +163,25 @@ def get_os_version_package(pkg, fatal=True): #error_out(e) +os_rel = None + + +def os_release(package, base='essex'): + ''' + Returns OpenStack release codename from a cached global. + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. 
+ ''' + global os_rel + if os_rel: + return os_rel + os_rel = (get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config('openstack-origin')) or + base) + return os_rel + + def import_key(keyid): cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ "--recv-keys %s" % keyid diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 9fb87a2e..38957ef0 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -35,8 +35,11 @@ def create_loopback(file_path): :returns: str: Full path to new loopback device (eg, /dev/loop0) ''' - cmd = ['losetup', '--find', file_path] - return check_output(cmd).strip() + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in loopback_devices().iteritems(): + if f == file_path: + return d def ensure_loopback_device(path, size): diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index fee4216b..4426d009 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -9,6 +9,8 @@ import apt_pkg import os import pwd import grp +import random +import string import subprocess import hashlib @@ -267,3 +269,15 @@ def lsb_release(): k, v = l.split('=') d[k.strip()] = v.strip() return d + + +def pwgen(length=None): + '''Generate a random pasword.''' + if length is None: + length = random.choice(range(35, 45)) + alphanumeric_chars = [ + l for l in (string.letters + string.digits) + if l not in 'l0QD1vAEIOUaeiou'] + random_chars = [ + random.choice(alphanumeric_chars) for _ in range(length)] + return(''.join(random_chars)) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 9f79555e..79cc5956 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -8,12 +8,11 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, 
service_name, - unit_private_ip, ERROR, WARNING, ) -from charmhelpers.contrib.openstack.utils import get_os_codename_package +from charmhelpers.contrib.openstack.utils import os_release # This is just a label and it must be consistent across @@ -142,7 +141,7 @@ class CloudComputeContext(context.OSContextGenerator): if vol_service == 'cinder': vol_ctxt['volume_api_class'] = 'nova.volume.cinder.API' elif vol_service == 'nova-volume': - if get_os_codename_package('nova-common') in ['essex', 'folsom']: + if os_release('nova-common') in ['essex', 'folsom']: vol_ctxt['volume_api_class'] = 'nova.volume.api.API' else: log('Invalid volume service received via cloud-compute: %s' % @@ -190,7 +189,7 @@ class OSConfigFlagContext(context.OSContextGenerator): flags = {} for flag in config_flags: if '=' not in flag: - log('Impoperly formatted config-flag, expected k=v ' + log('Improperly formatted config-flag, expected k=v ' ' got %s' % flag, level=WARNING) continue k, v = flag.split('=') @@ -199,65 +198,32 @@ class OSConfigFlagContext(context.OSContextGenerator): return ctxt -class QuantumPluginContext(context.OSContextGenerator): +class NeutronComputeContext(context.NeutronContext): interfaces = [] - def _ensure_packages(self, packages): - '''Install but do not upgrade required plugin packages''' - required = filter_installed_packages(packages) - if required: - apt_install(required, fatal=True) + @property + def plugin(self): + from nova_compute_utils import neutron_plugin + return neutron_plugin() - def ovs_context(self): - q_driver = 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\ - 'OVSQuantumPluginV2' - q_fw_driver = 'quantum.agent.linux.iptables_firewall.'\ - 'OVSHybridIptablesFirewallDriver' + @property + def network_manager(self): + from nova_compute_utils import network_manager as manager + return manager() - if get_os_codename_package('nova-common') in ['essex', 'folsom']: + @property + def neutron_security_groups(self): + groups = 
[relation_get('neutron_security_groups'), + relation_get('quantum_security_groups')] + return ('yes' in groups or 'Yes' in groups) + + def ovs_ctxt(self): + ctxt = super(NeutronComputeContext, self).ovs_ctxt() + if os_release('nova-common') == 'folsom': n_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver' else: n_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver' - n_fw_driver = 'nova.virt.firewall.NoopFirewallDriver' - - ovs_ctxt = { - 'quantum_plugin': 'ovs', - # quantum.conf - 'core_plugin': q_driver, - # nova.conf + ctxt.update({ 'libvirt_vif_driver': n_driver, - 'libvirt_use_virtio_for_bridges': True, - # ovs config - 'tenant_network_type': 'gre', - 'enable_tunneling': True, - 'tunnel_id_ranges': '1:1000', - 'local_ip': unit_private_ip(), - } - - q_sec_groups = relation_get('quantum_security_groups') - if q_sec_groups and q_sec_groups.lower() == 'yes': - ovs_ctxt['quantum_security_groups'] = True - # nova.conf - ovs_ctxt['nova_firewall_driver'] = n_fw_driver - # ovs conf - ovs_ctxt['ovs_firewall_driver'] = q_fw_driver - - return ovs_ctxt - - def __call__(self): - from nova_compute_utils import quantum_attribute - - plugin = relation_get('quantum_plugin') - if not plugin: - return {} - - self._ensure_packages(quantum_attribute(plugin, 'packages')) - - ctxt = {} - - if plugin == 'ovs': - ctxt.update(self.ovs_context()) - - _save_flag_file(path='/etc/nova/quantum_plugin.conf', data=plugin) - + }) return ctxt diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 5637fe9b..19494ee9 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -26,16 +26,17 @@ from charmhelpers.contrib.openstack.utils import ( openstack_upgrade_available, ) +from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute + from nova_compute_utils import ( determine_packages, import_authorized_keys, import_keystone_ca_cert, initialize_ssh_keys, migration_enabled, + network_manager, + neutron_plugin, 
do_openstack_upgrade, - quantum_attribute, - quantum_enabled, - quantum_plugin, public_ssh_key, restart_map, register_configs, @@ -84,8 +85,10 @@ def amqp_changed(): log('amqp relation incomplete. Peer not ready?') return CONFIGS.write('/etc/nova/nova.conf') - if quantum_enabled(): + if network_manager() == 'quantum': CONFIGS.write('/etc/quantum/quantum.conf') + if network_manager() == 'neutron': + CONFIGS.write('/etc/neutron/neutron.conf') @hooks.hook('shared-db-relation-joined') @@ -101,9 +104,10 @@ def db_changed(): log('shared-db relation incomplete. Peer not ready?') return CONFIGS.write('/etc/nova/nova.conf') - if quantum_enabled(): - plugin = quantum_plugin() - CONFIGS.write(quantum_attribute(plugin, 'config')) + nm = network_manager() + if nm in ['quantum', 'neutron']: + plugin = neutron_plugin() + CONFIGS.write(neutron_plugin_attribute(plugin, 'config', nm)) @hooks.hook('image-service-relation-changed') diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 5ee0da7f..455dd165 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -11,7 +11,12 @@ from charmhelpers.core.hookenv import ( related_units, relation_ids, relation_get, - ERROR, +) + +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, + neutron_plugins, + quantum_plugins, ) from charmhelpers.contrib.openstack.utils import get_os_codename_package @@ -22,7 +27,7 @@ from nova_compute_context import ( NovaComputeLibvirtContext, NovaComputeCephContext, OSConfigFlagContext, - QuantumPluginContext, + NeutronComputeContext, ) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -55,7 +60,7 @@ BASE_RESOURCE_MAP = { CloudComputeContext(), NovaComputeCephContext(), OSConfigFlagContext(), - QuantumPluginContext()] + NeutronComputeContext()] }, } @@ -77,22 +82,14 @@ QUANTUM_RESOURCES = { } } -QUANTUM_PLUGINS = { - 'ovs': { - 'config': '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini', - 'contexts': 
[context.SharedDBContext(), - QuantumPluginContext()], - 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': ['quantum-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms'], - }, - 'nvp': { - 'config': '/etc/quantum/plugins/nicira/nvp.ini', - 'services': [], - 'packages': ['quantum-plugin-nicira'], +NEUTRON_RESOURCES = { + '/etc/neutron/neutron.conf': { + 'services': ['neutron-server'], + 'contexts': [context.AMQPContext()], } } + # Maps virt-type config to a compute package(s). VIRT_TYPES = { 'kvm': ['nova-compute-kvm'], @@ -112,18 +109,24 @@ def resource_map(): resource_map = deepcopy(BASE_RESOURCE_MAP) net_manager = network_manager() - if (net_manager in ['FlatManager', 'FlatDHCPManager'] and + if (net_manager in ['flatmanager', 'flatdhcpmanager'] and config('multi-host').lower() == 'yes'): resource_map['/etc/nova/nova.conf']['services'].extend( ['nova-api', 'nova-network'] ) - elif net_manager == 'Quantum': - plugin = quantum_plugin() - resource_map.update(QUANTUM_RESOURCES) + + if net_manager in ['neutron', 'quantum']: + if net_manager == 'quantum': + resource_map.update(quantum_plugins()) + if net_manager == 'neutron': + resource_map.update(neutron_plugins()) + + plugin = neutron_plugin() if plugin: - conf = quantum_attribute(plugin, 'config') - svcs = quantum_attribute(plugin, 'services') - ctxts = quantum_attribute(plugin, 'contexts') or [] + conf = neutron_plugin_attribute(plugin, 'config', net_manager) + svcs = neutron_plugin_attribute(plugin, 'services', net_manager) + ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager) + or []) resource_map[conf] = {} resource_map[conf]['services'] = svcs resource_map[conf]['contexts'] = ctxts @@ -147,8 +150,11 @@ def register_configs(): Returns an OSTemplateRenderer object with all required configs registered. 
''' _resource_map = resource_map() - if quantum_enabled(): + net_manager = network_manager() + if net_manager == 'quantum': _resource_map.update(QUANTUM_RESOURCES) + elif net_manager == 'neutron': + _resource_map.update(NEUTRON_RESOURCES) release = get_os_codename_package('nova-common', fatal=False) or 'essex' configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, @@ -163,12 +169,13 @@ def determine_packages(): packages = [] + BASE_PACKAGES net_manager = network_manager() - if (net_manager in ['FlatManager', 'FlatDHCPManager'] and + if (net_manager in ['flatmanager', 'flatdhcpmanager'] and config('multi-host').lower() == 'yes'): packages.extend(['nova-api', 'nova-network']) - elif net_manager == 'Quantum': - plugin = quantum_plugin() - packages.extend(quantum_attribute(plugin, 'packages')) + elif net_manager == 'quantum': + plugin = neutron_plugin() + packages.extend( + neutron_plugin_attribute(plugin, 'packages', net_manager)) if relation_ids('ceph'): packages.append('ceph-common') @@ -200,7 +207,7 @@ def _network_config(): Obtain all relevant network configuration settings from nova-c-c via cloud-compute interface. 
''' - settings = ['network_manager', 'quantum_plugin'] + settings = ['network_manager', 'neutron_plugin', 'quantum_plugin'] net_config = {} for rid in relation_ids('cloud-compute'): for unit in related_units(rid): @@ -211,24 +218,16 @@ def _network_config(): return net_config -def quantum_plugin(): - return _network_config().get('quantum_plugin') +def neutron_plugin(): + return (_network_config().get('quantum_plugin') or + _network_config().get('quantum_plugin')) def network_manager(): - return _network_config().get('network_manager') - - -def quantum_attribute(plugin, attr): - try: - _plugin = QUANTUM_PLUGINS[plugin] - except KeyError: - log('Unrecognised plugin for quantum: %s' % plugin, level=ERROR) - raise - try: - return _plugin[attr] - except KeyError: - return None + manager = _network_config().get('network_manager') + if manager: + manager = manager.lower() + return manager def public_ssh_key(user='root'): diff --git a/revision b/revision index bc6298e8..58c9bdf9 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -110 +111 From 302a10759ff3390759fd023e015155c3d7d73248 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 12 Aug 2013 15:20:15 -0700 Subject: [PATCH 20/84] Appropriately update resource_map in presence of quantum. 
--- hooks/nova_compute_utils.py | 23 ++++++----------------- revision | 2 +- 2 files changed, 7 insertions(+), 18 deletions(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 455dd165..4333e026 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -13,13 +13,9 @@ from charmhelpers.core.hookenv import ( relation_get, ) -from charmhelpers.contrib.openstack.neutron import ( - neutron_plugin_attribute, - neutron_plugins, - quantum_plugins, -) +from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute -from charmhelpers.contrib.openstack.utils import get_os_codename_package +from charmhelpers.contrib.openstack.utils import os_release from charmhelpers.contrib.openstack import templating, context from nova_compute_context import ( @@ -117,9 +113,9 @@ def resource_map(): if net_manager in ['neutron', 'quantum']: if net_manager == 'quantum': - resource_map.update(quantum_plugins()) + resource_map.update(QUANTUM_RESOURCES) if net_manager == 'neutron': - resource_map.update(neutron_plugins()) + resource_map.update(NEUTRON_RESOURCES) plugin = neutron_plugin() if plugin: @@ -149,18 +145,11 @@ def register_configs(): ''' Returns an OSTemplateRenderer object with all required configs registered. 
''' - _resource_map = resource_map() - net_manager = network_manager() - if net_manager == 'quantum': - _resource_map.update(QUANTUM_RESOURCES) - elif net_manager == 'neutron': - _resource_map.update(NEUTRON_RESOURCES) - - release = get_os_codename_package('nova-common', fatal=False) or 'essex' + release = os_release('nova-common') configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) - for cfg, d in _resource_map.iteritems(): + for cfg, d in resource_map().iteritems(): configs.register(cfg, d['contexts']) return configs diff --git a/revision b/revision index 58c9bdf9..4699eb3c 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -111 +116 From e837b0b16b30ccee5a5a93f79110924df9da1eb8 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 12 Aug 2013 15:20:50 -0700 Subject: [PATCH 21/84] checkin neutron helper. --- .../charmhelpers/contrib/openstack/neutron.py | 109 ++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 hooks/charmhelpers/contrib/openstack/neutron.py diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py new file mode 100644 index 00000000..53990a08 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -0,0 +1,109 @@ +# Various utilies for dealing with Neutron and the renaming from Quantum. + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import os_release + + +# legacy +def quantum_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 
+ 'OVSQuantumPluginV2', + 'contexts': [ + context.NeutronContext(), + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron')], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': ['quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'services': [], + 'packages': ['quantum-plugin-nicira'], + } + } + + +def neutron_plugins(): + from charmhelpers.contrib.openstack import context + return { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron')], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': ['neutron-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
+ 'NeutronPlugin.NvpPluginV2', + 'services': [], + 'packages': ['neutron-plugin-nicira'], + } + } + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log('Error: Network manager does not support plugins.') + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatability (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' From 1d510e26c24a83fbcd6bd914809526b9488bad04 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 12 Aug 2013 15:24:17 -0700 Subject: [PATCH 22/84] helpers sync. 
--- hooks/charmhelpers/contrib/openstack/context.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 2be15c54..3738a212 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -37,7 +37,6 @@ from charmhelpers.contrib.hahelpers.apache import ( ) from charmhelpers.contrib.openstack.neutron import ( - network_manager, neutron_plugin_attribute, ) @@ -332,7 +331,8 @@ class NeutronContext(object): @property def packages(self): - return neutron_plugin_attribute(self.plugin, 'packages') + return neutron_plugin_attribute( + self.plugin, 'packages', self.network_manager) @property def neutron_security_groups(self): @@ -353,10 +353,13 @@ class NeutronContext(object): out.write(self.plugin + '\n') def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager), + ovs_ctxt = { 'neutron_plugin': 'ovs', # quantum.conf - 'core_plugin': neutron_plugin_attribute(self.plugin, 'driver'), + 'core_plugin': driver, # NOTE: network api class in template for each release. # nova.conf #'libvirt_vif_driver': n_driver, From cd8b2fe25dc0d0af8245c656091786ceea9e5849 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 13 Aug 2013 15:03:53 -0700 Subject: [PATCH 23/84] Better compat for quantum+neutron. 
--- config.yaml | 8 ++ hooks/nova_compute_context.py | 157 +++++++++++++++++++++++----------- hooks/nova_compute_hooks.py | 5 ++ hooks/nova_compute_utils.py | 12 ++- revision | 2 +- 5 files changed, 130 insertions(+), 54 deletions(-) diff --git a/config.yaml b/config.yaml index 772b3059..308c24e0 100644 --- a/config.yaml +++ b/config.yaml @@ -34,6 +34,14 @@ options: default: nova type: string description: Database name + neutron-database-user: + default: neutron + type: string + description: Username for Neutron database access (if enabled) + neutron-database: + default: neutron + type: string + description: Database name for Neutron (if enabled) virt-type: default: kvm type: string diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 79cc5956..c6cff692 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -25,10 +25,33 @@ def _save_flag_file(path, data): Saves local state about plugin or manager to specified file. ''' # Wonder if we can move away from this now? 
+ if data is None: + return with open(path, 'wb') as out: out.write(data) +# compatability functions to help with quantum -> neutron transition +def _network_manager(): + from nova_compute_utils import network_manager as manager + return manager() + + +def _neutron_security_groups(): + groups = [relation_get('neutron_security_groups'), + relation_get('quantum_security_groups')] + return ('yes' in groups or 'Yes' in groups) + + +def _neutron_plugin(): + from nova_compute_utils import neutron_plugin + return neutron_plugin() + + +def _neutron_url(): + return relation_get('neutron_url') or relation_get('quantum_url') + + class NovaComputeLibvirtContext(context.OSContextGenerator): ''' Determines various libvirt options depending on live migration @@ -95,6 +118,14 @@ class CloudComputeContext(context.OSContextGenerator): if required: apt_install(required, fatal=True) + @property + def network_manager(self): + return _network_manager() + + @property + def volume_service(self): + return relation_get('volume_service') + def flat_dhcp_context(self): ec2_host = relation_get('ec2_host') if not ec2_host: @@ -104,50 +135,92 @@ class CloudComputeContext(context.OSContextGenerator): self._ensure_packages(['nova-api', 'nova-network']) return { - 'network_manager': 'nova.network.manager.FlatDHCPManager', 'flat_interface': config('flat-interface'), 'ec2_dmz_host': ec2_host, } - def quantum_context(self): - quantum_ctxt = { - 'quantum_auth_strategy': 'keystone', - 'keystone_host': relation_get('keystone_host'), + def neutron_context(self): + # generate config context for neutron or quantum. 
these get converted + # directly into flags in nova.conf + # NOTE: Its up to release templates to set correct driver + def _legacy_quantum(ctxt): + renamed = {} + for k, v in ctxt.iteritems(): + k = k.replace('neutron', 'quantum') + renamed[k] = v + return renamed + + neutron_ctxt = { + 'neutron_auth_strategy': 'keystone', + 'keystone_host': relation_get('auth_host'), 'auth_port': relation_get('auth_port'), - 'quantum_url': relation_get('quantum_url'), - 'quantum_admin_tenant_name': relation_get('service_tenant'), - 'quantum_admin_username': relation_get('service_username'), - 'quantum_admin_password': relation_get('service_password'), - 'quantum_security_groups': relation_get('quantum_security_groups'), - 'quantum_plugin': relation_get('quantum_plugin'), + 'neutron_admin_tenant_name': relation_get('service_tenant_name'), + 'neutron_admin_username': relation_get('service_username'), + 'neutron_admin_password': relation_get('service_password'), + 'neutron_plugin': _neutron_plugin(), + 'neutron_url': _neutron_url(), } - missing = [k for k, v in quantum_ctxt.iteritems() if v is None] + missing = [k for k, v in neutron_ctxt.iteritems() if v in ['', None]] if missing: log('Missing required relation settings for Quantum: ' + ' '.join(missing)) return {} - ks_url = 'http://%s:%s/v2.0' % (quantum_ctxt['keystone_host'], - quantum_ctxt['auth_port']) - quantum_ctxt['quantum_admin_auth_url'] = ks_url - quantum_ctxt['network_api_class'] = 'nova.network.quantumv2.api.API' - return quantum_ctxt + neutron_ctxt['neutron_security_groups'] = _neutron_security_groups() + + ks_url = 'http://%s:%s/v2.0' % (neutron_ctxt['keystone_host'], + neutron_ctxt['auth_port']) + neutron_ctxt['neutron_admin_auth_url'] = ks_url + + if self.network_manager == 'quantum': + return _legacy_quantum(neutron_ctxt) + + return neutron_ctxt def volume_context(self): + # provide basic validation that the volume manager is supported on the + # given openstack release (nova-volume is only supported for E and F) + 
# it is up to release templates to set the correct volume driver. + + os_rel = os_release('nova-common') vol_service = relation_get('volume_service') if not vol_service: return {} - vol_ctxt = {} + return vol_service + + # ensure volume service is supported on specific openstack release. if vol_service == 'cinder': - vol_ctxt['volume_api_class'] = 'nova.volume.cinder.API' + if os_rel == 'essex': + e = ('Attempting to configure cinder volume manager on' + 'unsupported OpenStack release (essex)') + log(e, level=ERROR) + raise context.OSContextError(e) + return 'cinder' elif vol_service == 'nova-volume': - if os_release('nova-common') in ['essex', 'folsom']: - vol_ctxt['volume_api_class'] = 'nova.volume.api.API' + if os_release('nova-common') not in ['essex', 'folsom']: + e = ('Attempting to configure nova-volume manager on' + 'unsupported OpenStack release (%s).' % os_rel) + log(e, level=ERROR) + raise context.OSContextError(e) + return 'nova-volume' else: - log('Invalid volume service received via cloud-compute: %s' % - vol_service, level=ERROR) - raise - return vol_ctxt + e = ('Invalid volume service received via cloud-compute: %s' % + vol_service) + log(e, level=ERROR) + raise context.OSContextError(e) + + def network_manager_context(self): + ctxt = {} + if self.network_manager == 'flatdhcpmanager': + ctxt = self.flat_dhcp_context() + elif self.network_manager in ['neutron', 'quantum']: + ctxt = self.neutron_context() + + _save_flag_file(path='/etc/nova/nm.conf', data=self.network_manager) + + log('Generated config context for %s network manager.' 
% + self.network_manager) + return ctxt def __call__(self): rids = relation_ids('cloud-compute') @@ -156,21 +229,14 @@ class CloudComputeContext(context.OSContextGenerator): ctxt = {} - net_manager = relation_get('network_manager') + net_manager = self.network_manager_context() if net_manager: - if net_manager.lower() == 'flatdhcpmanager': - ctxt.update({ - 'network_manager_config': self.flat_dhcp_context() - }) - elif net_manager.lower() == 'quantum': - ctxt.update({ - 'network_manager_config': self.quantum_context() - }) - _save_flag_file(path='/etc/nova/nm.conf', data=net_manager) + ctxt['network_manager'] = self.network_manager + ctxt['network_manager_config'] = net_manager vol_service = self.volume_context() if vol_service: - ctxt.update({'volume_service_config': vol_service}) + ctxt['volume_service'] = vol_service return ctxt @@ -203,27 +269,14 @@ class NeutronComputeContext(context.NeutronContext): @property def plugin(self): + return _neutron_plugin() from nova_compute_utils import neutron_plugin return neutron_plugin() @property def network_manager(self): - from nova_compute_utils import network_manager as manager - return manager() + return _network_manager() @property def neutron_security_groups(self): - groups = [relation_get('neutron_security_groups'), - relation_get('quantum_security_groups')] - return ('yes' in groups or 'Yes' in groups) - - def ovs_ctxt(self): - ctxt = super(NeutronComputeContext, self).ovs_ctxt() - if os_release('nova-common') == 'folsom': - n_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver' - else: - n_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver' - ctxt.update({ - 'libvirt_vif_driver': n_driver, - }) - return ctxt + return _neutron_security_groups() diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 19494ee9..c6c17af8 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -95,6 +95,11 @@ def amqp_changed(): def db_joined(): 
relation_set(database=config('database'), username=config('database-user'), hostname=unit_get('private-address')) + if network_manager() in ['quantum', 'neutron']: + # XXX: Renaming relations from quantum_* to neutron_* here. + relation_set(neutron_database=config('neutron-database'), + neutron_username=config('neutron-database-user'), + neutron_hostname=unit_get('private-address')) @hooks.hook('shared-db-relation-changed') diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 4333e026..ac24c5fd 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -213,9 +213,19 @@ def neutron_plugin(): def network_manager(): + ''' + Obtain the network manager advertised by nova-c-c, renaming to Quantum + if required + ''' manager = _network_config().get('network_manager') if manager: manager = manager.lower() + if manager not in ['quantum', 'neutron']: + return manager + if os_release('nova-common') in ['folsom', 'grizzly']: + return 'quantum' + else: + return 'neutron' return manager @@ -303,6 +313,6 @@ def import_keystone_ca_cert(): if not ca_cert: return log('Writing Keystone CA certificate to %s' % CA_CERT_PATH) - with open(CA_CERT_PATH) as out: + with open(CA_CERT_PATH, 'wb') as out: out.write(b64decode(ca_cert)) check_call(['update-ca-certificates']) diff --git a/revision b/revision index 4699eb3c..d136d6a7 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -116 +125 From 3d4abc282230ae9a392f68c043a73107a2720070 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 13 Aug 2013 15:07:37 -0700 Subject: [PATCH 24/84] Sync helpers. 
--- .../charmhelpers/contrib/openstack/context.py | 25 +++---------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 3738a212..4f5110d6 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -354,34 +354,15 @@ class NeutronContext(object): def ovs_ctxt(self): driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager), + self.network_manager) ovs_ctxt = { - 'neutron_plugin': 'ovs', - # quantum.conf 'core_plugin': driver, - # NOTE: network api class in template for each release. - # nova.conf - #'libvirt_vif_driver': n_driver, - #'libvirt_use_virtio_for_bridges': True, - # ovs config + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, 'local_ip': unit_private_ip(), } - if self.neutron_security_groups: - ovs_ctxt['neutron_security_groups'] = True - - fw_driver = ('%s.agent.linux.iptables_firewall.' - 'OVSHybridIptablesFirewallDriver' % - self.network_manager) - - ovs_ctxt.update({ - # IN TEMPLATE: - # - security_group_api=quantum in nova.conf for >= g - # nova_firewall_driver=nova.virt.firewall.NoopFirewallDriver' - 'neutron_firewall_driver': fw_driver, - }) - return ovs_ctxt def __call__(self): From 8ce70f712b056e8c4f0cacf738b7b9c96023ab05 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Wed, 14 Aug 2013 09:08:41 -0700 Subject: [PATCH 25/84] Associate quantum plugin with correct local context. 
--- hooks/charmhelpers/contrib/openstack/neutron.py | 1 - hooks/nova_compute_utils.py | 1 + revision | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 53990a08..37b5a7bd 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -19,7 +19,6 @@ def quantum_plugins(): 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 'OVSQuantumPluginV2', 'contexts': [ - context.NeutronContext(), context.SharedDBContext(user=config('neutron-database-user'), database=config('neutron-database'), relation_prefix='neutron')], diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index ac24c5fd..309200b7 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -126,6 +126,7 @@ def resource_map(): resource_map[conf] = {} resource_map[conf]['services'] = svcs resource_map[conf]['contexts'] = ctxts + resource_map[conf]['contexts'].append(NeutronComputeContext()) if relation_ids('ceph'): resource_map.update(CEPH_RESOURCES) diff --git a/revision b/revision index d136d6a7..0a3e7b04 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -125 +126 From 4f109fe9612e85f2014d8478b7e4988337dbb5f1 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Wed, 14 Aug 2013 09:13:20 -0700 Subject: [PATCH 26/84] Drop OSConfigFlagContext, move to helpers. 
--- hooks/nova_compute_context.py | 22 ---------------------- hooks/nova_compute_utils.py | 3 +-- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index c6cff692..201f79ec 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -241,28 +241,6 @@ class CloudComputeContext(context.OSContextGenerator): return ctxt -class OSConfigFlagContext(context.OSContextGenerator): - ''' - Responsible adding user-defined config-flags in charm config to a - to a template context. - ''' - # this can be moved to charm-helpers? - def __call__(self): - config_flags = config('config-flags') - if not config_flags: - return {} - config_flags = config_flags.split(',') - flags = {} - for flag in config_flags: - if '=' not in flag: - log('Improperly formatted config-flag, expected k=v ' - ' got %s' % flag, level=WARNING) - continue - k, v = flag.split('=') - flags[k.strip()] = v - ctxt = {'user_config_flags': flags} - return ctxt - class NeutronComputeContext(context.NeutronContext): interfaces = [] diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 309200b7..d91d9f07 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -22,7 +22,6 @@ from nova_compute_context import ( CloudComputeContext, NovaComputeLibvirtContext, NovaComputeCephContext, - OSConfigFlagContext, NeutronComputeContext, ) @@ -53,9 +52,9 @@ BASE_RESOURCE_MAP = { 'contexts': [context.AMQPContext(), context.SharedDBContext(), context.ImageServiceContext(), + context.OSConfigFlagContext(), CloudComputeContext(), NovaComputeCephContext(), - OSConfigFlagContext(), NeutronComputeContext()] }, } From 5e3c2bc79330c4144e1a3d059c8b086960dbbeac Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Wed, 14 Aug 2013 09:13:38 -0700 Subject: [PATCH 27/84] Sync helpers. 
--- .../charmhelpers/contrib/hahelpers/cluster.py | 6 +++-- .../charmhelpers/contrib/openstack/context.py | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 81abac31..074855f4 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -97,12 +97,14 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): - if None not in [ + rel_state = [ relation_get('https_keystone', rid=r_id, unit=unit), relation_get('ssl_cert', rid=r_id, unit=unit), relation_get('ssl_key', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit), - ]: + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): return True return False diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 4f5110d6..67fce55d 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -21,6 +21,7 @@ from charmhelpers.core.hookenv import ( related_units, unit_get, unit_private_ip, + WARNING, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -382,3 +383,25 @@ class NeutronContext(object): self._save_flag_file() return ctxt + + +class OSConfigFlagContext(OSContextGenerator): + ''' + Responsible adding user-defined config-flags in charm config to a + to a template context. 
+ ''' + def __call__(self): + config_flags = config('config-flags') + if not config_flags: + return {} + config_flags = config_flags.split(',') + flags = {} + for flag in config_flags: + if '=' not in flag: + log('Improperly formatted config-flag, expected k=v ' + ' got %s' % flag, level=WARNING) + continue + k, v = flag.split('=') + flags[k.strip()] = v + ctxt = {'user_config_flags': flags} + return ctxt From 139b6600454a0ff4746bd64fa500ae80f926953a Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Wed, 14 Aug 2013 13:45:38 -0700 Subject: [PATCH 28/84] Update tests. --- hooks/nova_compute_context.py | 9 +- hooks/nova_compute_utils.py | 2 +- unit_tests/test_nova_compute_contexts.py | 172 ++++++++++++----------- unit_tests/test_nova_compute_hooks.py | 14 +- unit_tests/test_nova_compute_utils.py | 48 +++---- 5 files changed, 125 insertions(+), 120 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 201f79ec..5310feb8 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -186,20 +186,19 @@ class CloudComputeContext(context.OSContextGenerator): vol_service = relation_get('volume_service') if not vol_service: return {} - return vol_service # ensure volume service is supported on specific openstack release. if vol_service == 'cinder': if os_rel == 'essex': - e = ('Attempting to configure cinder volume manager on' - 'unsupported OpenStack release (essex)') + e = ('Attempting to configure cinder volume manager on ' + 'an unsupported OpenStack release (essex)') log(e, level=ERROR) raise context.OSContextError(e) return 'cinder' elif vol_service == 'nova-volume': if os_release('nova-common') not in ['essex', 'folsom']: - e = ('Attempting to configure nova-volume manager on' - 'unsupported OpenStack release (%s).' % os_rel) + e = ('Attempting to configure nova-volume manager on ' + 'an unsupported OpenStack release (%s).' 
% os_rel) log(e, level=ERROR) raise context.OSContextError(e) return 'nova-volume' diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index d91d9f07..d2e9fac6 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -174,8 +174,8 @@ def determine_packages(): packages.extend(VIRT_TYPES[virt_type]) except KeyError: log('Unsupported virt-type configured: %s' % virt_type) - raise + return packages diff --git a/unit_tests/test_nova_compute_contexts.py b/unit_tests/test_nova_compute_contexts.py index ec11003c..ecc1ce73 100644 --- a/unit_tests/test_nova_compute_contexts.py +++ b/unit_tests/test_nova_compute_contexts.py @@ -1,18 +1,19 @@ -from mock import MagicMock +from mock import MagicMock, patch from copy import deepcopy from unit_tests.test_utils import CharmTestCase +from charmhelpers.contrib.openstack.context import OSContextError + import hooks.nova_compute_context as context TO_PATCH = [ - 'get_os_codename_package', 'apt_install', 'filter_installed_packages', 'relation_ids', 'relation_get', 'config', - 'unit_private_ip', 'log', + 'os_release', '_save_flag_file', ] @@ -22,11 +23,12 @@ QUANTUM_CONTEXT = { 'keystone_host': 'keystone_host', 'auth_port': '5000', 'quantum_url': 'http://quantum_url', - 'service_tenant': 'admin', + 'service_tenant_name': 'admin', 'service_username': 'admin', 'service_password': 'openstack', 'quantum_security_groups': 'yes', 'quantum_plugin': 'ovs', + 'auth_host': 'keystone_host', } # Context for an OVS plugin contains at least the following. 
Other bits @@ -43,7 +45,7 @@ BASE_QUANTUM_OVS_PLUGIN_CONTEXT = { 'tenant_network_type': 'gre', 'tunnel_id_ranges': '1:1000', 'quantum_plugin': 'ovs', - 'quantum_security_groups': True, + 'quantum_security_groups': 'yes', } @@ -64,105 +66,107 @@ class NovaComputeContextTests(CharmTestCase): cloud_compute = context.CloudComputeContext() self.assertEquals({}, cloud_compute()) - def test_cloud_compute_volume_context_cinder(self): + @patch.object(context, '_network_manager') + def test_cloud_compute_volume_context_cinder(self, netman): + netman.return_value = None self.relation_ids.return_value = 'cloud-compute:0' cloud_compute = context.CloudComputeContext() self.test_relation.set({'volume_service': 'cinder'}) - result = cloud_compute() - ex_ctxt = { - 'volume_service_config': { - 'volume_api_class': 'nova.volume.cinder.API' - } - } - self.assertEquals(ex_ctxt, result) + self.assertEquals({'volume_service': 'cinder'}, cloud_compute()) - def test_cloud_compute_volume_context_nova_vol(self): + @patch.object(context, '_network_manager') + def test_cloud_compute_volume_context_nova_vol(self, netman): + netman.return_value = None self.relation_ids.return_value = 'cloud-compute:0' cloud_compute = context.CloudComputeContext() - self.get_os_codename_package.return_value = 'essex' + self.os_release.return_value = 'essex' self.test_relation.set({'volume_service': 'nova-volume'}) - result = cloud_compute() - ex_ctxt = { - 'volume_service_config': { - 'volume_api_class': 'nova.volume.api.API' - } - } - self.assertEquals(ex_ctxt, result) + self.assertEquals({'volume_service': 'nova-volume'}, cloud_compute()) - def test_cloud_compute_volume_context_nova_vol_unsupported(self): + @patch.object(context, '_network_manager') + def test_cloud_compute_volume_context_nova_vol_unsupported(self, netman): + self.skipTest('TODO') + netman.return_value = None self.relation_ids.return_value = 'cloud-compute:0' cloud_compute = context.CloudComputeContext() # n-vol doesn't exist in grizzly - 
self.get_os_codename_package.return_value = 'grizzly' + self.os_release.return_value = 'grizzly' self.test_relation.set({'volume_service': 'nova-volume'}) - result = cloud_compute() - self.assertEquals({}, result) + self.assertRaises(OSContextError, cloud_compute) - def test_cloud_compute_flatdhcp_context(self): + @patch.object(context, '_network_manager') + def test_cloud_compute_flatdhcp_context(self, netman): + netman.return_value = 'flatdhcpmanager' + self.relation_ids.return_value = 'cloud-compute:0' self.test_relation.set({ 'network_manager': 'FlatDHCPManager', 'ec2_host': 'novaapihost'}) cloud_compute = context.CloudComputeContext() ex_ctxt = { + 'network_manager': 'flatdhcpmanager', 'network_manager_config': { - 'network_manager': 'nova.network.manager.FlatDHCPManager', 'ec2_dmz_host': 'novaapihost', 'flat_interface': 'eth1' - }, + } } self.assertEquals(ex_ctxt, cloud_compute()) - def test_cloud_compute_quantum_context(self): + @patch.object(context, '_neutron_plugin') + @patch.object(context, '_neutron_url') + @patch.object(context, '_network_manager') + def test_cloud_compute_quantum_context(self, netman, url, plugin): + netman.return_value = 'quantum' + plugin.return_value = 'ovs' + url.return_value = 'http://nova-c-c:9696' self.test_relation.set(QUANTUM_CONTEXT) cloud_compute = context.CloudComputeContext() ex_ctxt = { + 'network_manager': 'quantum', 'network_manager_config': { 'auth_port': '5000', 'keystone_host': 'keystone_host', - 'network_api_class': 'nova.network.quantumv2.api.API', 'quantum_admin_auth_url': 'http://keystone_host:5000/v2.0', 'quantum_admin_password': 'openstack', 'quantum_admin_tenant_name': 'admin', 'quantum_admin_username': 'admin', 'quantum_auth_strategy': 'keystone', 'quantum_plugin': 'ovs', - 'quantum_security_groups': 'yes', - 'quantum_url': 'http://quantum_url' + 'quantum_security_groups': True, + 'quantum_url': 'http://nova-c-c:9696' } } self.assertEquals(ex_ctxt, cloud_compute()) self._save_flag_file.assert_called_with( 
path='/etc/nova/nm.conf', data='quantum') - def test_quantum_plugin_context_no_setting(self): - qplugin = context.QuantumPluginContext() - self.assertEquals({}, qplugin()) - - def _test_qplugin_context(self, os_release): - self.get_os_codename_package.return_value = os_release - self.unit_private_ip.return_value = '10.0.0.1' - self.test_relation.set( - {'quantum_plugin': 'ovs', 'quantum_security_groups': 'yes'}) - qplugin = context.QuantumPluginContext() - qplugin._ensure_packages = MagicMock() - return qplugin() - - def test_quantum_plugin_context_ovs_folsom(self): - ex_ctxt = deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) - ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' - 'LibvirtHybridOVSBridgeDriver') - self.assertEquals(ex_ctxt, self._test_qplugin_context('folsom')) - self._save_flag_file.assert_called_with( - path='/etc/nova/quantum_plugin.conf', data='ovs') - - def test_quantum_plugin_context_ovs_grizzly_and_beyond(self): - ex_ctxt = deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) - ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' - 'LibvirtGenericVIFDriver') - self.assertEquals(ex_ctxt, self._test_qplugin_context('grizzly')) - self._save_flag_file.assert_called_with( - path='/etc/nova/quantum_plugin.conf', data='ovs') +# def test_quantum_plugin_context_no_setting(self): +# qplugin = context.QuantumPluginContext() +# self.assertEquals({}, qplugin()) +# +# def _test_qplugin_context(self, os_release): +# self.get_os_codename_package.return_value = os_release +# self.test_relation.set( +# {'quantum_plugin': 'ovs', 'quantum_security_groups': 'yes'}) +# qplugin = context.QuantumPluginContext() +# qplugin._ensure_packages = MagicMock() +# return qplugin() +# +# def test_quantum_plugin_context_ovs_folsom(self): +# ex_ctxt = deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) +# ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' 
+# 'LibvirtHybridOVSBridgeDriver') +# self.assertEquals(ex_ctxt, self._test_qplugin_context('folsom')) +# self._save_flag_file.assert_called_with( +# path='/etc/nova/quantum_plugin.conf', data='ovs') +# +# def test_quantum_plugin_context_ovs_grizzly_and_beyond(self): +# ex_ctxt = deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) +# ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' +# 'LibvirtGenericVIFDriver') +# self.assertEquals(ex_ctxt, self._test_qplugin_context('grizzly')) +# self._save_flag_file.assert_called_with( +# path='/etc/nova/quantum_plugin.conf', data='ovs') def test_libvirt_bin_context_no_migration(self): self.test_config.set('enable-live-migration', False) @@ -175,26 +179,26 @@ class NovaComputeContextTests(CharmTestCase): self.assertEquals( {'libvirtd_opts': '-d -l', 'listen_tls': 1}, libvirt()) - def test_config_flag_context_none_set_in_config(self): - flags = context.OSConfigFlagContext() - self.assertEquals({}, flags()) - - def test_conflig_flag_context(self): - self.test_config.set('config-flags', 'one=two,three=four,five=six') - flags = context.OSConfigFlagContext() - ex = { - 'user_config_flags': { - 'one': 'two', 'three': 'four', 'five': 'six' - } - } - self.assertEquals(ex, flags()) - - def test_conflig_flag_context_filters_bad_input(self): - self.test_config.set('config-flags', 'one=two,threefour,five=six') - flags = context.OSConfigFlagContext() - ex = { - 'user_config_flags': { - 'one': 'two', 'five': 'six' - } - } - self.assertEquals(ex, flags()) +# def test_config_flag_context_none_set_in_config(self): +# flags = context.OSConfigFlagContext() +# self.assertEquals({}, flags()) +# +# def test_conflig_flag_context(self): +# self.test_config.set('config-flags', 'one=two,three=four,five=six') +# flags = context.OSConfigFlagContext() +# ex = { +# 'user_config_flags': { +# 'one': 'two', 'three': 'four', 'five': 'six' +# } +# } +# self.assertEquals(ex, flags()) +# +# def test_conflig_flag_context_filters_bad_input(self): +# 
self.test_config.set('config-flags', 'one=two,threefour,five=six') +# flags = context.OSConfigFlagContext() +# ex = { +# 'user_config_flags': { +# 'one': 'two', 'five': 'six' +# } +# } +# self.assertEquals(ex, flags()) diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index c8c0339d..d4203db2 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -41,9 +41,9 @@ TO_PATCH = [ 'initialize_ssh_keys', 'migration_enabled', 'do_openstack_upgrade', - 'quantum_attribute', - 'quantum_enabled', - 'quantum_plugin', + 'network_manager', + 'neutron_plugin_attribute', + 'neutron_plugin', 'public_ssh_key', 'register_configs', # misc_utils @@ -116,7 +116,8 @@ class NovaComputeRelationsTests(CharmTestCase): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = ['amqp'] configs.write = MagicMock() - self.quantum_enabled.return_value = quantum + if quantum: + self.network_manager.return_value = 'quantum' hooks.amqp_changed() @patch.object(hooks, 'CONFIGS') @@ -152,7 +153,8 @@ class NovaComputeRelationsTests(CharmTestCase): configs.complete_contexts = MagicMock() configs.complete_contexts.return_value = ['shared-db'] configs.write = MagicMock() - self.quantum_enabled.return_value = quantum + if quantum: + self.network_manager.return_value = 'quantum' hooks.db_changed() @patch.object(hooks, 'CONFIGS') @@ -163,7 +165,7 @@ class NovaComputeRelationsTests(CharmTestCase): @patch.object(hooks, 'CONFIGS') def test_db_changed_with_data_and_quantum(self, configs): - self.quantum_attribute.return_value = '/etc/quantum/plugin.conf' + self.neutron_plugin_attribute.return_value = '/etc/quantum/plugin.conf' self._shared_db_test(configs, quantum=True) ex = [call('/etc/nova/nova.conf'), call('/etc/quantum/plugin.conf')] self.assertEquals(ex, configs.write.call_args_list) diff --git a/unit_tests/test_nova_compute_utils.py b/unit_tests/test_nova_compute_utils.py index ac79ba06..4135fc7f 100644 
--- a/unit_tests/test_nova_compute_utils.py +++ b/unit_tests/test_nova_compute_utils.py @@ -7,13 +7,19 @@ import hooks.nova_compute_utils as utils TO_PATCH = [ 'config', - 'get_os_codename_package', + 'os_release', 'log', + 'neutron_plugin_attribute', 'related_units', 'relation_ids', 'relation_get', ] +OVS_PKGS = [ + 'quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms', +] + class NovaComputeUtilsTests(CharmTestCase): def setUp(self): @@ -22,7 +28,7 @@ class NovaComputeUtilsTests(CharmTestCase): @patch.object(utils, 'network_manager') def test_determine_packages_nova_network(self, net_man): - net_man.return_value = 'FlatDHCPManager' + net_man.return_value = 'flatdhcpmanager' self.relation_ids.return_value = [] result = utils.determine_packages() ex = utils.BASE_PACKAGES + [ @@ -32,33 +38,27 @@ class NovaComputeUtilsTests(CharmTestCase): ] self.assertEquals(ex, result) - @patch.object(utils, 'quantum_plugin') + @patch.object(utils, 'neutron_plugin') @patch.object(utils, 'network_manager') - def test_determine_packages_quantum(self, net_man, q_plugin): - net_man.return_value = 'Quantum' - q_plugin.return_value = 'ovs' + def test_determine_packages_quantum(self, net_man, n_plugin): + self.neutron_plugin_attribute.return_value = OVS_PKGS + net_man.return_value = 'quantum' + n_plugin.return_value = 'ovs' self.relation_ids.return_value = [] result = utils.determine_packages() - ex = utils.BASE_PACKAGES + [ - 'quantum-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms', - 'nova-compute-kvm' - ] + ex = utils.BASE_PACKAGES + OVS_PKGS + ['nova-compute-kvm'] self.assertEquals(ex, result) - @patch.object(utils, 'quantum_plugin') + @patch.object(utils, 'neutron_plugin') @patch.object(utils, 'network_manager') - def test_determine_packages_quantum_ceph(self, net_man, q_plugin): - net_man.return_value = 'Quantum' - q_plugin.return_value = 'ovs' + def test_determine_packages_quantum_ceph(self, net_man, n_plugin): + self.neutron_plugin_attribute.return_value = 
OVS_PKGS + net_man.return_value = 'quantum' + n_plugin.return_value = 'ovs' self.relation_ids.return_value = ['ceph:0'] result = utils.determine_packages() - ex = utils.BASE_PACKAGES + [ - 'quantum-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms', - 'ceph-common', - 'nova-compute-kvm' - ] + ex = (utils.BASE_PACKAGES + OVS_PKGS + + ['ceph-common', 'nova-compute-kvm']) self.assertEquals(ex, result) @patch.object(utils, 'network_manager') @@ -112,7 +112,7 @@ class NovaComputeUtilsTests(CharmTestCase): } self.assertEquals(ex, result) - @patch.object(utils, 'quantum_plugin') + @patch.object(utils, 'neutron_plugin') @patch.object(utils, 'network_manager') def test_resource_map_quantum_ovs(self, net_man, _plugin): self.skipTest('skipped until contexts are properly mocked.') @@ -208,7 +208,7 @@ class NovaComputeUtilsTests(CharmTestCase): self.relation_get.return_value = 'Zm9vX2NlcnQK' with patch_open() as (_open, _file): utils.import_keystone_ca_cert() - _open.assert_called_with(utils.CA_CERT_PATH) + _open.assert_called_with(utils.CA_CERT_PATH, 'wb') _file.write.assert_called_with('foo_cert\n') check_call.assert_called_with(['update-ca-certificates']) @@ -217,7 +217,7 @@ class NovaComputeUtilsTests(CharmTestCase): @patch.object(utils, 'resource_map') def test_register_configs(self, resource_map, quantum, renderer): quantum.return_value = False - self.get_os_codename_package.return_value = 'havana' + self.os_release.return_value = 'havana' fake_renderer = MagicMock() fake_renderer.register = MagicMock() renderer.return_value = fake_renderer From 74a63ce7fef6a122a395afeed2d9980d9ad68e0a Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 15 Aug 2013 11:48:17 -0700 Subject: [PATCH 29/84] Checkin missing templates. 
--- templates/grizzly/nova.conf | 72 ++++++++++++++++++++++++ templates/grizzly/ovs_quantum_plugin.ini | 27 +++++++++ templates/grizzly/quantum.conf | 38 +++++++++++++ templates/nova.conf | 59 +++++++++++++++++++ 4 files changed, 196 insertions(+) create mode 100644 templates/grizzly/nova.conf create mode 100644 templates/grizzly/ovs_quantum_plugin.ini create mode 100644 templates/grizzly/quantum.conf create mode 100644 templates/nova.conf diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf new file mode 100644 index 00000000..7ec9eb3a --- /dev/null +++ b/templates/grizzly/nova.conf @@ -0,0 +1,72 @@ +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. +############################################################################### +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose=True +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes +enabled_apis=ec2,osapi_compute,metadata +auth_strategy=keystone +compute_driver=libvirt.LibvirtDriver +{% if database_host -%} +sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} +{% endif -%} + +{% if rabbitmq_host -%} +rabbit_host = {{ rabbitmq_host }} +rabbit_userid = {{ rabbitmq_user }} +rabbit_password = {{ rabbitmq_password }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +{% endif -%} + +{% if glance_api_servers -%} +glance_api_servers = {{ glance_api_servers }} +{% endif -%} + +{% if rbd_pool -%} +rbd_pool = {{ rbd_pool }} +rbd_user = {{ rbd_user }} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% 
endif -%} + +{% if neutron_plugin and neutron_plugin == 'ovs' -%} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver +libvirt_user_virtio_for_bridges = True +{% if neutron_security_groups -%} +security_group_api = quantum +nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} +{% endif -%} + +{% if network_manager_config -%} +{% for key, value in network_manager_config.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% if network_manager == 'quantum' -%} +network_api_class = nova.network.quantumv2.api.API +{% elif network_manager == 'flatdhcpmanager' -%} +network_manager = nova.network.manager.FlatDHCPManager +{% endif -%} +{% endif -%} + +{% if volume_service -%} +volume_api_class=nova.volume.cinder.API +{% endif -%} + +{% if user_config_flags -%} +{% for key, value in user_config_flags.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} diff --git a/templates/grizzly/ovs_quantum_plugin.ini b/templates/grizzly/ovs_quantum_plugin.ini new file mode 100644 index 00000000..da8c9a31 --- /dev/null +++ b/templates/grizzly/ovs_quantum_plugin.ini @@ -0,0 +1,27 @@ +# grizzly +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[OVS] +tunnel_id_ranges = 1:1000 +tenant_network_type = gre +enable_tunneling = True +local_ip = {{ local_ip }} + +[DATABASE] +{% if database_host -%} +sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}?quantum?charset=utf8 +reconnect_interval = 2 +{% else -%} +connection = sqlite:////var/lib/quantum/quantum.sqlite +{% endif -%} + +[AGENT] +polling_interval = 2 + +[SECURITYGROUP] +{% if neutron_security_groups -%} +firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +{% endif -%} + diff --git a/templates/grizzly/quantum.conf b/templates/grizzly/quantum.conf new file mode 100644 index 00000000..1e0666a4 --- /dev/null +++ b/templates/grizzly/quantum.conf @@ -0,0 +1,38 @@ +# grizzly +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[DEFAULT] +state_path = /var/lib/quantum +lock_path = $state_path/lock +bind_host = 0.0.0.0 +bind_port = 9696 +{% if core_plugin -%} +core_plugin = {{ core_plugin }} +{% endif -%} +api_paste_config = /etc/quantum/api-paste.ini +auth_strategy = keystone +control_exchange = quantum +notification_driver = quantum.openstack.common.notifier.rpc_notifier +default_notification_level = INFO +notification_topics = notifications +{% if rabbitmq_host -%} +rabbit_host = {{ rabbitmq_host }} +rabbit_userid = {{ rabbitmq_user }} +rabbit_password = {{ rabbitmq_password }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} + +{% endif -%} + + +[QUOTAS] + +[DEFAULT_SERVICETYPE] + +[AGENT] +root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf + +[keystone_authtoken] +signing_dir = /var/lib/quantum/keystone-signing + diff --git a/templates/nova.conf b/templates/nova.conf new file mode 100644 index 00000000..28f00e04 --- /dev/null +++ b/templates/nova.conf @@ -0,0 +1,59 @@ +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose=True +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes +{% if database_host -%} +sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} +{% endif -%} +{% if rabbitmq_host -%} +rabbit_host = {{ rabbitmq_host }} +rabbit_userid = {{ rabbitmq_user }} +rabbit_password = {{ rabbitmq_password }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +{% endif -%} +{% if glance_api_servers -%} +glance_api_servers = {{ glance_api_servers }} +{% endif -%} +{% if rbd_pool -%} +rbd_pool = {{ rbd_pool }} +rbd_user = {{ rbd_user }} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% endif -%} +{% if quantum_plugin and quantum_plugin == 'ovs' -%} +libvirt_vif_driver = {{ libvirt_vif_driver }} +libvirt_user_virtio_for_bridges = {{ libvirt_use_virtio_for_birdges }} +{% if quantum_security_groups -%} +security_group_api = quantum +nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} +{% endif -%} +{% if network_manager_config -%} +{% for key, value in network_manager_config.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} +{% if volume_service_config -%} +{% for key, value in volume_service_config.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} +{% if user_config_flags -%} +{% for key, value in user_config_flags.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} From 10793bc187a674db9741e21edf713e2751227c6a Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 15 Aug 2013 11:54:36 -0700 
Subject: [PATCH 30/84] Sync helpers. --- hooks/charmhelpers/contrib/openstack/context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 67fce55d..379a27e6 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -232,7 +232,7 @@ class HAProxyContext(OSContextGenerator): class ImageServiceContext(OSContextGenerator): - interfaces = ['image-servce'] + interfaces = ['image-service'] def __call__(self): ''' From c473d2f282e0f6421c053554a88e221df6a41529 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 15 Aug 2013 12:43:43 -0700 Subject: [PATCH 31/84] Ensure access to neutron db on a late joining cloud-compute relation. --- hooks/nova_compute_hooks.py | 15 +++++++++++---- hooks/nova_compute_utils.py | 2 +- unit_tests/test_nova_compute_hooks.py | 23 +++++++++++++++++++++-- 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index c6c17af8..ebaab92d 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -92,12 +92,15 @@ def amqp_changed(): @hooks.hook('shared-db-relation-joined') -def db_joined(): - relation_set(database=config('database'), username=config('database-user'), - hostname=unit_get('private-address')) +def db_joined(rid=None): + relation_set(relation_id=rid, + nova_database=config('database'), + nova_username=config('database-user'), + nova_hostname=unit_get('private-address')) if network_manager() in ['quantum', 'neutron']: # XXX: Renaming relations from quantum_* to neutron_* here. 
- relation_set(neutron_database=config('neutron-database'), + relation_set(relation_id=rid, + neutron_database=config('neutron-database'), neutron_username=config('neutron-database-user'), neutron_hostname=unit_get('private-address')) @@ -145,6 +148,10 @@ def compute_changed(): CONFIGS.write_all() import_authorized_keys() import_keystone_ca_cert() + if network_manager() in ['quantum', 'neutron']: + # in case we already have a database relation, need to request + # access to the additional neutron database. + [db_joined(rid) for rid in relation_ids('shared-db')] @hooks.hook('ceph-relation-joined') diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index d2e9fac6..05807a72 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -50,7 +50,7 @@ BASE_RESOURCE_MAP = { '/etc/nova/nova.conf': { 'services': ['nova-compute'], 'contexts': [context.AMQPContext(), - context.SharedDBContext(), + context.SharedDBContext(relation_prefix='nova'), context.ImageServiceContext(), context.OSConfigFlagContext(), CloudComputeContext(), diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index d4203db2..3f480a81 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -136,8 +136,27 @@ class NovaComputeRelationsTests(CharmTestCase): def test_db_joined(self): self.unit_get.return_value = 'nova.foohost.com' hooks.db_joined() - self.relation_set.assert_called_with(database='nova', username='nova', - hostname='nova.foohost.com') + self.relation_set.assert_called_with(relation_id=None, + nova_database='nova', + nova_username='nova', + nova_hostname='nova.foohost.com') + self.unit_get.assert_called_with('private-address') + + def test_db_joined_quantum(self): + self.unit_get.return_value = 'nova.foohost.com' + self.network_manager.return_value = 'quantum' + hooks.db_joined(rid='shared-db:0') + calls = [call(nova_database='nova', + nova_username='nova', + 
nova_hostname='nova.foohost.com', + relation_id='shared-db:0'), + call(neutron_database='neutron', + neutron_username='neutron', + neutron_hostname='nova.foohost.com', + relation_id='shared-db:0'), +] + [self.assertIn(c, self.relation_set.call_args_list) + for c in calls] self.unit_get.assert_called_with('private-address') @patch.object(hooks, 'CONFIGS') From e4b1b98ec72275f13af3047b614d50fcac5d8a7e Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 15 Aug 2013 16:14:45 -0700 Subject: [PATCH 32/84] Ensure OVS bridge exists while generating config ctxt. --- hooks/nova_compute_context.py | 32 +++++++++++++++++++++++++++++--- hooks/nova_compute_utils.py | 4 ++-- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 5310feb8..8572ea19 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -1,6 +1,9 @@ +from subprocess import check_call, check_output + from charmhelpers.contrib.openstack import context -from charmhelpers.core.host import apt_install, filter_installed_packages +from charmhelpers.core.host import ( + apt_install, filter_installed_packages, service_running, service_start) from charmhelpers.core.hookenv import ( config, @@ -9,7 +12,6 @@ from charmhelpers.core.hookenv import ( relation_ids, service_name, ERROR, - WARNING, ) from charmhelpers.contrib.openstack.utils import os_release @@ -19,6 +21,8 @@ from charmhelpers.contrib.openstack.utils import os_release # nova-compute nodes to support live migration. 
CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472' +OVS_BRIDGE = 'br-int' + def _save_flag_file(path, data): ''' @@ -240,7 +244,6 @@ class CloudComputeContext(context.OSContextGenerator): return ctxt - class NeutronComputeContext(context.NeutronContext): interfaces = [] @@ -257,3 +260,26 @@ class NeutronComputeContext(context.NeutronContext): @property def neutron_security_groups(self): return _neutron_security_groups() + + def _ensure_bridge(self): + if not service_running('openvswitch-switch'): + service_start('openvswitch-switch') + + ovs_output = check_output['ovs-vsctl', 'show'] + for ln in ovs_output.split('\n'): + if OVS_BRIDGE in ln.strip(): + log('Found OVS bridge: %s.' % OVS_BRIDGE) + return + log('Creating new OVS bridge: %s.' % OVS_BRIDGE) + check_call(['ovs-vsctl', 'add-br', OVS_BRIDGE]) + + def ovs_ctxt(self): + # In addition to generating config context, ensure the OVS service + # is running and the OVS bridge exists. + ovs_ctxt = super(NeutronComputeContext, self).ovs_ctxt() + if not ovs_ctxt: + return {} + + self._ensure_bridge() + + return ovs_ctxt diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 05807a72..61633a8c 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -72,14 +72,14 @@ CEPH_RESOURCES = { QUANTUM_RESOURCES = { '/etc/quantum/quantum.conf': { - 'services': ['quantum-server'], + 'services': [], 'contexts': [context.AMQPContext()], } } NEUTRON_RESOURCES = { '/etc/neutron/neutron.conf': { - 'services': ['neutron-server'], + 'services': [], 'contexts': [context.AMQPContext()], } } From 4611e89dc235f3d6a36ca07bb47267eace0b186b Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 15 Aug 2013 16:15:59 -0700 Subject: [PATCH 33/84] Sync helpers. 
--- hooks/charmhelpers/contrib/openstack/context.py | 4 ++-- hooks/charmhelpers/contrib/openstack/templating.py | 10 +++++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 379a27e6..76147bb3 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -392,14 +392,14 @@ class OSConfigFlagContext(OSContextGenerator): ''' def __call__(self): config_flags = config('config-flags') - if not config_flags: + if not config_flags or config_flags in ['None', '']: return {} config_flags = config_flags.split(',') flags = {} for flag in config_flags: if '=' not in flag: log('Improperly formatted config-flag, expected k=v ' - ' got %s' % flag, level=WARNING) + 'got %s' % flag, level=WARNING) continue k, v = flag.split('=') flags[k.strip()] = v diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 4b1f207d..dd32943a 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -219,7 +219,10 @@ class OSConfigRenderer(object): if config_file not in self.templates: log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException + ctxt = self.templates[config_file].context() + if not ctxt: + return _tmpl = os.path.basename(config_file) try: @@ -247,8 +250,13 @@ class OSConfigRenderer(object): if config_file not in self.templates: log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException + + cfg_out = self.render(config_file) + if not cfg_out: + return + with open(config_file, 'wb') as out: - out.write(self.render(config_file)) + out.write(cfg_out) log('Wrote template %s.' 
% config_file, level=INFO) def write_all(self): From 5f15d023c404c53e5489b5c5f716c581e9e0f26f Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 15 Aug 2013 16:29:29 -0700 Subject: [PATCH 34/84] Fix syntax. --- hooks/nova_compute_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 8572ea19..2bbdc5fc 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -265,7 +265,7 @@ class NeutronComputeContext(context.NeutronContext): if not service_running('openvswitch-switch'): service_start('openvswitch-switch') - ovs_output = check_output['ovs-vsctl', 'show'] + ovs_output = check_output(['ovs-vsctl', 'show']) for ln in ovs_output.split('\n'): if OVS_BRIDGE in ln.strip(): log('Found OVS bridge: %s.' % OVS_BRIDGE) From 33cc2ac9ead4748ceceaab93f29c187acd1dab17 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 15 Aug 2013 17:29:59 -0700 Subject: [PATCH 35/84] Sync helpers. 
--- hooks/charmhelpers/contrib/openstack/context.py | 3 +-- hooks/charmhelpers/contrib/openstack/templating.py | 10 +--------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 76147bb3..2228c9be 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -367,6 +367,7 @@ class NeutronContext(object): return ovs_ctxt def __call__(self): + self._ensure_packages() if self.network_manager not in ['quantum', 'neutron']: return {} @@ -374,8 +375,6 @@ class NeutronContext(object): if not self.plugin: return {} - self._ensure_packages() - ctxt = {'network_manager': self.network_manager} if self.plugin == 'ovs': diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index dd32943a..4b1f207d 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -219,10 +219,7 @@ class OSConfigRenderer(object): if config_file not in self.templates: log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException - ctxt = self.templates[config_file].context() - if not ctxt: - return _tmpl = os.path.basename(config_file) try: @@ -250,13 +247,8 @@ class OSConfigRenderer(object): if config_file not in self.templates: log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException - - cfg_out = self.render(config_file) - if not cfg_out: - return - with open(config_file, 'wb') as out: - out.write(cfg_out) + out.write(self.render(config_file)) log('Wrote template %s.' % config_file, level=INFO) def write_all(self): From c187ff52d5fc64f8f191d734ff15f086df12ca8d Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 16 Aug 2013 10:55:37 -0700 Subject: [PATCH 36/84] NeutronComputeContext: ensure local_ip is actually an IP. 
--- hooks/nova_compute_context.py | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 2bbdc5fc..89bde3ce 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -1,3 +1,5 @@ +import socket + from subprocess import check_call, check_output from charmhelpers.contrib.openstack import context @@ -11,6 +13,7 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, service_name, + unit_get, ERROR, ) @@ -244,6 +247,28 @@ class CloudComputeContext(context.OSContextGenerator): return ctxt +def get_host_ip(): + # we used to have a charm-helper to do this, but its disappeared? + # taken from quantum-gateway + + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + hostname = unit_get('private-address') + try: + # Test to see if already an IPv4 address + socket.inet_aton(hostname) + return hostname + except socket.error: + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + return None + + class NeutronComputeContext(context.NeutronContext): interfaces = [] @@ -275,11 +300,13 @@ class NeutronComputeContext(context.NeutronContext): def ovs_ctxt(self): # In addition to generating config context, ensure the OVS service - # is running and the OVS bridge exists. + # is running and the OVS bridge exists. Also need to ensure + # local_ip points to actual IP, not hostname. ovs_ctxt = super(NeutronComputeContext, self).ovs_ctxt() if not ovs_ctxt: return {} self._ensure_bridge() + ovs_ctxt['local_ip'] = get_host_ip() return ovs_ctxt From 0d921ccc417b891eaa2e5405c34b06869b154129 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 16 Aug 2013 12:14:16 -0700 Subject: [PATCH 37/84] Ensure nova.conf subscribes to the Neutron context only when we required. 
--- hooks/nova_compute_utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 61633a8c..1ff62d99 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -54,8 +54,7 @@ BASE_RESOURCE_MAP = { context.ImageServiceContext(), context.OSConfigFlagContext(), CloudComputeContext(), - NovaComputeCephContext(), - NeutronComputeContext()] + NovaComputeCephContext()], }, } @@ -104,18 +103,25 @@ def resource_map(): resource_map = deepcopy(BASE_RESOURCE_MAP) net_manager = network_manager() + # Network manager gets set late by the cloud-compute interface. + # FlatDHCPManager only requires some extra packages. if (net_manager in ['flatmanager', 'flatdhcpmanager'] and config('multi-host').lower() == 'yes'): resource_map['/etc/nova/nova.conf']['services'].extend( ['nova-api', 'nova-network'] ) + # Neutron/quantum requires additional contexts, as well as new resources + # depending on the plugin used. if net_manager in ['neutron', 'quantum']: if net_manager == 'quantum': resource_map.update(QUANTUM_RESOURCES) if net_manager == 'neutron': resource_map.update(NEUTRON_RESOURCES) + resource_map['/etc/nova/nova.conf']['contexts'].append( + NeutronComputeContext()) + plugin = neutron_plugin() if plugin: conf = neutron_plugin_attribute(plugin, 'config', net_manager) From ae4c96c229ef3b288ae33af24361069ce979b9dd Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 16 Aug 2013 12:45:56 -0700 Subject: [PATCH 38/84] Sync helpers. 
--- hooks/charmhelpers/contrib/openstack/templating.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 4b1f207d..0b534433 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -247,8 +247,12 @@ class OSConfigRenderer(object): if config_file not in self.templates: log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException + + _out = self.render(config_file) + with open(config_file, 'wb') as out: - out.write(self.render(config_file)) + out.write(_out) + log('Wrote template %s.' % config_file, level=INFO) def write_all(self): From b3cfa8447db5d0eaaa7ba3a8f8c713f640ce2c53 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 16 Aug 2013 17:48:01 -0700 Subject: [PATCH 39/84] Templates: When not using Quantum/Neutron, default to FlatDHCP. --- templates/grizzly/nova.conf | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf index 7ec9eb3a..5d3fdb53 100644 --- a/templates/grizzly/nova.conf +++ b/templates/grizzly/nova.conf @@ -54,12 +54,13 @@ nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% for key, value in network_manager_config.iteritems() -%} {{ key }} = {{ value }} {% endfor -%} +{% endif -%} + {% if network_manager == 'quantum' -%} network_api_class = nova.network.quantumv2.api.API -{% elif network_manager == 'flatdhcpmanager' -%} +{% else -%} network_manager = nova.network.manager.FlatDHCPManager {% endif -%} -{% endif -%} {% if volume_service -%} volume_api_class=nova.volume.cinder.API From f6c4de0071147101b220e0380f546539d08ac24b Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 19 Aug 2013 14:14:39 -0700 Subject: [PATCH 40/84] Implement upgrades, move templates to folsom. 
--- hooks/nova_compute_hooks.py | 2 +- hooks/nova_compute_utils.py | 29 +++++++- revision | 2 +- templates/folsom/nova.conf | 30 ++++++-- .../ovs_quantum_plugin.ini | 0 templates/{grizzly => folsom}/quantum.conf | 0 templates/grizzly/nova.conf | 73 ------------------- 7 files changed, 50 insertions(+), 86 deletions(-) rename templates/{grizzly => folsom}/ovs_quantum_plugin.ini (100%) rename templates/{grizzly => folsom}/quantum.conf (100%) delete mode 100644 templates/grizzly/nova.conf diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index ebaab92d..6ba64b19 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -61,7 +61,7 @@ def install(): @restart_on_change(restart_map()) def config_changed(): if openstack_upgrade_available('nova-common'): - do_openstack_upgrade() + do_openstack_upgrade(CONFIGS) if migration_enabled() and config('migration-auth-type') == 'ssh': # Check-in with nova-c-c and register new ssh key, if it has just been diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 1ff62d99..c83ebef5 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -5,6 +5,8 @@ from base64 import b64decode from copy import deepcopy from subprocess import check_call, check_output +from charmhelpers.core.host import apt_update, apt_install + from charmhelpers.core.hookenv import ( config, log, @@ -14,10 +16,14 @@ from charmhelpers.core.hookenv import ( ) from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute - -from charmhelpers.contrib.openstack.utils import os_release from charmhelpers.contrib.openstack import templating, context +from charmhelpers.contrib.openstack.utils import ( + configure_installation_source, + get_os_codename_install_source, + os_release +) + from nova_compute_context import ( CloudComputeContext, NovaComputeLibvirtContext, @@ -307,7 +313,24 @@ def configure_live_migration(configs=None): initialize_ssh_keys() -def do_openstack_upgrade(): 
+def do_openstack_upgrade(configs): + new_src = config('openstack-origin') + new_os_rel = get_os_codename_install_source(new_src) + log('Performing OpenStack upgrade to %s.' % (new_os_rel)) + + configure_installation_source(new_src) + apt_update() + + dpkg_opts = [ + '--option', 'Dpkg::Options::=--force-confnew', + '--option', 'Dpkg::Options::=--force-confdef', + ] + + apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True) + + # set CONFIGS to load templates from new release and regenerate config + configs.set_release(openstack_release=new_os_rel) + configs.write_all() pass diff --git a/revision b/revision index 0a3e7b04..a949a93d 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -126 +128 diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 28f00e04..5d3fdb53 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -17,41 +17,55 @@ verbose=True ec2_private_dns_show_ip=True api_paste_config=/etc/nova/api-paste.ini volumes_path=/var/lib/nova/volumes +enabled_apis=ec2,osapi_compute,metadata +auth_strategy=keystone +compute_driver=libvirt.LibvirtDriver {% if database_host -%} sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} {% endif -%} + {% if rabbitmq_host -%} rabbit_host = {{ rabbitmq_host }} rabbit_userid = {{ rabbitmq_user }} rabbit_password = {{ rabbitmq_password }} rabbit_virtual_host = {{ rabbitmq_virtual_host }} {% endif -%} + {% if glance_api_servers -%} glance_api_servers = {{ glance_api_servers }} {% endif -%} + {% if rbd_pool -%} rbd_pool = {{ rbd_pool }} rbd_user = {{ rbd_user }} rbd_secret_uuid = {{ rbd_secret_uuid }} {% endif -%} -{% if quantum_plugin and quantum_plugin == 'ovs' -%} -libvirt_vif_driver = {{ libvirt_vif_driver }} -libvirt_user_virtio_for_bridges = {{ libvirt_use_virtio_for_birdges }} -{% if quantum_security_groups -%} + +{% if neutron_plugin and neutron_plugin == 'ovs' -%} +libvirt_vif_driver = 
nova.virt.libvirt.vif.LibvirtGenericVIFDriver +libvirt_user_virtio_for_bridges = True +{% if neutron_security_groups -%} security_group_api = quantum nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} {% endif -%} + {% if network_manager_config -%} {% for key, value in network_manager_config.iteritems() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} -{% if volume_service_config -%} -{% for key, value in volume_service_config.iteritems() -%} -{{ key }} = {{ value }} -{% endfor -%} + +{% if network_manager == 'quantum' -%} +network_api_class = nova.network.quantumv2.api.API +{% else -%} +network_manager = nova.network.manager.FlatDHCPManager {% endif -%} + +{% if volume_service -%} +volume_api_class=nova.volume.cinder.API +{% endif -%} + {% if user_config_flags -%} {% for key, value in user_config_flags.iteritems() -%} {{ key }} = {{ value }} diff --git a/templates/grizzly/ovs_quantum_plugin.ini b/templates/folsom/ovs_quantum_plugin.ini similarity index 100% rename from templates/grizzly/ovs_quantum_plugin.ini rename to templates/folsom/ovs_quantum_plugin.ini diff --git a/templates/grizzly/quantum.conf b/templates/folsom/quantum.conf similarity index 100% rename from templates/grizzly/quantum.conf rename to templates/folsom/quantum.conf diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf deleted file mode 100644 index 5d3fdb53..00000000 --- a/templates/grizzly/nova.conf +++ /dev/null @@ -1,73 +0,0 @@ -############################################################################### -# [ WARNING ] -# Configuration file maintained by Juju. Local changes may be overwritten. 
-############################################################################### -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose=True -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata -auth_strategy=keystone -compute_driver=libvirt.LibvirtDriver -{% if database_host -%} -sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} -{% endif -%} - -{% if rabbitmq_host -%} -rabbit_host = {{ rabbitmq_host }} -rabbit_userid = {{ rabbitmq_user }} -rabbit_password = {{ rabbitmq_password }} -rabbit_virtual_host = {{ rabbitmq_virtual_host }} -{% endif -%} - -{% if glance_api_servers -%} -glance_api_servers = {{ glance_api_servers }} -{% endif -%} - -{% if rbd_pool -%} -rbd_pool = {{ rbd_pool }} -rbd_user = {{ rbd_user }} -rbd_secret_uuid = {{ rbd_secret_uuid }} -{% endif -%} - -{% if neutron_plugin and neutron_plugin == 'ovs' -%} -libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver -libvirt_user_virtio_for_bridges = True -{% if neutron_security_groups -%} -security_group_api = quantum -nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver -{% endif -%} -{% endif -%} - -{% if network_manager_config -%} -{% for key, value in network_manager_config.iteritems() -%} -{{ key }} = {{ value }} -{% endfor -%} -{% endif -%} - -{% if network_manager == 'quantum' -%} -network_api_class = nova.network.quantumv2.api.API -{% else -%} -network_manager = nova.network.manager.FlatDHCPManager -{% endif -%} - -{% if volume_service -%} -volume_api_class=nova.volume.cinder.API -{% endif -%} - -{% if user_config_flags -%} -{% for key, value in 
user_config_flags.iteritems() -%} -{{ key }} = {{ value }} -{% endfor -%} -{% endif -%} From 2cb3e8a3e0d3c84bdff363da5828c7ab7158bd84 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 19 Aug 2013 16:37:58 -0700 Subject: [PATCH 41/84] Update charm-helpers sync source to temporary ~openstack-charmers helper branch. --- charm-helpers.yaml | 2 +- hooks/charmhelpers/contrib/hahelpers/ceph.py | 23 +++++++++++++++---- .../charmhelpers/contrib/openstack/context.py | 1 + 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/charm-helpers.yaml b/charm-helpers.yaml index 7d9aa62e..ddc1575c 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~openstack-charmers/charm-helpers/to_upstream destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/hahelpers/ceph.py b/hooks/charmhelpers/contrib/hahelpers/ceph.py index fb1b8b9b..8ff029a4 100644 --- a/hooks/charmhelpers/contrib/hahelpers/ceph.py +++ b/hooks/charmhelpers/contrib/hahelpers/ceph.py @@ -11,6 +11,7 @@ import commands import os import shutil +import time from subprocess import ( check_call, @@ -24,6 +25,7 @@ from charmhelpers.core.hookenv import ( related_units, log, INFO, + ERROR ) from charmhelpers.core.host import ( @@ -179,11 +181,22 @@ def filesystem_mounted(fs): return fs in [f for m, f in mounts()] -def make_filesystem(blk_device, fstype='ext4'): - log('ceph: Formatting block device %s as filesystem %s.' 
% - (blk_device, fstype), level=INFO) - cmd = ['mkfs', '-t', fstype, blk_device] - check_call(cmd) +def make_filesystem(blk_device, fstype='ext4', timeout=10): + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('ceph: gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + log('ceph: waiting for block device %s to appear' % blk_device, + level=INFO) + count += 1 + time.sleep(1) + else: + log('ceph: Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 2228c9be..b2d40854 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -294,6 +294,7 @@ class ApacheSSLContext(OSContextGenerator): if ca_cert: with open(CA_CERT_PATH, 'w') as ca_out: ca_out.write(b64decode(ca_cert)) + check_call(['update-ca-certificates']) def __call__(self): if isinstance(self.external_ports, basestring): From 7d064697c7f95981788e2f6bbb354b6258e2c4e1 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 20 Aug 2013 12:10:36 -0700 Subject: [PATCH 42/84] Add generic relation broken hook. 
--- hooks/amqp-relation-broken | 1 + hooks/ceph-relation-broken | 1 + hooks/cloud-compute-relation-broken | 1 + hooks/image-service-relation-broken | 1 + hooks/nova_compute_hooks.py | 10 ++++++++++ hooks/shared-db-relation-broken | 1 + 6 files changed, 15 insertions(+) create mode 120000 hooks/amqp-relation-broken create mode 120000 hooks/ceph-relation-broken create mode 120000 hooks/cloud-compute-relation-broken create mode 120000 hooks/image-service-relation-broken create mode 120000 hooks/shared-db-relation-broken diff --git a/hooks/amqp-relation-broken b/hooks/amqp-relation-broken new file mode 120000 index 00000000..3ba0bdea --- /dev/null +++ b/hooks/amqp-relation-broken @@ -0,0 +1 @@ +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/ceph-relation-broken b/hooks/ceph-relation-broken new file mode 120000 index 00000000..3ba0bdea --- /dev/null +++ b/hooks/ceph-relation-broken @@ -0,0 +1 @@ +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/cloud-compute-relation-broken b/hooks/cloud-compute-relation-broken new file mode 120000 index 00000000..3ba0bdea --- /dev/null +++ b/hooks/cloud-compute-relation-broken @@ -0,0 +1 @@ +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/image-service-relation-broken b/hooks/image-service-relation-broken new file mode 120000 index 00000000..3ba0bdea --- /dev/null +++ b/hooks/image-service-relation-broken @@ -0,0 +1 @@ +nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 6ba64b19..052605b3 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -177,6 +177,16 @@ def ceph_changed(): CONFIGS.write('/etc/nova/nova.conf') +@hooks.hook('amqp-relation-broken', + 'ceph-relation-broken', + 'cloud-compute-relation-broken', + 'image-service-relation-broken', + 'shared-db-relation-broken') +@restart_on_change(restart_map()) +def relation_broken(): + CONFIGS.write_all() + + def main(): try: 
hooks.execute(sys.argv) diff --git a/hooks/shared-db-relation-broken b/hooks/shared-db-relation-broken new file mode 120000 index 00000000..3ba0bdea --- /dev/null +++ b/hooks/shared-db-relation-broken @@ -0,0 +1 @@ +nova_compute_hooks.py \ No newline at end of file From 79bf71d23b3c1702edf4c530bd082064cf19f92d Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 22 Aug 2013 17:46:42 -0700 Subject: [PATCH 43/84] Do not run cloud-compute-relation-broken. --- hooks/cloud-compute-relation-broken | 1 - hooks/nova_compute_hooks.py | 1 - 2 files changed, 2 deletions(-) delete mode 120000 hooks/cloud-compute-relation-broken diff --git a/hooks/cloud-compute-relation-broken b/hooks/cloud-compute-relation-broken deleted file mode 120000 index 3ba0bdea..00000000 --- a/hooks/cloud-compute-relation-broken +++ /dev/null @@ -1 +0,0 @@ -nova_compute_hooks.py \ No newline at end of file diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 052605b3..f507eb52 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -179,7 +179,6 @@ def ceph_changed(): @hooks.hook('amqp-relation-broken', 'ceph-relation-broken', - 'cloud-compute-relation-broken', 'image-service-relation-broken', 'shared-db-relation-broken') @restart_on_change(restart_map()) From 6c3f0a8c9bb9ce221a77aa128fd3dd7dda8a2874 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 22 Aug 2013 20:12:15 -0700 Subject: [PATCH 44/84] Remove unused code. 
--- hooks/nova_compute_context.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 89bde3ce..9cb94f5f 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -275,8 +275,6 @@ class NeutronComputeContext(context.NeutronContext): @property def plugin(self): return _neutron_plugin() - from nova_compute_utils import neutron_plugin - return neutron_plugin() @property def network_manager(self): From 45cc506c03a955de13559499293372ad3171c988 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 22 Aug 2013 21:15:07 -0700 Subject: [PATCH 45/84] Ensure CC contexts can functioon out of relation context. When generating cloud compute context, the generator must query the relation in a way that can function from other hook contexts. Drops old tests that have been moved to charm-helpers proper. --- hooks/nova_compute_context.py | 89 ++++++++++++++++++------ unit_tests/test_nova_compute_contexts.py | 68 ++++-------------- 2 files changed, 80 insertions(+), 77 deletions(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 9cb94f5f..a2e8811f 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -12,6 +12,7 @@ from charmhelpers.core.hookenv import ( log, relation_get, relation_ids, + related_units, service_name, unit_get, ERROR, @@ -45,9 +46,21 @@ def _network_manager(): def _neutron_security_groups(): - groups = [relation_get('neutron_security_groups'), - relation_get('quantum_security_groups')] - return ('yes' in groups or 'Yes' in groups) + ''' + Inspects current cloud-compute relation and determine if nova-c-c has + instructed us to use neutron security groups. 
+ ''' + for rid in relation_ids('cloud-compute'): + for unit in related_units('cloud-compute'): + groups = [ + relation_get('neutron_security_groups', + rid=rid, unit=unit), + relation_get('quantum_security_groups', + rid=rid, unit=unit) + ] + if ('yes' in groups or 'Yes' in groups): + return True + return False def _neutron_plugin(): @@ -55,8 +68,10 @@ def _neutron_plugin(): return neutron_plugin() -def _neutron_url(): - return relation_get('neutron_url') or relation_get('quantum_url') +def _neutron_url(rid, unit): + # supports legacy relation settings. + return (relation_get('neutron_url', rid=rid, unit=unit) or + relation_get('quantum_url', rid=rid, unit=unit)) class NovaComputeLibvirtContext(context.OSContextGenerator): @@ -131,10 +146,19 @@ class CloudComputeContext(context.OSContextGenerator): @property def volume_service(self): - return relation_get('volume_service') + volume_service = None + for rid in relation_ids('cloud-compute'): + for unit in related_units(rid): + volume_service = relation_get('volume_service', + rid=rid, unit=unit) + return volume_service def flat_dhcp_context(self): - ec2_host = relation_get('ec2_host') + ec2_host = None + for rid in relation_ids('cloud-compute'): + for unit in related_units(rid): + ec2_host = relation_get('ec2_host', rid=rid, unit=unit) + if not ec2_host: return {} @@ -150,23 +174,41 @@ class CloudComputeContext(context.OSContextGenerator): # generate config context for neutron or quantum. these get converted # directly into flags in nova.conf # NOTE: Its up to release templates to set correct driver + def _legacy_quantum(ctxt): + # rename neutron flags to support legacy quantum. 
renamed = {} for k, v in ctxt.iteritems(): k = k.replace('neutron', 'quantum') renamed[k] = v return renamed - neutron_ctxt = { - 'neutron_auth_strategy': 'keystone', - 'keystone_host': relation_get('auth_host'), - 'auth_port': relation_get('auth_port'), - 'neutron_admin_tenant_name': relation_get('service_tenant_name'), - 'neutron_admin_username': relation_get('service_username'), - 'neutron_admin_password': relation_get('service_password'), - 'neutron_plugin': _neutron_plugin(), - 'neutron_url': _neutron_url(), - } + neutron_ctxt = {'neutron_url': None} + for rid in relation_ids('cloud-compute'): + for unit in related_units(rid): + rel = {'rid': rid, 'unit': unit} + + url = _neutron_url(**rel) + if not url: + # only bother with units that have a neutron url set. + continue + + neutron_ctxt = { + 'neutron_auth_strategy': 'keystone', + 'keystone_host': relation_get( + 'auth_host', **rel), + 'auth_port': relation_get( + 'auth_port', **rel), + 'neutron_admin_tenant_name': relation_get( + 'service_tenant_name', **rel), + 'neutron_admin_username': relation_get( + 'service_username', **rel), + 'neutron_admin_password': relation_get( + 'service_password', **rel), + 'neutron_plugin': _neutron_plugin(), + 'neutron_url': url, + } + missing = [k for k, v in neutron_ctxt.iteritems() if v in ['', None]] if missing: log('Missing required relation settings for Quantum: ' + @@ -189,20 +231,20 @@ class CloudComputeContext(context.OSContextGenerator): # given openstack release (nova-volume is only supported for E and F) # it is up to release templates to set the correct volume driver. - os_rel = os_release('nova-common') - vol_service = relation_get('volume_service') - if not vol_service: + if not self.volume_service: return {} + os_rel = os_release('nova-common') + # ensure volume service is supported on specific openstack release. 
- if vol_service == 'cinder': + if self.volume_service == 'cinder': if os_rel == 'essex': e = ('Attempting to configure cinder volume manager on ' 'an unsupported OpenStack release (essex)') log(e, level=ERROR) raise context.OSContextError(e) return 'cinder' - elif vol_service == 'nova-volume': + elif self.volume_service == 'nova-volume': if os_release('nova-common') not in ['essex', 'folsom']: e = ('Attempting to configure nova-volume manager on ' 'an unsupported OpenStack release (%s).' % os_rel) @@ -211,7 +253,7 @@ class CloudComputeContext(context.OSContextGenerator): return 'nova-volume' else: e = ('Invalid volume service received via cloud-compute: %s' % - vol_service) + self.volume_service) log(e, level=ERROR) raise context.OSContextError(e) @@ -300,6 +342,7 @@ class NeutronComputeContext(context.NeutronContext): # In addition to generating config context, ensure the OVS service # is running and the OVS bridge exists. Also need to ensure # local_ip points to actual IP, not hostname. 
+ from pprint import pprint; import ipdb; ipdb.set_trace() ############################## Breakpoint ############################## ovs_ctxt = super(NeutronComputeContext, self).ovs_ctxt() if not ovs_ctxt: return {} diff --git a/unit_tests/test_nova_compute_contexts.py b/unit_tests/test_nova_compute_contexts.py index ecc1ce73..772018a1 100644 --- a/unit_tests/test_nova_compute_contexts.py +++ b/unit_tests/test_nova_compute_contexts.py @@ -1,5 +1,4 @@ -from mock import MagicMock, patch -from copy import deepcopy +from mock import patch from unit_tests.test_utils import CharmTestCase from charmhelpers.contrib.openstack.context import OSContextError @@ -11,6 +10,7 @@ TO_PATCH = [ 'filter_installed_packages', 'relation_ids', 'relation_get', + 'related_units', 'config', 'log', 'os_release', @@ -70,8 +70,8 @@ class NovaComputeContextTests(CharmTestCase): def test_cloud_compute_volume_context_cinder(self, netman): netman.return_value = None self.relation_ids.return_value = 'cloud-compute:0' + self.related_units.return_value = 'nova-cloud-controller/0' cloud_compute = context.CloudComputeContext() - self.test_relation.set({'volume_service': 'cinder'}) self.assertEquals({'volume_service': 'cinder'}, cloud_compute()) @@ -79,6 +79,7 @@ class NovaComputeContextTests(CharmTestCase): def test_cloud_compute_volume_context_nova_vol(self, netman): netman.return_value = None self.relation_ids.return_value = 'cloud-compute:0' + self.related_units.return_value = 'nova-cloud-controller/0' cloud_compute = context.CloudComputeContext() self.os_release.return_value = 'essex' self.test_relation.set({'volume_service': 'nova-volume'}) @@ -99,6 +100,7 @@ class NovaComputeContextTests(CharmTestCase): def test_cloud_compute_flatdhcp_context(self, netman): netman.return_value = 'flatdhcpmanager' self.relation_ids.return_value = 'cloud-compute:0' + self.related_units.return_value = 'nova-cloud-controller/0' self.test_relation.set({ 'network_manager': 'FlatDHCPManager', 'ec2_host': 'novaapihost'}) 
@@ -116,6 +118,8 @@ class NovaComputeContextTests(CharmTestCase): @patch.object(context, '_neutron_url') @patch.object(context, '_network_manager') def test_cloud_compute_quantum_context(self, netman, url, plugin): + self.relation_ids.return_value = 'cloud-compute:0' + self.related_units.return_value = 'nova-cloud-controller/0' netman.return_value = 'quantum' plugin.return_value = 'ovs' url.return_value = 'http://nova-c-c:9696' @@ -140,33 +144,13 @@ class NovaComputeContextTests(CharmTestCase): self._save_flag_file.assert_called_with( path='/etc/nova/nm.conf', data='quantum') -# def test_quantum_plugin_context_no_setting(self): -# qplugin = context.QuantumPluginContext() -# self.assertEquals({}, qplugin()) -# -# def _test_qplugin_context(self, os_release): -# self.get_os_codename_package.return_value = os_release -# self.test_relation.set( -# {'quantum_plugin': 'ovs', 'quantum_security_groups': 'yes'}) -# qplugin = context.QuantumPluginContext() -# qplugin._ensure_packages = MagicMock() -# return qplugin() -# -# def test_quantum_plugin_context_ovs_folsom(self): -# ex_ctxt = deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) -# ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' -# 'LibvirtHybridOVSBridgeDriver') -# self.assertEquals(ex_ctxt, self._test_qplugin_context('folsom')) -# self._save_flag_file.assert_called_with( -# path='/etc/nova/quantum_plugin.conf', data='ovs') -# -# def test_quantum_plugin_context_ovs_grizzly_and_beyond(self): -# ex_ctxt = deepcopy(BASE_QUANTUM_OVS_PLUGIN_CONTEXT) -# ex_ctxt['libvirt_vif_driver'] = ('nova.virt.libvirt.vif.' 
-# 'LibvirtGenericVIFDriver') -# self.assertEquals(ex_ctxt, self._test_qplugin_context('grizzly')) -# self._save_flag_file.assert_called_with( -# path='/etc/nova/quantum_plugin.conf', data='ovs') + @patch.object(context.NeutronComputeContext, 'network_manager') + @patch.object(context.NeutronComputeContext, 'plugin') + def test_quantum_plugin_context_no_setting(self, plugin, nm): + plugin.return_value = None + qplugin = context.NeutronComputeContext() + with patch.object(qplugin, '_ensure_packages'): + self.assertEquals({}, qplugin()) def test_libvirt_bin_context_no_migration(self): self.test_config.set('enable-live-migration', False) @@ -178,27 +162,3 @@ class NovaComputeContextTests(CharmTestCase): libvirt = context.NovaComputeLibvirtContext() self.assertEquals( {'libvirtd_opts': '-d -l', 'listen_tls': 1}, libvirt()) - -# def test_config_flag_context_none_set_in_config(self): -# flags = context.OSConfigFlagContext() -# self.assertEquals({}, flags()) -# -# def test_conflig_flag_context(self): -# self.test_config.set('config-flags', 'one=two,three=four,five=six') -# flags = context.OSConfigFlagContext() -# ex = { -# 'user_config_flags': { -# 'one': 'two', 'three': 'four', 'five': 'six' -# } -# } -# self.assertEquals(ex, flags()) -# -# def test_conflig_flag_context_filters_bad_input(self): -# self.test_config.set('config-flags', 'one=two,threefour,five=six') -# flags = context.OSConfigFlagContext() -# ex = { -# 'user_config_flags': { -# 'one': 'two', 'five': 'six' -# } -# } -# self.assertEquals(ex, flags()) From 8f704690d8dd54d5175aa4268b03bbf0af6a202b Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 22 Aug 2013 21:33:58 -0700 Subject: [PATCH 46/84] Remove breakpoint. 
--- hooks/nova_compute_context.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index a2e8811f..20fe5288 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -342,7 +342,6 @@ class NeutronComputeContext(context.NeutronContext): # In addition to generating config context, ensure the OVS service # is running and the OVS bridge exists. Also need to ensure # local_ip points to actual IP, not hostname. - from pprint import pprint; import ipdb; ipdb.set_trace() ############################## Breakpoint ############################## ovs_ctxt = super(NeutronComputeContext, self).ovs_ctxt() if not ovs_ctxt: return {} From ab83aabf936c4873f3f3d0f1c30e1e3247b25da7 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 22 Aug 2013 21:37:11 -0700 Subject: [PATCH 47/84] Pass correct parameter to related_units(). --- hooks/nova_compute_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 20fe5288..0c6eb2dd 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -51,7 +51,7 @@ def _neutron_security_groups(): instructed us to use neutron security groups. ''' for rid in relation_ids('cloud-compute'): - for unit in related_units('cloud-compute'): + for unit in related_units(unit): groups = [ relation_get('neutron_security_groups', rid=rid, unit=unit), From afe8a345b176bea48ebfcde34673fda78f0c5e69 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 22 Aug 2013 21:48:32 -0700 Subject: [PATCH 48/84] Pass correct parameter to related_units(). 
--- hooks/nova_compute_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 0c6eb2dd..8dcd84be 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -51,7 +51,7 @@ def _neutron_security_groups(): instructed us to use neutron security groups. ''' for rid in relation_ids('cloud-compute'): - for unit in related_units(unit): + for unit in related_units(rid): groups = [ relation_get('neutron_security_groups', rid=rid, unit=unit), From 1cf8730e9cc60ad2a9efe4b92243c6336a56e782 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 22 Aug 2013 22:13:15 -0700 Subject: [PATCH 49/84] Ensure libvirt does not listen_tls by default. --- hooks/nova_compute_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 8dcd84be..96c9ae37 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -87,7 +87,7 @@ class NovaComputeLibvirtContext(context.OSContextGenerator): # /etc/default/libvirt-bin 'libvirtd_opts': '-d', # /etc/libvirt/libvirtd.conf ( - 'listen_tls': 1, + 'listen_tls': 0, } # enable tcp listening if configured for live migration. From 0df05fa1a488f8b537247e7423a3fea958382cbc Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 23 Aug 2013 13:21:59 -0700 Subject: [PATCH 50/84] Rely on ceph context to create /etc/ceph. 
--- hooks/nova_compute_hooks.py | 2 -- unit_tests/test_nova_compute_contexts.py | 4 ++-- unit_tests/test_nova_compute_hooks.py | 9 ++------- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index f507eb52..58cb6db4 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -157,8 +157,6 @@ def compute_changed(): @hooks.hook('ceph-relation-joined') @restart_on_change(restart_map()) def ceph_joined(): - if not os.path.isdir('/etc/ceph'): - os.mkdir('/etc/ceph') apt_install(filter_installed_packages(['ceph-common']), fatal=True) diff --git a/unit_tests/test_nova_compute_contexts.py b/unit_tests/test_nova_compute_contexts.py index 772018a1..2f65f580 100644 --- a/unit_tests/test_nova_compute_contexts.py +++ b/unit_tests/test_nova_compute_contexts.py @@ -155,10 +155,10 @@ class NovaComputeContextTests(CharmTestCase): def test_libvirt_bin_context_no_migration(self): self.test_config.set('enable-live-migration', False) libvirt = context.NovaComputeLibvirtContext() - self.assertEquals({'libvirtd_opts': '-d', 'listen_tls': 1}, libvirt()) + self.assertEquals({'libvirtd_opts': '-d', 'listen_tls': 0}, libvirt()) def test_libvirt_bin_context_migration_tcp_listen(self): self.test_config.set('enable-live-migration', True) libvirt = context.NovaComputeLibvirtContext() self.assertEquals( - {'libvirtd_opts': '-d -l', 'listen_tls': 1}, libvirt()) + {'libvirtd_opts': '-d -l', 'listen_tls': 0}, libvirt()) diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index 3f480a81..4a452b5e 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -153,8 +153,7 @@ class NovaComputeRelationsTests(CharmTestCase): call(neutron_database='neutron', neutron_username='neutron', neutron_hostname='nova.foohost.com', - relation_id='shared-db:0'), -] + relation_id='shared-db:0')] [self.assertIn(c, self.relation_set.call_args_list) for c in 
calls] self.unit_get.assert_called_with('private-address') @@ -237,12 +236,8 @@ class NovaComputeRelationsTests(CharmTestCase): for func in expected_funcs: self.assertTrue(func.called) - @patch('os.mkdir') - @patch('os.path.isdir') - def test_ceph_joined(self, isdir, mkdir): - isdir.return_value = False + def test_ceph_joined(self): hooks.ceph_joined() - mkdir.assert_called_with('/etc/ceph') self.apt_install.assert_called_with(['ceph-common'], fatal=True) @patch.object(hooks, 'CONFIGS') From 063a2270bd8c6307a86a645b1326b3571f54ab47 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 23 Aug 2013 13:22:44 -0700 Subject: [PATCH 51/84] Sync helpers. --- .../charmhelpers/contrib/openstack/context.py | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index b2d40854..d7af19a7 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -48,6 +48,13 @@ class OSContextError(Exception): pass +def ensure_packages(packages): + '''Install but do not upgrade required plugin packages''' + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + def context_complete(ctxt): _missing = [] for k, v in ctxt.iteritems(): @@ -178,6 +185,8 @@ class CephContext(OSContextGenerator): def __call__(self): '''This generates context for /etc/ceph/ceph.conf templates''' + if not relation_ids('ceph'): + return {} log('Generating tmeplate context for ceph') mon_hosts = [] auth = None @@ -193,6 +202,12 @@ class CephContext(OSContextGenerator): } if not context_complete(ctxt): return {} + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + ensure_packages(['ceph-common']) + return ctxt @@ -341,10 +356,7 @@ class NeutronContext(object): return None def _ensure_packages(self): - '''Install but do not upgrade required plugin packages''' - required = 
filter_installed_packages(self.packages) - if required: - apt_install(required, fatal=True) + ensure_packages(self.packages) def _save_flag_file(self): if self.network_manager == 'quantum': From 80e9ba695ad0636a76a0351662179e6a0f23fb1d Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Fri, 23 Aug 2013 14:21:20 -0700 Subject: [PATCH 52/84] Subscribe neutron agent to main neutron/quantum config for restart_map. --- hooks/nova_compute_utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index c83ebef5..27ee746a 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -121,9 +121,10 @@ def resource_map(): # depending on the plugin used. if net_manager in ['neutron', 'quantum']: if net_manager == 'quantum': - resource_map.update(QUANTUM_RESOURCES) + nm_rsc = QUANTUM_RESOURCES if net_manager == 'neutron': - resource_map.update(NEUTRON_RESOURCES) + nm_rsc = NEUTRON_RESOURCES + resource_map.update(nm_rsc) resource_map['/etc/nova/nova.conf']['contexts'].append( NeutronComputeContext()) @@ -139,6 +140,9 @@ def resource_map(): resource_map[conf]['contexts'] = ctxts resource_map[conf]['contexts'].append(NeutronComputeContext()) + # associate the plugin agent with main network manager config(s) + [resource_map[nmc]['services'].extend(svcs) for nmc in nm_rsc] + if relation_ids('ceph'): resource_map.update(CEPH_RESOURCES) From e3d0c15e6a5ddea4f0933fb14e8fad1327c68f66 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 3 Sep 2013 15:32:18 -0700 Subject: [PATCH 53/84] Ensure libvirt secret set during NovaComputeCephContext()> --- hooks/charmhelpers/contrib/openstack/context.py | 3 +++ hooks/nova_compute_context.py | 14 ++++++++++++++ unit_tests/test_nova_compute_contexts.py | 2 +- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index d7af19a7..4c2e009b 
100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -190,15 +190,18 @@ class CephContext(OSContextGenerator): log('Generating tmeplate context for ceph') mon_hosts = [] auth = None + key = None for rid in relation_ids('ceph'): for unit in related_units(rid): mon_hosts.append(relation_get('private-address', rid=rid, unit=unit)) auth = relation_get('auth', rid=rid, unit=unit) + key = relation_get('key', rid=rid, unit=unit) ctxt = { 'mon_hosts': ' '.join(mon_hosts), 'auth': auth, + 'key': key, } if not context_complete(ctxt): return {} diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 96c9ae37..2a93c191 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -108,6 +108,14 @@ class NovaComputeVirtContext(context.OSContextGenerator): class NovaComputeCephContext(context.CephContext): + def libvirt_ceph(self, key): + # create secret for libvirt usage. + cmd = ['virsh', 'secret-define', '--file', '/etc/ceph/secret.xml'] + check_call(cmd) + cmd = ['virsh', 'secret-set-value', '--secret', CEPH_SECRET_UUID, + '--base64', key] + check_call(cmd) + def __call__(self): ctxt = super(NovaComputeCephContext, self).__call__() if not ctxt: @@ -120,6 +128,12 @@ class NovaComputeCephContext(context.CephContext): ctxt['rbd_user'] = svc ctxt['rbd_secret_uuid'] = CEPH_SECRET_UUID ctxt['rbd_pool'] = 'nova' + + # Ensure required hypervisor-specific config. + # Current supported libvirt flavors. Extend? 
+ if config('virt-type') in ['kvm', 'qemu', 'lxc']: + self.libvit_ceph(ctxt['key']) + return ctxt diff --git a/unit_tests/test_nova_compute_contexts.py b/unit_tests/test_nova_compute_contexts.py index 2f65f580..4b5667da 100644 --- a/unit_tests/test_nova_compute_contexts.py +++ b/unit_tests/test_nova_compute_contexts.py @@ -1,4 +1,4 @@ -from mock import patch +from mock import patch from unit_tests.test_utils import CharmTestCase from charmhelpers.contrib.openstack.context import OSContextError From bb0d741f3f50f82a13ab5e85525ac5e5cfb166ba Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 3 Sep 2013 15:51:13 -0700 Subject: [PATCH 54/84] Only define libvirt secret if we have the ceph secret file. --- hooks/nova_compute_context.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 2a93c191..70d423bc 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -1,3 +1,4 @@ +import os import socket from subprocess import check_call, check_output @@ -109,6 +110,8 @@ class NovaComputeVirtContext(context.OSContextGenerator): class NovaComputeCephContext(context.CephContext): def libvirt_ceph(self, key): + if not os.path.isfile('/etc/ceph/secret.xml'): + return # create secret for libvirt usage. cmd = ['virsh', 'secret-define', '--file', '/etc/ceph/secret.xml'] check_call(cmd) @@ -132,7 +135,7 @@ class NovaComputeCephContext(context.CephContext): # Ensure required hypervisor-specific config. # Current supported libvirt flavors. Extend? if config('virt-type') in ['kvm', 'qemu', 'lxc']: - self.libvit_ceph(ctxt['key']) + self.libvirt_ceph(ctxt['key']) return ctxt From 6577d259deb4f807d12a4cee61eb2188019c90a0 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 3 Sep 2013 17:16:15 -0700 Subject: [PATCH 55/84] Move creation of libvirt secret out of context and into a utility helper. 
--- hooks/charmhelpers/contrib/openstack/context.py | 7 ++++--- hooks/nova_compute_context.py | 16 ---------------- hooks/nova_compute_hooks.py | 12 +++++++++++- hooks/nova_compute_utils.py | 14 ++++++++++++++ unit_tests/test_nova_compute_hooks.py | 2 ++ 5 files changed, 31 insertions(+), 20 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 4c2e009b..5c5df245 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -187,7 +187,7 @@ class CephContext(OSContextGenerator): '''This generates context for /etc/ceph/ceph.conf templates''' if not relation_ids('ceph'): return {} - log('Generating tmeplate context for ceph') + log('Generating template context for ceph') mon_hosts = [] auth = None key = None @@ -203,12 +203,13 @@ class CephContext(OSContextGenerator): 'auth': auth, 'key': key, } - if not context_complete(ctxt): - return {} if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') + if not context_complete(ctxt): + return {} + ensure_packages(['ceph-common']) return ctxt diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 70d423bc..a42b69e9 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -1,4 +1,3 @@ -import os import socket from subprocess import check_call, check_output @@ -109,16 +108,6 @@ class NovaComputeVirtContext(context.OSContextGenerator): class NovaComputeCephContext(context.CephContext): - def libvirt_ceph(self, key): - if not os.path.isfile('/etc/ceph/secret.xml'): - return - # create secret for libvirt usage. 
- cmd = ['virsh', 'secret-define', '--file', '/etc/ceph/secret.xml'] - check_call(cmd) - cmd = ['virsh', 'secret-set-value', '--secret', CEPH_SECRET_UUID, - '--base64', key] - check_call(cmd) - def __call__(self): ctxt = super(NovaComputeCephContext, self).__call__() if not ctxt: @@ -132,11 +121,6 @@ class NovaComputeCephContext(context.CephContext): ctxt['rbd_secret_uuid'] = CEPH_SECRET_UUID ctxt['rbd_pool'] = 'nova' - # Ensure required hypervisor-specific config. - # Current supported libvirt flavors. Extend? - if config('virt-type') in ['kvm', 'qemu', 'lxc']: - self.libvirt_ceph(ctxt['key']) - return ctxt diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 58cb6db4..1b87b85a 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -1,6 +1,5 @@ #!/usr/bin/python -import os import sys from charmhelpers.core.hookenv import ( @@ -8,6 +7,7 @@ from charmhelpers.core.hookenv import ( config, log, relation_ids, + relation_get, relation_set, service_name, unit_get, @@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import ( from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute from nova_compute_utils import ( + create_libvirt_secret, determine_packages, import_authorized_keys, import_keystone_ca_cert, @@ -42,6 +43,8 @@ from nova_compute_utils import ( register_configs, ) +from nova_compute_context import CEPH_SECRET_UUID + from misc_utils import ( ensure_ceph_keyring, ) @@ -174,6 +177,13 @@ def ceph_changed(): CONFIGS.write('/etc/ceph/secret.xml') CONFIGS.write('/etc/nova/nova.conf') + # With some refactoring, this can move into NovaComputeCephContext + # and allow easily extended to support other compute flavors. 
+ if config('virt-type') in ['kvm', 'qemu', 'lxc']: + create_libvirt_secret(secret_file='/etc/ceph/secret.xml', + secret_uuid=CEPH_SECRET_UUID, + key=relation_get('key')) + @hooks.hook('amqp-relation-broken', 'ceph-relation-broken', diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 27ee746a..a6323a5d 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -13,6 +13,7 @@ from charmhelpers.core.hookenv import ( related_units, relation_ids, relation_get, + DEBUG, ) from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute @@ -349,3 +350,16 @@ def import_keystone_ca_cert(): with open(CA_CERT_PATH, 'wb') as out: out.write(b64decode(ca_cert)) check_call(['update-ca-certificates']) + + +def create_libvirt_secret(secret_file, secret_uuid, key): + if secret_uuid in check_output(['virsh', 'secret-list']): + log('Libvirt secret already exists for uuid %s.' % secret_uuid, + level=DEBUG) + return + log('Defining new libvirt secret for uuid %s.' % secret_uuid) + cmd = ['virsh', 'secret-define', '--file', '/etc/ceph/secret.xml'] + check_call(cmd) + cmd = ['virsh', 'secret-set-value', '--secret', secret_uuid, + '--base64', key] + check_call(cmd) diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index 4a452b5e..03c44054 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -20,6 +20,7 @@ TO_PATCH = [ 'Hooks', 'config', 'log', + 'relation_get', 'relation_ids', 'relation_set', 'service_name', @@ -34,6 +35,7 @@ TO_PATCH = [ 'openstack_upgrade_available', # nova_compute_utils #'PACKAGES', + 'create_libvirt_secret', 'restart_map', 'determine_packages', 'import_authorized_keys', From 8167f2405b56e8f0c8e938ec2476655a8bfbf384 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 5 Sep 2013 16:40:47 -0700 Subject: [PATCH 56/84] Move get_host_ip() to helpers for use in nova-c-c, too. 
--- hooks/charmhelpers/contrib/openstack/utils.py | 33 ++++++++++++++++++- hooks/nova_compute_context.py | 27 ++------------- 2 files changed, 34 insertions(+), 26 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 677fa1dd..a57efdaa 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,12 +1,12 @@ #!/usr/bin/python # Common python helper functions used for OpenStack charms. - from collections import OrderedDict import apt_pkg as apt import subprocess import os +import socket import sys from charmhelpers.core.hookenv import ( @@ -290,3 +290,34 @@ def openstack_upgrade_available(package): available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def get_host_ip(hostname): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. 
+ """ + try: + import dns.resolver.query + except ImportError: + apt_install('python-dnspython') + import dns.resolver.query + + if is_ip(hostname): + return hostname + answers = dns.resolver.query(hostname, 'A') + if answers: + return answers[0].address + return None diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index a42b69e9..e7d2f941 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -1,4 +1,3 @@ -import socket from subprocess import check_call, check_output @@ -18,7 +17,7 @@ from charmhelpers.core.hookenv import ( ERROR, ) -from charmhelpers.contrib.openstack.utils import os_release +from charmhelpers.contrib.openstack.utils import get_host_ip, os_release # This is just a label and it must be consistent across @@ -290,28 +289,6 @@ class CloudComputeContext(context.OSContextGenerator): return ctxt -def get_host_ip(): - # we used to have a charm-helper to do this, but its disappeared? - # taken from quantum-gateway - - try: - import dns.resolver - except ImportError: - apt_install('python-dnspython') - import dns.resolver - - hostname = unit_get('private-address') - try: - # Test to see if already an IPv4 address - socket.inet_aton(hostname) - return hostname - except socket.error: - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address - return None - - class NeutronComputeContext(context.NeutronContext): interfaces = [] @@ -349,5 +326,5 @@ class NeutronComputeContext(context.NeutronContext): self._ensure_bridge() - ovs_ctxt['local_ip'] = get_host_ip() + ovs_ctxt['local_ip'] = get_host_ip(unit_get('private-address')) return ovs_ctxt From 56fc9641f8b5ca6637e129bb92f043fe60affdc9 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 5 Sep 2013 17:00:45 -0700 Subject: [PATCH 57/84] Sync helpers. 
--- hooks/charmhelpers/contrib/openstack/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index a57efdaa..84f69d7f 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -310,10 +310,10 @@ def get_host_ip(hostname): the input if it is already an IP. """ try: - import dns.resolver.query + import dns.resolver except ImportError: apt_install('python-dnspython') - import dns.resolver.query + import dns.resolver if is_ip(hostname): return hostname From d2f70bc574951a2e4072fca5a13c1a21c6657129 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 5 Sep 2013 18:40:08 -0700 Subject: [PATCH 58/84] Sync helpers. --- hooks/charmhelpers/contrib/openstack/utils.py | 53 +++++++++++++++---- 1 file changed, 44 insertions(+), 9 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 84f69d7f..de52fe77 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -304,20 +304,55 @@ def is_ip(address): return False -def get_host_ip(hostname): - """ - Resolves the IP for a given hostname, or returns - the input if it is already an IP. - """ +def ns_query(address): try: import dns.resolver except ImportError: apt_install('python-dnspython') import dns.resolver + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, basestring): + rtype = 'A' + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. 
+ """ if is_ip(hostname): return hostname - answers = dns.resolver.query(hostname, 'A') - if answers: - return answers[0].address - return None + + return ns_query(hostname) + + +def get_hostname(address): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if not is_ip(address): + return address + + try: + import dns.reversename + except ImportError: + apt_install('python-dnspython') + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + + # strip trailing . + if result.endswith('.'): + return result[:-1] + return result From 907574bcb77120563e7deb25bb17fdec1366f61c Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 5 Sep 2013 19:11:31 -0700 Subject: [PATCH 59/84] Subscribe nova.conf to libvirt context, set migration URI for ssh migration accordingly. 
''' interfaces = [] @@ -96,6 +96,10 @@ class NovaComputeLibvirtContext(context.OSContextGenerator): if config('migration-auth-type') in ['none', 'None', 'ssh']: ctxt['listen_tls'] = 0 + if config('migration-auth-type') == 'ssh': + # nova.conf + ctxt['libvirt_migration_uri'] = 'qemu+ssh://%s/system' + return ctxt diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index a6323a5d..e969a1e3 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -61,6 +61,7 @@ BASE_RESOURCE_MAP = { context.ImageServiceContext(), context.OSConfigFlagContext(), CloudComputeContext(), + NovaComputeLibvirtContext(), NovaComputeCephContext()], }, } diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 5d3fdb53..40cba99c 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -71,3 +71,7 @@ volume_api_class=nova.volume.cinder.API {{ key }} = {{ value }} {% endfor -%} {% endif -%} + +{% if libvirt_migration_uri -%} +libvirt_migration_uri = {{ libvirt_migration_uri }} +{% endif -%} From 72af3042f3c6722d36e46f4446f40dc5f7eeea42 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 5 Sep 2013 19:18:46 -0700 Subject: [PATCH 60/84] Use correct flag: libvirt_migration_uri -> live_migration_uri. --- hooks/nova_compute_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 1317db81..ae6517f7 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -98,7 +98,7 @@ class NovaComputeLibvirtContext(context.OSContextGenerator): if config('migration-auth-type') == 'ssh': # nova.conf - ctxt['libvirt_migration_uri'] = 'qemu+ssh://%s/system' + ctxt['live_migration_uri'] = 'qemu+ssh://%s/system' return ctxt From 428bf6b09e00e11993e3a7bb5ba77604aa373523 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 5 Sep 2013 20:00:55 -0700 Subject: [PATCH 61/84] Update template with correct live_migration_uri flag. 
--- templates/folsom/nova.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 40cba99c..50195202 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -72,6 +72,6 @@ volume_api_class=nova.volume.cinder.API {% endfor -%} {% endif -%} -{% if libvirt_migration_uri -%} -libvirt_migration_uri = {{ libvirt_migration_uri }} +{% if live_migration_uri -%} +live_migration_uri = {{ live_migration_uri }} {% endif -%} From 437f362af7420a706705a70ced748110a29cc41c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 20 Sep 2013 17:40:54 +0100 Subject: [PATCH 62/84] Resync charm-helpers --- Makefile | 2 +- charm-helpers.yaml | 1 + hooks/charmhelpers/contrib/hahelpers/ceph.py | 5 +- .../charmhelpers/contrib/openstack/context.py | 38 ++++++++----- .../contrib/openstack/templating.py | 2 +- hooks/charmhelpers/contrib/openstack/utils.py | 5 +- hooks/charmhelpers/core/host.py | 56 +++---------------- hooks/nova_compute_context.py | 4 +- hooks/nova_compute_hooks.py | 5 +- hooks/nova_compute_utils.py | 2 +- 10 files changed, 49 insertions(+), 71 deletions(-) diff --git a/Makefile b/Makefile index 2e2b2db6..d91e4b49 100644 --- a/Makefile +++ b/Makefile @@ -11,4 +11,4 @@ test: @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests sync: - @charm-helper-sync -c charm-helpers-sync.yaml + @charm-helper-sync -c charm-helpers.yaml diff --git a/charm-helpers.yaml b/charm-helpers.yaml index ddc1575c..cbfcfba0 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -2,6 +2,7 @@ branch: lp:~openstack-charmers/charm-helpers/to_upstream destination: hooks/charmhelpers include: - core + - fetch - contrib.openstack|inc=* - contrib.storage - contrib.hahelpers: diff --git a/hooks/charmhelpers/contrib/hahelpers/ceph.py b/hooks/charmhelpers/contrib/hahelpers/ceph.py index 8ff029a4..6e085c06 100644 --- a/hooks/charmhelpers/contrib/hahelpers/ceph.py +++ 
b/hooks/charmhelpers/contrib/hahelpers/ceph.py @@ -28,8 +28,11 @@ from charmhelpers.core.hookenv import ( ERROR ) -from charmhelpers.core.host import ( +from charmhelpers.fetch import ( apt_install, +) + +from charmhelpers.core.host import ( mount, mounts, service_start, diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 5c5df245..92924e34 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -7,7 +7,7 @@ from subprocess import ( ) -from charmhelpers.core.host import ( +from charmhelpers.fetch import ( apt_install, filter_installed_packages, ) @@ -110,9 +110,9 @@ class SharedDBContext(OSContextGenerator): 'database_user': self.user, 'database_password': passwd, } - if not context_complete(ctxt): - return {} - return ctxt + if context_complete(ctxt): + return ctxt + return {} class IdentityServiceContext(OSContextGenerator): @@ -141,9 +141,9 @@ class IdentityServiceContext(OSContextGenerator): 'service_protocol': 'http', 'auth_protocol': 'http', } - if not context_complete(ctxt): - return {} - return ctxt + if context_complete(ctxt): + return ctxt + return {} class AMQPContext(OSContextGenerator): @@ -164,20 +164,30 @@ class AMQPContext(OSContextGenerator): for rid in relation_ids('amqp'): for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): - rabbitmq_host = relation_get('vip', rid=rid, unit=unit) + ctxt['clustered'] = True + ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, + unit=unit) else: - rabbitmq_host = relation_get('private-address', - rid=rid, unit=unit) - ctxt = { - 'rabbitmq_host': rabbitmq_host, + ctxt['rabbitmq_host'] = relation_get('private-address', + rid=rid, unit=unit) + ctxt.update({ 'rabbitmq_user': username, 'rabbitmq_password': relation_get('password', rid=rid, unit=unit), 'rabbitmq_virtual_host': vhost, - } + }) + if context_complete(ctxt): + # Sufficient information found = break 
out! + break + # Used for active/active rabbitmq >= grizzly + ctxt['rabbitmq_hosts'] = [] + for unit in related_units(rid): + ctxt['rabbitmq_hosts'].append(relation_get('private-address', + rid=rid, unit=unit)) if not context_complete(ctxt): return {} - return ctxt + else: + return ctxt class CephContext(OSContextGenerator): diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 0b534433..4595778c 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,6 +1,6 @@ import os -from charmhelpers.core.host import apt_install +from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( log, diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index de52fe77..39f627df 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -17,6 +17,9 @@ from charmhelpers.core.hookenv import ( from charmhelpers.core.host import ( lsb_release, +) + +from charmhelpers.fetch import ( apt_install, ) @@ -130,7 +133,7 @@ def get_os_codename_package(package, fatal=True): e = 'Could not determine version of uninstalled package: %s' % package error_out(e) - vers = apt.UpstreamVersion(pkg.current_ver.ver_str) + vers = apt.upstream_version(pkg.current_ver.ver_str) try: if 'swift' in pkg.name: diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 4426d009..1a63bf89 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -5,7 +5,6 @@ # Nick Moffitt # Matthew Wedgwood -import apt_pkg import os import pwd import grp @@ -20,20 +19,22 @@ from hookenv import log def service_start(service_name): - service('start', service_name) + return service('start', service_name) def service_stop(service_name): - service('stop', service_name) + return service('stop', service_name) def 
service_restart(service_name): - service('restart', service_name) + return service('restart', service_name) def service_reload(service_name, restart_on_failure=False): - if not service('reload', service_name) and restart_on_failure: - service('restart', service_name) + service_result = service('reload', service_name) + if not service_result and restart_on_failure: + service_result = service('restart', service_name) + return service_result def service(action, service_name): @@ -136,49 +137,6 @@ def write_file(path, content, owner='root', group='root', perms=0444): target.write(content) -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - apt_pkg.init() - cache = apt_pkg.Cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - options = options or [] - cmd = ['apt-get', '-y'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, basestring): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - def mount(device, mountpoint, options=None, persist=False): '''Mount a filesystem''' cmd_args = ['mount'] diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index ae6517f7..8bd2d25b 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -3,8 +3,8 @@ from subprocess import check_call, check_output from charmhelpers.contrib.openstack import context -from 
charmhelpers.core.host import ( - apt_install, filter_installed_packages, service_running, service_start) +from charmhelpers.core.host import service_running, service_start +from charmhelpers.fetch import apt_install, filter_installed_packages from charmhelpers.core.hookenv import ( config, diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 1b87b85a..9118b727 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -15,10 +15,13 @@ from charmhelpers.core.hookenv import ( ) from charmhelpers.core.host import ( + restart_on_change, +) + +from charmhelpers.fetch import ( apt_install, apt_update, filter_installed_packages, - restart_on_change, ) from charmhelpers.contrib.openstack.utils import ( diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index e969a1e3..a79e6eee 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -5,7 +5,7 @@ from base64 import b64decode from copy import deepcopy from subprocess import check_call, check_output -from charmhelpers.core.host import apt_update, apt_install +from charmhelpers.fetch import apt_update, apt_install from charmhelpers.core.hookenv import ( config, From 85b0472eb806c79e63237a4d9d35b0ff9bf9519c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 20 Sep 2013 17:50:33 +0100 Subject: [PATCH 63/84] Use constants for configuration file names --- hooks/nova_compute_hooks.py | 21 ++++++++++++--------- hooks/nova_compute_utils.py | 34 ++++++++++++++++++++++------------ 2 files changed, 34 insertions(+), 21 deletions(-) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 9118b727..3dbcc719 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -44,6 +44,9 @@ from nova_compute_utils import ( public_ssh_key, restart_map, register_configs, + NOVA_CONF, + QUANTUM_CONF, NEUTRON_CONF, + CEPH_CONF, CEPH_SECRET ) from nova_compute_context import CEPH_SECRET_UUID @@ -90,11 +93,11 @@ def 
amqp_changed(): if 'amqp' not in CONFIGS.complete_contexts(): log('amqp relation incomplete. Peer not ready?') return - CONFIGS.write('/etc/nova/nova.conf') + CONFIGS.write(NOVA_CONF) if network_manager() == 'quantum': - CONFIGS.write('/etc/quantum/quantum.conf') + CONFIGS.write(QUANTUM_CONF) if network_manager() == 'neutron': - CONFIGS.write('/etc/neutron/neutron.conf') + CONFIGS.write(NEUTRON_CONF) @hooks.hook('shared-db-relation-joined') @@ -117,7 +120,7 @@ def db_changed(): if 'shared-db' not in CONFIGS.complete_contexts(): log('shared-db relation incomplete. Peer not ready?') return - CONFIGS.write('/etc/nova/nova.conf') + CONFIGS.write(NOVA_CONF) nm = network_manager() if nm in ['quantum', 'neutron']: plugin = neutron_plugin() @@ -130,7 +133,7 @@ def image_service_changed(): if 'image-service' not in CONFIGS.complete_contexts(): log('image-service relation incomplete. Peer not ready?') return - CONFIGS.write('/etc/nova/nova.conf') + CONFIGS.write(NOVA_CONF) @hooks.hook('cloud-compute-relation-joined') @@ -176,14 +179,14 @@ def ceph_changed(): if not ensure_ceph_keyring(service=svc): log('Could not create ceph keyring: peer not ready?') return - CONFIGS.write('/etc/ceph/ceph.conf') - CONFIGS.write('/etc/ceph/secret.xml') - CONFIGS.write('/etc/nova/nova.conf') + CONFIGS.write(CEPH_CONF) + CONFIGS.write(CEPH_SECRET) + CONFIGS.write(NOVA_CONF) # With some refactoring, this can move into NovaComputeCephContext # and allow easily extended to support other compute flavors. if config('virt-type') in ['kvm', 'qemu', 'lxc']: - create_libvirt_secret(secret_file='/etc/ceph/secret.xml', + create_libvirt_secret(secret_file=CEPH_SECRET, secret_uuid=CEPH_SECRET_UUID, key=relation_get('key')) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index a79e6eee..adde4a5d 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -41,20 +41,25 @@ BASE_PACKAGES = [ 'genisoimage', # was missing as a package dependency until raring. 
] +QEMU_CONF = '/etc/libvirt/qemu.conf' +LIBVIRTD_CONF = '/etc/libvirt/libvirtd.conf' +LIBVIRT_BIN = '/etc/default/libvirt-bin' +NOVA_CONF = '/etc/nova/nova.conf' + BASE_RESOURCE_MAP = { - '/etc/libvirt/qemu.conf': { + QEMU_CONF: { 'services': ['libvirt-bin'], 'contexts': [], }, - '/etc/libvirt/libvirtd.conf': { + LIBVIRTD_CONF: { 'services': ['libvirt-bin'], 'contexts': [NovaComputeLibvirtContext()], }, - '/etc/default/libvirt-bin': { + LIBVIRT_BIN: { 'services': ['libvirt-bin'], 'contexts': [NovaComputeLibvirtContext()], }, - '/etc/nova/nova.conf': { + NOVA_CONF: { 'services': ['nova-compute'], 'contexts': [context.AMQPContext(), context.SharedDBContext(relation_prefix='nova'), @@ -66,26 +71,33 @@ BASE_RESOURCE_MAP = { }, } +CEPH_CONF = '/etc/ceph/ceph.conf' +CEPH_SECRET = '/etc/ceph/secret.xml' + CEPH_RESOURCES = { - '/etc/ceph/ceph.conf': { + CEPH_CONF: { 'contexts': [NovaComputeCephContext()], 'services': [], }, - '/etc/ceph/secret.xml': { + CEPH_SECRET: { 'contexts': [NovaComputeCephContext()], 'services': [], } } +QUANTUM_CONF = '/etc/quantum/quantum.conf' + QUANTUM_RESOURCES = { - '/etc/quantum/quantum.conf': { + QUANTUM_CONF: { 'services': [], 'contexts': [context.AMQPContext()], } } +NEUTRON_CONF = '/etc/neutron/neutron.conf' + NEUTRON_RESOURCES = { - '/etc/neutron/neutron.conf': { + NEUTRON_CONF: { 'services': [], 'contexts': [context.AMQPContext()], } @@ -128,8 +140,7 @@ def resource_map(): nm_rsc = NEUTRON_RESOURCES resource_map.update(nm_rsc) - resource_map['/etc/nova/nova.conf']['contexts'].append( - NeutronComputeContext()) + resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext()) plugin = neutron_plugin() if plugin: @@ -337,7 +348,6 @@ def do_openstack_upgrade(configs): # set CONFIGS to load templates from new release and regenerate config configs.set_release(openstack_release=new_os_rel) configs.write_all() - pass def import_keystone_ca_cert(): @@ -359,7 +369,7 @@ def create_libvirt_secret(secret_file, secret_uuid, key): level=DEBUG) 
return log('Defining new libvirt secret for uuid %s.' % secret_uuid) - cmd = ['virsh', 'secret-define', '--file', '/etc/ceph/secret.xml'] + cmd = ['virsh', 'secret-define', '--file', secret_file] check_call(cmd) cmd = ['virsh', 'secret-set-value', '--secret', secret_uuid, '--base64', key] From 557a665bdc6bb7f3202b96346e51b2bb2daf4cce Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 20 Sep 2013 17:51:57 +0100 Subject: [PATCH 64/84] Add pydev files --- .project | 17 +++++++++++++++++ .pydevproject | 8 ++++++++ 2 files changed, 25 insertions(+) create mode 100644 .project create mode 100644 .pydevproject diff --git a/.project b/.project new file mode 100644 index 00000000..0b03826e --- /dev/null +++ b/.project @@ -0,0 +1,17 @@ + + + nova-compute + + + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + diff --git a/.pydevproject b/.pydevproject new file mode 100644 index 00000000..129680d7 --- /dev/null +++ b/.pydevproject @@ -0,0 +1,8 @@ + + +python 2.7 +Default + +/nova-compute + + From b337c3eb93ca08e13dff9c0b932d1b79a5cf286b Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 23 Sep 2013 14:23:51 +0100 Subject: [PATCH 65/84] Add missing fetch helper --- .pydevproject | 3 +- hooks/charmhelpers/fetch/__init__.py | 209 +++++++++++++++++++++++++ hooks/charmhelpers/fetch/archiveurl.py | 48 ++++++ hooks/charmhelpers/fetch/bzrurl.py | 49 ++++++ 4 files changed, 308 insertions(+), 1 deletion(-) create mode 100644 hooks/charmhelpers/fetch/__init__.py create mode 100644 hooks/charmhelpers/fetch/archiveurl.py create mode 100644 hooks/charmhelpers/fetch/bzrurl.py diff --git a/.pydevproject b/.pydevproject index 129680d7..a69fbde8 100644 --- a/.pydevproject +++ b/.pydevproject @@ -3,6 +3,7 @@ python 2.7 Default -/nova-compute +/nova-compute/hooks +/nova-compute/unit_tests diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py new file mode 100644 index 00000000..b2f96467 --- /dev/null +++ 
b/hooks/charmhelpers/fetch/__init__.py @@ -0,0 +1,209 @@ +import importlib +from yaml import safe_load +from charmhelpers.core.host import ( + lsb_release +) +from urlparse import ( + urlparse, + urlunparse, +) +import subprocess +from charmhelpers.core.hookenv import ( + config, + log, +) +import apt_pkg + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages""" + options = options or [] + cmd = ['apt-get', '-y'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_update(fatal=False): + """Update local apt cache""" + cmd = ['apt-get', 'update'] + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def apt_purge(packages, fatal=False): + """Purge one or more packages""" + cmd = ['apt-get', '-y', 'purge'] + if isinstance(packages, basestring): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + if fatal: + subprocess.check_call(cmd) + else: + subprocess.call(cmd) + + +def add_source(source, key=None): + if ((source.startswith('ppa:') or + source.startswith('http:'))): + 
subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + if key: + subprocess.check_call(['apt-key', 'import', key]) + + +class SourceConfigError(Exception): + pass + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """ + Configure multiple sources from charm configuration + + Example config: + install_sources: + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. + """ + sources = safe_load(config(sources_var)) + keys = safe_load(config(keys_var)) + if isinstance(sources, basestring) and isinstance(keys, basestring): + add_source(sources, keys) + else: + if not len(sources) == len(keys): + msg = 'Install sources and keys lists are different lengths' + raise SourceConfigError(msg) + for src_num in range(len(sources)): + add_source(sources[src_num], keys[src_num]) + if update: + apt_update(fatal=True) + +# The order of this list is very important. Handlers should be listed from +# least- to most-specific URL matching.
+FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', +) + + +class UnhandledSource(Exception): + pass + + +def install_remote(source): + """ + Install a file tree from a remote source + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this modules submodules + Options supported are submodule-specific""" + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + installed_to = None + for handler in handlers: + try: + installed_to = handler.install(source) + except UnhandledSource: + pass + if not installed_to: + raise UnhandledSource("No handler found for source {}".format(source)) + return installed_to + + +def install_from_config(config_var_name): + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +class BaseFetchHandler(object): + """Base class for FetchHandler implementations in fetch plugins""" + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. 
Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr(importlib.import_module(package), classname) + plugin_list.append(handler_class()) + except (ImportError, AttributeError): + # Skip missing plugins so that they can be omitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format(handler_name)) + return plugin_list diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py new file mode 100644 index 00000000..e35b8f15 --- /dev/null +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,48 @@ +import os +import urllib2 +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """Handler for archives via generic URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + # propagate all exceptions + # URLError, OSError, etc + response = urllib2.urlopen(source) + try: + with open(dest, 'w') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + def install(self,
source): + url_parts = self.parse_url(source) + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) + try: + self.download(source, dld_file) + except urllib2.URLError as e: + raise UnhandledSource(e.reason) + except OSError as e: + raise UnhandledSource(e.strerror) + return extract(dld_file) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py new file mode 100644 index 00000000..c348b4bb --- /dev/null +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -0,0 +1,49 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from bzrlib.branch import Branch +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-bzrlib") + from bzrlib.branch import Branch + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp'): + return False + else: + return True + + def branch(self, source, dest): + url_parts = self.parse_url(source) + # If we use lp:branchname scheme we need to load plugins + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + if url_parts.scheme == "lp": + from bzrlib.plugin import load_plugins + load_plugins() + try: + remote_branch = Branch.open(source) + remote_branch.bzrdir.sprout(dest).open_branch() + except Exception as e: + raise e + + def install(self, source): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.branch(source, dest_dir) + except OSError as e: + raise 
UnhandledSource(e.strerror) + return dest_dir + From e3b3cc19bf65c891c6dfdc585a792b21eb1be589 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 23 Sep 2013 11:49:55 -0700 Subject: [PATCH 66/84] Resync helpers, rely on charm-helpers for ensure_ceph_keyring(). --- charm-helpers.yaml | 1 - .../contrib/storage/linux/ceph.py | 336 ++++++++++++++++++ hooks/misc_utils.py | 33 -- hooks/nova_compute_hooks.py | 5 +- 4 files changed, 337 insertions(+), 38 deletions(-) create mode 100644 hooks/charmhelpers/contrib/storage/linux/ceph.py delete mode 100644 hooks/misc_utils.py diff --git a/charm-helpers.yaml b/charm-helpers.yaml index cbfcfba0..3000e7c5 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -7,5 +7,4 @@ include: - contrib.storage - contrib.hahelpers: - apache - - ceph - cluster diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 00000000..29b214b2 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,336 @@ +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import os +import shutil +import json +import time + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, + WARNING, + ERROR +) + +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, +) + +from charmhelpers.fetch import ( + apt_install, +) + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] + auth supported = {auth} + keyring = {keyring} + mon host = {mon_hosts} +""" + + +def install(): + ''' Basic Ceph client installation ''' + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + ''' Check to see if a RADOS block device exists ''' + try: + out = check_output(['rbd', 'list', '--id', service, + '--pool', pool]) + except CalledProcessError: + return False + else: + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + ''' Create a new RADOS block device ''' + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + ''' Check to see if a RADOS pool already exists ''' + try: + out = check_output(['rados', '--id', service, 'lspools']) + except CalledProcessError: + return False + else: + return name in out + + +def get_osds(): + ''' + Return a list of all Ceph Object Storage Daemons + currently in the cluster + ''' + return json.loads(check_output(['ceph', 'osd', 'ls', '--format=json'])) + + +def create_pool(service, name, replicas=2): + ''' Create a new RADOS pool ''' + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping 
creation".format(name), + level=WARNING) + return + # Calculate the number of placement groups based + # on upstream recommended best practices. + pgnum = (len(get_osds()) * 100 / replicas) + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'create', + name, pgnum + ] + check_call(cmd) + cmd = [ + 'ceph', '--id', service, + 'osd', 'set', name, + 'size', replicas + ] + check_call(cmd) + + +def delete_pool(service, name): + ''' Delete a RADOS pool from ceph ''' + cmd = [ + 'ceph', '--id', service, + 'osd', 'pool', 'delete', + name, '--yes-i-really-really-mean-it' + ] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def create_keyring(service, key): + ''' Create a new Ceph keyring containing key''' + keyring = _keyring_path(service) + if os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + return + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.{}'.format(service), + '--add-key={}'.format(key) + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + ''' Create a file containing key ''' + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + return + with open(keyfile, 'w') as fd: + fd.write(key) + log('ceph: Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(): + ''' Query named relation 'ceph' to determine current nodes ''' + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth): + ''' Perform basic configuration of Ceph ''' + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)))) + modprobe('rbd') + + +def image_mapped(name): + ''' Determine whether a RADOS block device is mapped locally ''' + try: + out = check_output(['rbd', 'showmapped']) + except CalledProcessError: + return False + else: + return name in out + + +def map_block_storage(service, pool, image): + ''' Map a RADOS block device for local use ''' + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + ''' Determine whether a filesystem is already mounted ''' + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + ''' Make a new filesystem on the specified block device ''' + count = 0 + e_noent = os.errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('ceph: gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + log('ceph: waiting for block device %s to appear' % blk_device, + level=INFO) + count += 1 + time.sleep(1) + else: + log('ceph: Formatting block device %s as filesystem %s.'
% + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + ''' Migrate data in data_src_dst to blk_device and then remount ''' + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +# TODO: re-use +def modprobe(module): + ''' Load a kernel module and configure for auto-load on reboot ''' + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def copy_files(src, dst, symlinks=False, ignore=None): + ''' Copy files from src to dst ''' + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[]): + """ + NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. 
+ """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool {}.'.format(pool)) + create_pool(service, pool) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image ({}).'.format(rbd_img)) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('ceph: Stopping services {} prior to migrating data.' + .format(svc)) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('ceph: Starting service {} after migrating data.' + .format(svc)) + service_start(svc) diff --git a/hooks/misc_utils.py b/hooks/misc_utils.py deleted file mode 100644 index 7ec63938..00000000 --- a/hooks/misc_utils.py +++ /dev/null @@ -1,33 +0,0 @@ -import subprocess - -from charmhelpers.core.hookenv import ( - relation_get, - relation_ids, - related_units, -) - -from charmhelpers.contrib.hahelpers.ceph import ( - create_keyring as ceph_create_keyring, - keyring_path as ceph_keyring_path, -) - - -# This was pulled from cinder redux. It should go somewhere common, -# charmhelpers.hahelpers.ceph? - -def ensure_ceph_keyring(service): - '''Ensures a ceph keyring exists. Returns True if so, False otherwise''' - # TODO: This can be shared between nova + glance + cinder, find a home for - # it. 
- key = None - for rid in relation_ids('ceph'): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - if not key: - return False - ceph_create_keyring(service=service, key=key) - keyring = ceph_keyring_path(service) - subprocess.check_call(['chown', 'nova.nova', keyring]) - return True diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index 3dbcc719..e68b3c42 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import ( openstack_upgrade_available, ) +from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute from nova_compute_utils import ( @@ -51,10 +52,6 @@ from nova_compute_utils import ( from nova_compute_context import CEPH_SECRET_UUID -from misc_utils import ( - ensure_ceph_keyring, -) - hooks = Hooks() CONFIGS = register_configs() From 1ecc7117d38b3d96074c9f33b7c4f13e3e5235f9 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 23 Sep 2013 12:04:24 -0700 Subject: [PATCH 67/84] Checkin new helpers. --- .../contrib/storage/linux/ceph.py | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 29b214b2..10e665d4 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -334,3 +334,25 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, log('ceph: Starting service {} after migrating data.' .format(svc)) service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None): + ''' + Ensures a ceph keyring is created for a named service + and optionally ensures user and group ownership. + + Returns False if no ceph key is available in relation state. 
+ ''' + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + if not key: + return False + create_keyring(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + return True From cc57998e12124f1de18eeb4872f297d3bf4bb486 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 24 Sep 2013 13:04:12 +0100 Subject: [PATCH 68/84] Added neutron configuration files --- templates/neutron.conf | 38 ++++++++++++++++++++++++++++++++ templates/ovs_neutron_plugin.ini | 27 +++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 templates/neutron.conf create mode 100644 templates/ovs_neutron_plugin.ini diff --git a/templates/neutron.conf b/templates/neutron.conf new file mode 100644 index 00000000..1b81f8c4 --- /dev/null +++ b/templates/neutron.conf @@ -0,0 +1,38 @@ +# grizzly +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[DEFAULT] +state_path = /var/lib/neutron +lock_path = $state_path/lock +bind_host = 0.0.0.0 +bind_port = 9696 +{% if core_plugin -%} +core_plugin = {{ core_plugin }} +{% endif -%} +api_paste_config = /etc/neutron/api-paste.ini +auth_strategy = keystone +control_exchange = quantum +notification_driver = neutron.openstack.common.notifier.rpc_notifier +default_notification_level = INFO +notification_topics = notifications +{% if rabbitmq_host -%} +rabbit_host = {{ rabbitmq_host }} +rabbit_userid = {{ rabbitmq_user }} +rabbit_password = {{ rabbitmq_password }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} + +{% endif -%} + + +[QUOTAS] + +[DEFAULT_SERVICETYPE] + +[AGENT] +root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf + +[keystone_authtoken] +signing_dir = /var/lib/neutron/keystone-signing + diff --git a/templates/ovs_neutron_plugin.ini b/templates/ovs_neutron_plugin.ini new file mode 100644 index 00000000..b2145430 --- /dev/null +++ b/templates/ovs_neutron_plugin.ini @@ -0,0 +1,27 @@ +# havana +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[OVS] +tunnel_id_ranges = 1:1000 +tenant_network_type = gre +enable_tunneling = True +local_ip = {{ local_ip }} + +[DATABASE] +{% if database_host -%} +sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}?charset=utf8 +reconnect_interval = 2 +{% else -%} +connection = sqlite:////var/lib/neutron/neutron.sqlite +{% endif -%} + +[AGENT] +polling_interval = 2 + +[SECURITYGROUP] +{% if neutron_security_groups -%} +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +{% endif -%} + From 23edef7f4ace7cfc3b71554f4e4bf731c5914c5f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 24 Sep 2013 14:42:13 +0100 Subject: [PATCH 69/84] Use more constants, use default neutron/quantum control exchange --- hooks/nova_compute_utils.py | 8 ++++---- templates/neutron.conf | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index adde4a5d..9d2a6cca 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -127,7 +127,7 @@ def resource_map(): # FlatDHCPManager only requires some extra packages. 
if (net_manager in ['flatmanager', 'flatdhcpmanager'] and config('multi-host').lower() == 'yes'): - resource_map['/etc/nova/nova.conf']['services'].extend( + resource_map[NOVA_CONF]['services'].extend( ['nova-api', 'nova-network'] ) @@ -319,9 +319,9 @@ def configure_live_migration(configs=None): # dont think we need this return configs = configs or register_configs() - configs.write('/etc/libvirt/libvirtd.conf') - configs.write('/etc/default/libvirt-bin') - configs.write('/etc/nova/nova.conf') + configs.write(LIBVIRTD_CONF) + configs.write(LIBVIRT_BIN) + configs.write(NOVA_CONF) if not migration_enabled(): return diff --git a/templates/neutron.conf b/templates/neutron.conf index 1b81f8c4..d5583aef 100644 --- a/templates/neutron.conf +++ b/templates/neutron.conf @@ -13,7 +13,6 @@ core_plugin = {{ core_plugin }} {% endif -%} api_paste_config = /etc/neutron/api-paste.ini auth_strategy = keystone -control_exchange = quantum notification_driver = neutron.openstack.common.notifier.rpc_notifier default_notification_level = INFO notification_topics = notifications From fa946c7d4794833d23defb130ad480417c963e89 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 24 Sep 2013 14:43:46 +0100 Subject: [PATCH 70/84] Rejig template locations, fix db url for folsom --- templates/folsom/ovs_quantum_plugin.ini | 2 +- templates/folsom/quantum.conf | 1 - templates/{ => havana}/neutron.conf | 0 templates/{ => havana}/ovs_neutron_plugin.ini | 0 4 files changed, 1 insertion(+), 2 deletions(-) rename templates/{ => havana}/neutron.conf (100%) rename templates/{ => havana}/ovs_neutron_plugin.ini (100%) diff --git a/templates/folsom/ovs_quantum_plugin.ini b/templates/folsom/ovs_quantum_plugin.ini index da8c9a31..664424a0 100644 --- a/templates/folsom/ovs_quantum_plugin.ini +++ b/templates/folsom/ovs_quantum_plugin.ini @@ -11,7 +11,7 @@ local_ip = {{ local_ip }} [DATABASE] {% if database_host -%} -sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ 
database }}?quantum?charset=utf8 +sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}?charset=utf8 reconnect_interval = 2 {% else -%} connection = sqlite:////var/lib/quantum/quantum.sqlite diff --git a/templates/folsom/quantum.conf b/templates/folsom/quantum.conf index 1e0666a4..60dbcc13 100644 --- a/templates/folsom/quantum.conf +++ b/templates/folsom/quantum.conf @@ -13,7 +13,6 @@ core_plugin = {{ core_plugin }} {% endif -%} api_paste_config = /etc/quantum/api-paste.ini auth_strategy = keystone -control_exchange = quantum notification_driver = quantum.openstack.common.notifier.rpc_notifier default_notification_level = INFO notification_topics = notifications diff --git a/templates/neutron.conf b/templates/havana/neutron.conf similarity index 100% rename from templates/neutron.conf rename to templates/havana/neutron.conf diff --git a/templates/ovs_neutron_plugin.ini b/templates/havana/ovs_neutron_plugin.ini similarity index 100% rename from templates/ovs_neutron_plugin.ini rename to templates/havana/ovs_neutron_plugin.ini From 11af97214252e24c0e35262e22b8832d6e9a0366 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 24 Sep 2013 09:12:49 -0700 Subject: [PATCH 71/84] Fix neutron_plugin() to actually query for neutron and quantum. 
--- hooks/nova_compute_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 9d2a6cca..569ff292 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -237,7 +237,7 @@ def _network_config(): def neutron_plugin(): - return (_network_config().get('quantum_plugin') or + return (_network_config().get('neutron_plugin') or _network_config().get('quantum_plugin')) From 570df36fffcc0cec8a63e97ac6af682e5a8ffdd8 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Tue, 24 Sep 2013 10:11:51 -0700 Subject: [PATCH 72/84] Subscribe quantum/neutron.conf to NeutronComputeContext() to ensure package installation prior to actually rendering file. --- hooks/nova_compute_utils.py | 4 ++-- revision | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py index 569ff292..50f1a791 100644 --- a/hooks/nova_compute_utils.py +++ b/hooks/nova_compute_utils.py @@ -90,7 +90,7 @@ QUANTUM_CONF = '/etc/quantum/quantum.conf' QUANTUM_RESOURCES = { QUANTUM_CONF: { 'services': [], - 'contexts': [context.AMQPContext()], + 'contexts': [context.AMQPContext(), NeutronComputeContext()], } } @@ -99,7 +99,7 @@ NEUTRON_CONF = '/etc/neutron/neutron.conf' NEUTRON_RESOURCES = { NEUTRON_CONF: { 'services': [], - 'contexts': [context.AMQPContext()], + 'contexts': [context.AMQPContext(), NeutronComputeContext()], } } diff --git a/revision b/revision index a949a93d..b0d73241 100644 --- a/revision +++ b/revision @@ -1 +1 @@ -128 +129 From 4d68c2ce7c99e809f9656358e397d29be9b20c55 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Sep 2013 10:01:42 +0100 Subject: [PATCH 73/84] Rejig unit_tests to drop hook prefixes --- unit_tests/test_nova_compute_contexts.py | 4 ++-- unit_tests/test_nova_compute_hooks.py | 6 +++--- unit_tests/test_nova_compute_utils.py | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git 
a/unit_tests/test_nova_compute_contexts.py b/unit_tests/test_nova_compute_contexts.py index 4b5667da..37fa3021 100644 --- a/unit_tests/test_nova_compute_contexts.py +++ b/unit_tests/test_nova_compute_contexts.py @@ -1,9 +1,9 @@ from mock import patch -from unit_tests.test_utils import CharmTestCase +from test_utils import CharmTestCase from charmhelpers.contrib.openstack.context import OSContextError -import hooks.nova_compute_context as context +import nova_compute_context as context TO_PATCH = [ 'apt_install', diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index 03c44054..4b8cfef6 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -1,8 +1,8 @@ from mock import call, patch, MagicMock -from unit_tests.test_utils import CharmTestCase +from test_utils import CharmTestCase -import hooks.nova_compute_utils as utils +import nova_compute_utils as utils _reg = utils.register_configs _map = utils.restart_map @@ -10,7 +10,7 @@ _map = utils.restart_map utils.register_configs = MagicMock() utils.restart_map = MagicMock() -import hooks.nova_compute_hooks as hooks +import nova_compute_hooks as hooks utils.register_configs = _reg utils.restart_map = _map diff --git a/unit_tests/test_nova_compute_utils.py b/unit_tests/test_nova_compute_utils.py index 4135fc7f..1a03a165 100644 --- a/unit_tests/test_nova_compute_utils.py +++ b/unit_tests/test_nova_compute_utils.py @@ -1,9 +1,9 @@ from mock import patch, MagicMock, call -from unit_tests.test_utils import CharmTestCase, patch_open +from test_utils import CharmTestCase, patch_open -import hooks.nova_compute_utils as utils +import nova_compute_utils as utils TO_PATCH = [ 'config', @@ -212,7 +212,7 @@ class NovaComputeUtilsTests(CharmTestCase): _file.write.assert_called_with('foo_cert\n') check_call.assert_called_with(['update-ca-certificates']) - @patch('hooks.charmhelpers.contrib.openstack.templating.OSConfigRenderer') + 
@patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer') @patch.object(utils, 'quantum_enabled') @patch.object(utils, 'resource_map') def test_register_configs(self, resource_map, quantum, renderer): From 56258050a1e5f58722334df2dcc4dd89a396e422 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Sep 2013 10:31:31 +0100 Subject: [PATCH 74/84] Resync charmhelpers, use ovs helper to create bridges --- charm-helpers.yaml | 1 + .../charmhelpers/contrib/network/__init__.py | 0 .../contrib/network/ovs/__init__.py | 72 +++++++++++++++++++ .../contrib/storage/linux/ceph.py | 13 ++-- hooks/nova_compute_context.py | 10 +-- 5 files changed, 82 insertions(+), 14 deletions(-) create mode 100644 hooks/charmhelpers/contrib/network/__init__.py create mode 100644 hooks/charmhelpers/contrib/network/ovs/__init__.py diff --git a/charm-helpers.yaml b/charm-helpers.yaml index 3000e7c5..f73b209a 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -8,3 +8,4 @@ include: - contrib.hahelpers: - apache - cluster + - contrib.network.ovs diff --git a/hooks/charmhelpers/contrib/network/__init__.py b/hooks/charmhelpers/contrib/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/network/ovs/__init__.py b/hooks/charmhelpers/contrib/network/ovs/__init__.py new file mode 100644 index 00000000..983074fa --- /dev/null +++ b/hooks/charmhelpers/contrib/network/ovs/__init__.py @@ -0,0 +1,72 @@ +''' Helpers for interacting with OpenvSwitch ''' +import subprocess +import os +from charmhelpers.core.hookenv import ( + log, WARNING +) +from charmhelpers.core.host import ( + service +) + + +def add_bridge(name): + ''' Add the named bridge to openvswitch ''' + log('Creating bridge {}'.format(name)) + subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name]) + + +def del_bridge(name): + ''' Delete the named bridge from openvswitch ''' + log('Deleting bridge {}'.format(name)) + subprocess.check_call(["ovs-vsctl", "--", 
"--if-exists", "del-br", name]) + + +def add_bridge_port(name, port): + ''' Add a port to the named openvswitch bridge ''' + log('Adding port {} to bridge {}'.format(port, name)) + subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port", + name, port]) + subprocess.check_call(["ip", "link", "set", port, "up"]) + + +def del_bridge_port(name, port): + ''' Delete a port from the named openvswitch bridge ''' + log('Deleting port {} from bridge {}'.format(port, name)) + subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port", + name, port]) + subprocess.check_call(["ip", "link", "set", port, "down"]) + + +def set_manager(manager): + ''' Set the controller for the local openvswitch ''' + log('Setting manager for local ovs to {}'.format(manager)) + subprocess.check_call(['ovs-vsctl', 'set-manager', + 'ssl:{}'.format(manager)]) + + +CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem' + + +def get_certificate(): + ''' Read openvswitch certificate from disk ''' + if os.path.exists(CERT_PATH): + log('Reading ovs certificate from {}'.format(CERT_PATH)) + with open(CERT_PATH, 'r') as cert: + full_cert = cert.read() + begin_marker = "-----BEGIN CERTIFICATE-----" + end_marker = "-----END CERTIFICATE-----" + begin_index = full_cert.find(begin_marker) + end_index = full_cert.rfind(end_marker) + if end_index == -1 or begin_index == -1: + raise RuntimeError("Certificate does not contain valid begin" + " and end markers.") + full_cert = full_cert[begin_index:(end_index + len(end_marker))] + return full_cert + else: + log('Certificate not found', level=WARNING) + return None + + +def full_restart(): + ''' Full restart and reload of openvswitch ''' + service('force-reload-kmod', 'openvswitch-switch') diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 10e665d4..9bb9530c 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -97,12 
+97,13 @@ def pool_exists(service, name): return name in out -def get_osds(): +def get_osds(service): ''' Return a list of all Ceph Object Storage Daemons currently in the cluster ''' - return json.loads(check_output(['ceph', 'osd', 'ls', '--format=json'])) + return json.loads(check_output(['ceph', '--id', service, + 'osd', 'ls', '--format=json'])) def create_pool(service, name, replicas=2): @@ -113,17 +114,17 @@ def create_pool(service, name, replicas=2): return # Calculate the number of placement groups based # on upstream recommended best practices. - pgnum = (len(get_osds()) * 100 / replicas) + pgnum = (len(get_osds(service)) * 100 / replicas) cmd = [ 'ceph', '--id', service, 'osd', 'pool', 'create', - name, pgnum + name, str(pgnum) ] check_call(cmd) cmd = [ 'ceph', '--id', service, - 'osd', 'set', name, - 'size', replicas + 'osd', 'pool', 'set', name, + 'size', str(replicas) ] check_call(cmd) diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py index 8bd2d25b..c2c29f6c 100644 --- a/hooks/nova_compute_context.py +++ b/hooks/nova_compute_context.py @@ -18,6 +18,7 @@ from charmhelpers.core.hookenv import ( ) from charmhelpers.contrib.openstack.utils import get_host_ip, os_release +from charmhelpers.contrib.network.ovs import add_bridge # This is just a label and it must be consistent across @@ -311,14 +312,7 @@ class NeutronComputeContext(context.NeutronContext): def _ensure_bridge(self): if not service_running('openvswitch-switch'): service_start('openvswitch-switch') - - ovs_output = check_output(['ovs-vsctl', 'show']) - for ln in ovs_output.split('\n'): - if OVS_BRIDGE in ln.strip(): - log('Found OVS bridge: %s.' % OVS_BRIDGE) - return - log('Creating new OVS bridge: %s.' 
% OVS_BRIDGE) - check_call(['ovs-vsctl', 'add-br', OVS_BRIDGE]) + add_bridge(OVS_BRIDGE) def ovs_ctxt(self): # In addition to generating config context, ensure the OVS service From df965a1c6b645f9c02adb230e83de612b9630742 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Sep 2013 10:41:26 +0100 Subject: [PATCH 75/84] Fixup templates for havana --- templates/folsom/nova.conf | 1 + templates/{ => grizzly}/nova.conf | 0 templates/havana/nova.conf | 68 +++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+) rename templates/{ => grizzly}/nova.conf (100%) create mode 100644 templates/havana/nova.conf diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 50195202..75cf7db2 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -1,3 +1,4 @@ +# folsom ############################################################################### # [ WARNING ] # Configuration file maintained by Juju. Local changes may be overwritten. diff --git a/templates/nova.conf b/templates/grizzly/nova.conf similarity index 100% rename from templates/nova.conf rename to templates/grizzly/nova.conf diff --git a/templates/havana/nova.conf b/templates/havana/nova.conf new file mode 100644 index 00000000..f5b74b01 --- /dev/null +++ b/templates/havana/nova.conf @@ -0,0 +1,68 @@ +# havana +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose=True +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes + +{% if database_host -%} +sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} +{% endif -%} + +{% if rabbitmq_host -%} +rabbit_host = {{ rabbitmq_host }} +rabbit_userid = {{ rabbitmq_user }} +rabbit_password = {{ rabbitmq_password }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +{% endif -%} + +{% if glance_api_servers -%} +glance_api_servers = {{ glance_api_servers }} +{% endif -%} + +{% if rbd_pool -%} +rbd_pool = {{ rbd_pool }} +rbd_user = {{ rbd_user }} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% endif -%} + +{% if neutron_plugin and neutron_plugin == 'ovs' -%} +libvirt_vif_driver = {{ libvirt_vif_driver }} +libvirt_user_virtio_for_bridges = {{ libvirt_use_virtio_for_birdges }} +{% if neutron_security_groups -%} +security_group_api = neutron +nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} +{% endif -%} + +{% if network_manager_config -%} +{% for key, value in network_manager_config.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if volume_service_config -%} +{% for key, value in volume_service_config.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if user_config_flags -%} +{% for key, value in user_config_flags.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} From 7bdee80baa6b839995453ee1c31b4f7a5ce071d1 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Sep 2013 10:50:49 
+0100 Subject: [PATCH 76/84] Tidy up templates for neutron/quantum features --- templates/folsom/nova.conf | 3 ++- templates/grizzly/nova.conf | 35 +++++++++++++++++++++++++++-------- templates/havana/nova.conf | 26 ++++++++++++++++++-------- 3 files changed, 47 insertions(+), 17 deletions(-) diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 75cf7db2..640a7234 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -43,7 +43,7 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} {% endif -%} {% if neutron_plugin and neutron_plugin == 'ovs' -%} -libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver libvirt_user_virtio_for_bridges = True {% if neutron_security_groups -%} security_group_api = quantum @@ -59,6 +59,7 @@ nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% if network_manager == 'quantum' -%} network_api_class = nova.network.quantumv2.api.API +force_config_drive = True {% else -%} network_manager = nova.network.manager.FlatDHCPManager {% endif -%} diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf index 28f00e04..fe06a6df 100644 --- a/templates/grizzly/nova.conf +++ b/templates/grizzly/nova.conf @@ -1,3 +1,4 @@ +# grizzly ############################################################################### # [ WARNING ] # Configuration file maintained by Juju. Local changes may be overwritten. 
@@ -17,43 +18,61 @@ verbose=True ec2_private_dns_show_ip=True api_paste_config=/etc/nova/api-paste.ini volumes_path=/var/lib/nova/volumes +enabled_apis=ec2,osapi_compute,metadata +auth_strategy=keystone +compute_driver=libvirt.LibvirtDriver {% if database_host -%} sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} {% endif -%} + {% if rabbitmq_host -%} rabbit_host = {{ rabbitmq_host }} rabbit_userid = {{ rabbitmq_user }} rabbit_password = {{ rabbitmq_password }} rabbit_virtual_host = {{ rabbitmq_virtual_host }} {% endif -%} + {% if glance_api_servers -%} glance_api_servers = {{ glance_api_servers }} {% endif -%} + {% if rbd_pool -%} rbd_pool = {{ rbd_pool }} rbd_user = {{ rbd_user }} rbd_secret_uuid = {{ rbd_secret_uuid }} {% endif -%} -{% if quantum_plugin and quantum_plugin == 'ovs' -%} -libvirt_vif_driver = {{ libvirt_vif_driver }} -libvirt_user_virtio_for_bridges = {{ libvirt_use_virtio_for_birdges }} -{% if quantum_security_groups -%} + +{% if neutron_plugin and neutron_plugin == 'ovs' -%} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver +libvirt_user_virtio_for_bridges = True +{% if neutron_security_groups -%} security_group_api = quantum nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} {% endif -%} + {% if network_manager_config -%} {% for key, value in network_manager_config.iteritems() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} -{% if volume_service_config -%} -{% for key, value in volume_service_config.iteritems() -%} -{{ key }} = {{ value }} -{% endfor -%} + +{% if network_manager == 'quantum' -%} +network_api_class = nova.network.quantumv2.api.API +{% else -%} +network_manager = nova.network.manager.FlatDHCPManager {% endif -%} + +{% if volume_service -%} +volume_api_class=nova.volume.cinder.API +{% endif -%} + {% if user_config_flags -%} {% for key, value in user_config_flags.iteritems() -%} {{ key }} = {{ value }} {% endfor -%} {% endif -%} + 
+{% if live_migration_uri -%} +live_migration_uri = {{ live_migration_uri }} +{% endif -%} diff --git a/templates/havana/nova.conf b/templates/havana/nova.conf index f5b74b01..737238f8 100644 --- a/templates/havana/nova.conf +++ b/templates/havana/nova.conf @@ -1,4 +1,4 @@ -# havana +# grizzly ############################################################################### # [ WARNING ] # Configuration file maintained by Juju. Local changes may be overwritten. @@ -18,7 +18,9 @@ verbose=True ec2_private_dns_show_ip=True api_paste_config=/etc/nova/api-paste.ini volumes_path=/var/lib/nova/volumes - +enabled_apis=ec2,osapi_compute,metadata +auth_strategy=keystone +compute_driver=libvirt.LibvirtDriver {% if database_host -%} sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }} {% endif -%} @@ -41,8 +43,8 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} {% endif -%} {% if neutron_plugin and neutron_plugin == 'ovs' -%} -libvirt_vif_driver = {{ libvirt_vif_driver }} -libvirt_user_virtio_for_bridges = {{ libvirt_use_virtio_for_birdges }} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver +libvirt_user_virtio_for_bridges = True {% if neutron_security_groups -%} security_group_api = neutron nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver @@ -55,10 +57,14 @@ nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endfor -%} {% endif -%} -{% if volume_service_config -%} -{% for key, value in volume_service_config.iteritems() -%} -{{ key }} = {{ value }} -{% endfor -%} +{% if network_manager == 'neutron' -%} +network_api_class = nova.network.neutronv2.api.API +{% else -%} +network_manager = nova.network.manager.FlatDHCPManager +{% endif -%} + +{% if volume_service -%} +volume_api_class=nova.volume.cinder.API {% endif -%} {% if user_config_flags -%} @@ -66,3 +72,7 @@ nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {{ key }} = {{ value }} {% endfor -%} {% endif -%} + +{% if 
live_migration_uri -%} +live_migration_uri = {{ live_migration_uri }} +{% endif -%} From 7d27fe1c244bcdf144a493d06bc8b5c04b2e1954 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Sep 2013 11:11:46 +0100 Subject: [PATCH 77/84] Add options for nvp plugin, tidy --- templates/folsom/nova.conf | 2 +- templates/grizzly/nova.conf | 7 +++++-- templates/havana/nova.conf | 7 +++++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 640a7234..2df64ab9 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -65,7 +65,7 @@ network_manager = nova.network.manager.FlatDHCPManager {% endif -%} {% if volume_service -%} -volume_api_class=nova.volume.cinder.API +volume_api_class = nova.volume.cinder.API {% endif -%} {% if user_config_flags -%} diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf index fe06a6df..2420e8d9 100644 --- a/templates/grizzly/nova.conf +++ b/templates/grizzly/nova.conf @@ -44,13 +44,16 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} {% if neutron_plugin and neutron_plugin == 'ovs' -%} libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver -libvirt_user_virtio_for_bridges = True {% if neutron_security_groups -%} security_group_api = quantum nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} {% endif -%} +{% if neutron_plugin and neutron_plugin == 'nvp' -%} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver +{% endif -%} + {% if network_manager_config -%} {% for key, value in network_manager_config.iteritems() -%} {{ key }} = {{ value }} @@ -64,7 +67,7 @@ network_manager = nova.network.manager.FlatDHCPManager {% endif -%} {% if volume_service -%} -volume_api_class=nova.volume.cinder.API +volume_api_class = nova.volume.cinder.API {% endif -%} {% if user_config_flags -%} diff --git a/templates/havana/nova.conf b/templates/havana/nova.conf index 737238f8..5f726128 100644 --- 
a/templates/havana/nova.conf +++ b/templates/havana/nova.conf @@ -44,13 +44,16 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} {% if neutron_plugin and neutron_plugin == 'ovs' -%} libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver -libvirt_user_virtio_for_bridges = True {% if neutron_security_groups -%} security_group_api = neutron nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} {% endif -%} +{% if neutron_plugin and neutron_plugin == 'nvp' -%} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtOpenVswitchVirtualPortDriver +{% endif -%} + {% if network_manager_config -%} {% for key, value in network_manager_config.iteritems() -%} {{ key }} = {{ value }} @@ -64,7 +67,7 @@ network_manager = nova.network.manager.FlatDHCPManager {% endif -%} {% if volume_service -%} -volume_api_class=nova.volume.cinder.API +volume_api_class = nova.volume.cinder.API {% endif -%} {% if user_config_flags -%} From f5dec74bd008ca06168a2d3de9f81b1af93692aa Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Sep 2013 11:17:16 +0100 Subject: [PATCH 78/84] Correctly identify havana template --- templates/havana/nova.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/havana/nova.conf b/templates/havana/nova.conf index 5f726128..d9b63f2b 100644 --- a/templates/havana/nova.conf +++ b/templates/havana/nova.conf @@ -1,4 +1,4 @@ -# grizzly +# havana ############################################################################### # [ WARNING ] # Configuration file maintained by Juju. Local changes may be overwritten. 
From 879411bccb1ada37d898b2a3de44d3e03724c898 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 25 Sep 2013 11:23:09 +0100 Subject: [PATCH 79/84] Ensure dkms packages are installed prior to agent packages --- hooks/charmhelpers/contrib/openstack/context.py | 2 +- hooks/charmhelpers/contrib/openstack/neutron.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 92924e34..ccd55261 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -370,7 +370,7 @@ class NeutronContext(object): return None def _ensure_packages(self): - ensure_packages(self.packages) + [ensure_packages(pkgs) for pkgs in self.packages] def _save_flag_file(self): if self.network_manager == 'quantum': diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 37b5a7bd..aea57a4f 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,15 +23,15 @@ def quantum_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': ['quantum-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms'], + 'packages': [['openvswitch-datapath-dkms'], + ['quantum-plugin-openvswitch-agent']], }, 'nvp': { 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 
'QuantumPlugin.NvpPluginV2', 'services': [], - 'packages': ['quantum-plugin-nicira'], + 'packages': [], } } @@ -49,15 +49,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': ['neutron-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms'], + 'packages': [['openvswitch-datapath-dkms'], + ['neutron-plugin-openvswitch-agent']], }, 'nvp': { 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'NeutronPlugin.NvpPluginV2', 'services': [], - 'packages': ['neutron-plugin-nicira'], + 'packages': [], } } From e179da837c6792650042f7fc426b93e01846be10 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 27 Sep 2013 17:20:42 +0100 Subject: [PATCH 80/84] Add preinstall execd hook calls --- charm-helpers.yaml | 1 + .../charmhelpers/contrib/openstack/context.py | 2 +- .../charmhelpers/contrib/openstack/neutron.py | 12 ++--- .../templates/openstack_https_frontend.conf | 23 +++++++++ hooks/charmhelpers/payload/__init__.py | 1 + hooks/charmhelpers/payload/execd.py | 50 +++++++++++++++++++ hooks/nova_compute_hooks.py | 2 + templates/havana/neutron.conf | 4 +- unit_tests/test_nova_compute_hooks.py | 2 + 9 files changed, 89 insertions(+), 8 deletions(-) create mode 100644 hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf create mode 100644 hooks/charmhelpers/payload/__init__.py create mode 100644 hooks/charmhelpers/payload/execd.py diff --git a/charm-helpers.yaml b/charm-helpers.yaml index f73b209a..d38264e1 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -9,3 +9,4 @@ include: - apache - cluster - contrib.network.ovs + - payload.execd diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index ccd55261..92924e34 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -370,7 
+370,7 @@ class NeutronContext(object): return None def _ensure_packages(self): - [ensure_packages(pkgs) for pkgs in self.packages] + ensure_packages(self.packages) def _save_flag_file(self): if self.network_manager == 'quantum': diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index aea57a4f..37b5a7bd 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,15 +23,15 @@ def quantum_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [['openvswitch-datapath-dkms'], - ['quantum-plugin-openvswitch-agent']], + 'packages': ['quantum-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], }, 'nvp': { 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 'QuantumPlugin.NvpPluginV2', 'services': [], - 'packages': [], + 'packages': ['quantum-plugin-nicira'], } } @@ -49,15 +49,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [['openvswitch-datapath-dkms'], - ['neutron-plugin-openvswitch-agent']], + 'packages': ['neutron-plugin-openvswitch-agent', + 'openvswitch-datapath-dkms'], }, 'nvp': { 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
'NeutronPlugin.NvpPluginV2', 'services': [], - 'packages': [], + 'packages': ['neutron-plugin-nicira'], } } diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf new file mode 100644 index 00000000..e02dc751 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -0,0 +1,23 @@ +{% if endpoints -%} +{% for ext, int in endpoints -%} +Listen {{ ext }} +NameVirtualHost *:{{ ext }} + + ServerName {{ private_address }} + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + + + Order deny,allow + Allow from all + + + Order allow,deny + Allow from all + +{% endfor -%} +{% endif -%} diff --git a/hooks/charmhelpers/payload/__init__.py b/hooks/charmhelpers/payload/__init__.py new file mode 100644 index 00000000..fc9fbc08 --- /dev/null +++ b/hooks/charmhelpers/payload/__init__.py @@ -0,0 +1 @@ +"Tools for working with files injected into a charm just before deployment." 
diff --git a/hooks/charmhelpers/payload/execd.py b/hooks/charmhelpers/payload/execd.py new file mode 100644 index 00000000..6476a75f --- /dev/null +++ b/hooks/charmhelpers/payload/execd.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. + """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_call(submodule_path, shell=True, stderr=stderr) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. 
Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hooks/nova_compute_hooks.py b/hooks/nova_compute_hooks.py index e68b3c42..4cbcf2c1 100755 --- a/hooks/nova_compute_hooks.py +++ b/hooks/nova_compute_hooks.py @@ -31,6 +31,7 @@ from charmhelpers.contrib.openstack.utils import ( from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute +from charmhelpers.payload.execd import execd_preinstall from nova_compute_utils import ( create_libvirt_secret, @@ -58,6 +59,7 @@ CONFIGS = register_configs() @hooks.hook() def install(): + execd_preinstall() configure_installation_source(config('openstack-origin')) apt_update() apt_install(determine_packages(), fatal=True) diff --git a/templates/havana/neutron.conf b/templates/havana/neutron.conf index d5583aef..fb7187c7 100644 --- a/templates/havana/neutron.conf +++ b/templates/havana/neutron.conf @@ -8,20 +8,22 @@ state_path = /var/lib/neutron lock_path = $state_path/lock bind_host = 0.0.0.0 bind_port = 9696 + {% if core_plugin -%} core_plugin = {{ core_plugin }} {% endif -%} + api_paste_config = /etc/neutron/api-paste.ini auth_strategy = keystone notification_driver = neutron.openstack.common.notifier.rpc_notifier default_notification_level = INFO notification_topics = notifications + {% if rabbitmq_host -%} rabbit_host = {{ rabbitmq_host }} rabbit_userid = {{ rabbitmq_user }} rabbit_password = {{ rabbitmq_password }} rabbit_virtual_host = {{ rabbitmq_virtual_host }} - {% endif -%} diff --git a/unit_tests/test_nova_compute_hooks.py b/unit_tests/test_nova_compute_hooks.py index 4b8cfef6..36c3d82f 100644 --- a/unit_tests/test_nova_compute_hooks.py +++ b/unit_tests/test_nova_compute_hooks.py @@ -50,6 +50,7 @@ TO_PATCH = [ 
'register_configs', # misc_utils 'ensure_ceph_keyring', + 'execd_preinstall' ] @@ -72,6 +73,7 @@ class NovaComputeRelationsTests(CharmTestCase): self.configure_installation_source.assert_called_with(repo) self.assertTrue(self.apt_update.called) self.apt_install.assert_called_with(['foo', 'bar'], fatal=True) + self.execd_preinstall.assert_called() def test_config_changed_with_upgrade(self): self.openstack_upgrade_available.return_value = True From 2fb78f8431b1af22936aad60a99bda760ecd4436 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 30 Sep 2013 10:58:41 +0100 Subject: [PATCH 81/84] Re-sync charmhelpers for fix for dkms before switch restart --- hooks/charmhelpers/contrib/openstack/context.py | 2 +- hooks/charmhelpers/contrib/openstack/neutron.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 92924e34..ccd55261 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -370,7 +370,7 @@ class NeutronContext(object): return None def _ensure_packages(self): - ensure_packages(self.packages) + [ensure_packages(pkgs) for pkgs in self.packages] def _save_flag_file(self): if self.network_manager == 'quantum': diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 37b5a7bd..d18d41e3 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -23,15 +23,15 @@ def quantum_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': ['quantum-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms'], + 'packages': [['openvswitch-datapath-dkms'], + ['quantum-plugin-openvswitch-agent']], }, 'nvp': { 'config': '/etc/quantum/plugins/nicira/nvp.ini', 'driver': 
'quantum.plugins.nicira.nicira_nvp_plugin.' 'QuantumPlugin.NvpPluginV2', 'services': [], - 'packages': ['quantum-plugin-nicira'], + 'packages': [], } } @@ -49,15 +49,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': ['neutron-plugin-openvswitch-agent', - 'openvswitch-datapath-dkms'], + 'packages': [['openvswitch-datapath-dkms'], + ['quantum-plugin-openvswitch-agent']], }, 'nvp': { 'config': '/etc/neutron/plugins/nicira/nvp.ini', 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'NeutronPlugin.NvpPluginV2', 'services': [], - 'packages': ['neutron-plugin-nicira'], + 'packages': [], } } From 7c3765e468619a259aa3959689ec55164fd82ef6 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 1 Oct 2013 12:16:21 +0100 Subject: [PATCH 82/84] Amend key for firewall driver --- templates/folsom/nova.conf | 2 +- templates/grizzly/nova.conf | 2 +- templates/havana/nova.conf | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/folsom/nova.conf b/templates/folsom/nova.conf index 2df64ab9..6bc35db2 100644 --- a/templates/folsom/nova.conf +++ b/templates/folsom/nova.conf @@ -47,7 +47,7 @@ libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver libvirt_user_virtio_for_bridges = True {% if neutron_security_groups -%} security_group_api = quantum -nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} {% endif -%} diff --git a/templates/grizzly/nova.conf b/templates/grizzly/nova.conf index 2420e8d9..8d73fc08 100644 --- a/templates/grizzly/nova.conf +++ b/templates/grizzly/nova.conf @@ -46,7 +46,7 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver {% if neutron_security_groups -%} security_group_api = quantum -nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +firewall_driver = 
nova.virt.firewall.NoopFirewallDriver {% endif -%} {% endif -%} diff --git a/templates/havana/nova.conf b/templates/havana/nova.conf index d9b63f2b..e19108dd 100644 --- a/templates/havana/nova.conf +++ b/templates/havana/nova.conf @@ -46,7 +46,7 @@ rbd_secret_uuid = {{ rbd_secret_uuid }} libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver {% if neutron_security_groups -%} security_group_api = neutron -nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} {% endif -%} From 0d533610029bbe02cdb78771b222f87ce5d3d7d5 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Sun, 13 Oct 2013 15:51:26 -0700 Subject: [PATCH 83/84] Sync helpers. --- .../charmhelpers/contrib/openstack/neutron.py | 13 +++++++-- hooks/charmhelpers/contrib/openstack/utils.py | 28 +++++++++++-------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index d18d41e3..d27820e7 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,5 +1,7 @@ # Various utilies for dealing with Neutron and the renaming from Quantum. 
+from subprocess import check_output + from charmhelpers.core.hookenv import ( config, log, @@ -9,6 +11,13 @@ from charmhelpers.core.hookenv import ( from charmhelpers.contrib.openstack.utils import os_release +def headers_package(): + """Ensures correct linux-headers for running kernel are installed, + for building DKMS package""" + kver = check_output(['uname', '-r']).strip() + return 'linux-headers-%s' % kver + + # legacy def quantum_plugins(): from charmhelpers.contrib.openstack import context @@ -23,7 +32,7 @@ def quantum_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [['openvswitch-datapath-dkms'], + 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], ['quantum-plugin-openvswitch-agent']], }, 'nvp': { @@ -49,7 +58,7 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron')], 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [['openvswitch-datapath-dkms'], + 'packages': [[headers_package(), 'openvswitch-datapath-dkms'], ['quantum-plugin-openvswitch-agent']], }, 'nvp': { diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 39f627df..62d207f9 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -45,16 +45,17 @@ OPENSTACK_CODENAMES = OrderedDict([ ]) # The ugly duckling -SWIFT_CODENAMES = { - '1.4.3': 'diablo', - '1.4.8': 'essex', - '1.7.4': 'folsom', - '1.7.6': 'grizzly', - '1.7.7': 'grizzly', - '1.8.0': 'grizzly', - '1.9.0': 'havana', - '1.9.1': 'havana', -} +SWIFT_CODENAMES = OrderedDict([ + ('1.4.3', 'diablo'), + ('1.4.8', 'essex'), + ('1.7.4', 'folsom'), + ('1.8.0', 'grizzly'), + ('1.7.7', 'grizzly'), + ('1.7.6', 'grizzly'), + ('1.10.0', 'havana'), + ('1.9.1', 'havana'), + ('1.9.0', 'havana'), +]) def error_out(msg): @@ -137,8 +138,11 @@ def get_os_codename_package(package, fatal=True): try: 
if 'swift' in pkg.name: - vers = vers[:5] - return SWIFT_CODENAMES[vers] + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] else: vers = vers[:6] return OPENSTACK_CODENAMES[vers] From 9babf3feae21fb9506b35479b2087a4deb8ab0c9 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 14 Oct 2013 18:32:42 -0700 Subject: [PATCH 84/84] Update charm-helpers config to point to upstream repo, re-sync helpers. --- charm-helpers.yaml | 2 +- .../charmhelpers/contrib/openstack/context.py | 89 +++++++++++++++++++ .../charmhelpers/contrib/openstack/neutron.py | 4 +- 3 files changed, 92 insertions(+), 3 deletions(-) diff --git a/charm-helpers.yaml b/charm-helpers.yaml index d38264e1..ac41438a 100644 --- a/charm-helpers.yaml +++ b/charm-helpers.yaml @@ -1,4 +1,4 @@ -branch: lp:~openstack-charmers/charm-helpers/to_upstream +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index ccd55261..13fdd65a 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,3 +1,4 @@ +import json import os from base64 import b64decode @@ -21,6 +22,7 @@ from charmhelpers.core.hookenv import ( related_units, unit_get, unit_private_ip, + ERROR, WARNING, ) @@ -431,3 +433,90 @@ class OSConfigFlagContext(OSContextGenerator): flags[k.strip()] = v ctxt = {'user_config_flags': flags} return ctxt + + +class SubordinateConfigContext(OSContextGenerator): + """ + Responsible for inspecting relations to subordinates that + may be exporting required config via a json blob. + + The subordinate interface allows subordinates to export their + configuration requirements to the principle for multiple config + files and multiple serivces. 
Ie, a subordinate that has interfaces + to both glance and nova may export to following yaml blob as json: + + glance: + /etc/glance/glance-api.conf: + sections: + DEFAULT: + - [key1, value1] + /etc/glance/glance-registry.conf: + MYSECTION: + - [key2, value2] + nova: + /etc/nova/nova.conf: + sections: + DEFAULT: + - [key3, value3] + + + It is then up to the principle charms to subscribe this context to + the service+config file it is interestd in. Configuration data will + be available in the template context, in glance's case, as: + ctxt = { + ... other context ... + 'subordinate_config': { + 'DEFAULT': { + 'key1': 'value1', + }, + 'MYSECTION': { + 'key2': 'value2', + }, + } + } + + """ + def __init__(self, service, config_file, interface): + """ + :param service : Service name key to query in any subordinate + data found + :param config_file : Service's config file to query sections + :param interface : Subordinate interface to inspect + """ + self.service = service + self.config_file = config_file + self.interface = interface + + def __call__(self): + ctxt = {} + for rid in relation_ids(self.interface): + for unit in related_units(rid): + sub_config = relation_get('subordinate_configuration', + rid=rid, unit=unit) + if sub_config and sub_config != '': + try: + sub_config = json.loads(sub_config) + except: + log('Could not parse JSON from subordinate_config ' + 'setting from %s' % rid, level=ERROR) + continue + + if self.service not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s service' % (rid, self.service)) + continue + + sub_config = sub_config[self.service] + if self.config_file not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s' % (rid, self.config_file)) + continue + + sub_config = sub_config[self.config_file] + for k, v in sub_config.iteritems(): + ctxt[k] = v + + if not ctxt: + ctxt['sections'] = {} + + return ctxt diff --git 
a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index d27820e7..a27ce953 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -85,7 +85,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): _plugin = plugins[plugin] except KeyError: log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) - raise + raise Exception try: return _plugin[attr] @@ -108,7 +108,7 @@ def network_manager(): if release in ['essex']: # E does not support neutron log('Neutron networking not supported in Essex.', level=ERROR) - raise + raise Exception elif release in ['folsom', 'grizzly']: # neutron is named quantum in F and G return 'quantum'