[gandelman-a][james-page] NVP support

This commit is contained in:
James Page 2013-11-17 21:53:21 +00:00
commit e2b5b8c46d
18 changed files with 856 additions and 58 deletions

View File

@ -6,4 +6,5 @@ include:
- contrib.openstack - contrib.openstack
- contrib.hahelpers - contrib.hahelpers
- contrib.network.ovs - contrib.network.ovs
- contrib.storage.linux
- payload.execd - payload.execd

View File

@ -69,4 +69,7 @@ def get_certificate():
def full_restart(): def full_restart():
''' Full restart and reload of openvswitch ''' ''' Full restart and reload of openvswitch '''
service('force-reload-kmod', 'openvswitch-switch') if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
service('start', 'openvswitch-force-reload-kmod')
else:
service('force-reload-kmod', 'openvswitch-switch')

View File

@ -0,0 +1,17 @@
''' Helper for managing alternatives for file conflict resolution '''
import subprocess
import shutil
import os
def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration '''
    # A plain file or directory sitting at the target path would block
    # the alternatives symlink; preserve it as a .bak copy first.
    if not os.path.islink(target) and os.path.exists(target):
        shutil.move(target, '{}.bak'.format(target))
    alt_cmd = ['update-alternatives', '--force', '--install',
               target, name, source, str(priority)]
    subprocess.check_call(alt_cmd)

View File

@ -385,16 +385,33 @@ class NeutronContext(object):
def ovs_ctxt(self): def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver', driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager) self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = { ovs_ctxt = {
'core_plugin': driver, 'core_plugin': driver,
'neutron_plugin': 'ovs', 'neutron_plugin': 'ovs',
'neutron_security_groups': self.neutron_security_groups, 'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(), 'local_ip': unit_private_ip(),
'config': config
} }
return ovs_ctxt return ovs_ctxt
def nvp_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
nvp_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'nvp',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config
}
return nvp_ctxt
def __call__(self): def __call__(self):
self._ensure_packages() self._ensure_packages()
@ -408,6 +425,8 @@ class NeutronContext(object):
if self.plugin == 'ovs': if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt()) ctxt.update(self.ovs_ctxt())
elif self.plugin == 'nvp':
ctxt.update(self.nvp_ctxt())
self._save_flag_file() self._save_flag_file()
return ctxt return ctxt

View File

@ -34,13 +34,23 @@ def quantum_plugins():
'services': ['quantum-plugin-openvswitch-agent'], 'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'], 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
['quantum-plugin-openvswitch-agent']], ['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
}, },
'nvp': { 'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini', 'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2', 'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron')],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
} }
} }
@ -60,13 +70,23 @@ def neutron_plugins():
'services': ['neutron-plugin-openvswitch-agent'], 'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'], 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
['quantum-plugin-openvswitch-agent']], ['quantum-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
}, },
'nvp': { 'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini', 'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2', 'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron')],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
} }
} }

View File

@ -13,19 +13,28 @@ from charmhelpers.core.hookenv import (
config, config,
log as juju_log, log as juju_log,
charm_dir, charm_dir,
ERROR,
INFO
) )
from charmhelpers.core.host import ( from charmhelpers.contrib.storage.linux.lvm import (
lsb_release, deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
) )
from charmhelpers.fetch import ( from charmhelpers.core.host import lsb_release, mounts, umount
apt_install, from charmhelpers.fetch import apt_install
) from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'), ('oneiric', 'diablo'),
('precise', 'essex'), ('precise', 'essex'),
@ -57,6 +66,8 @@ SWIFT_CODENAMES = OrderedDict([
('1.9.0', 'havana'), ('1.9.0', 'havana'),
]) ])
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg): def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR') juju_log("FATAL ERROR: %s" % msg, level='ERROR')
@ -67,7 +78,7 @@ def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.''' '''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = '' rel = ''
if src == 'distro': if src in ['distro', 'distro-proposed']:
try: try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError: except KeyError:
@ -202,6 +213,10 @@ def configure_installation_source(rel):
'''Configure apt installation source.''' '''Configure apt installation source.'''
if rel == 'distro': if rel == 'distro':
return return
elif rel == 'distro-proposed':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:": elif rel[:4] == "ppa:":
src = rel src = rel
subprocess.check_call(["add-apt-repository", "-y", src]) subprocess.check_call(["add-apt-repository", "-y", src])
@ -299,6 +314,62 @@ def openstack_upgrade_available(package):
return apt.version_compare(available_vers, cur_vers) == 1 return apt.version_compare(available_vers, cur_vers) == 1
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: '
'block_device=%s.' % block_device, level=ERROR)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev,
level=ERROR)
return bdev
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
def is_ip(address): def is_ip(address):
""" """
Returns True if address is a valid IP address. Returns True if address is a valid IP address.

View File

@ -0,0 +1,383 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import errno
import json
import os
import shutil
import time
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
WARNING,
ERROR
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
service_running,
umount,
)
from charmhelpers.fetch import (
apt_install,
)
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
CEPH_CONF = """[global]
auth supported = {auth}
keyring = {keyring}
mon host = {mon_hosts}
"""
def install():
    ''' Basic Ceph client installation '''
    # The config directory must exist before keyrings/ceph.conf can be
    # written into it; package install alone does not guarantee that.
    if not os.path.exists("/etc/ceph"):
        os.mkdir("/etc/ceph")
    apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
    ''' Check to see if a RADOS block device exists '''
    try:
        listing = check_output(['rbd', 'list', '--id', service,
                                '--pool', pool])
    except CalledProcessError:
        # 'rbd list' failing (e.g. no such pool) is treated as absence.
        return False
    return rbd_img in listing
def create_rbd_image(service, pool, image, sizemb):
    ''' Create a new RADOS block device '''
    check_call(['rbd', 'create', image,
                '--size', str(sizemb),
                '--id', service,
                '--pool', pool])
def pool_exists(service, name):
    ''' Check to see if a RADOS pool already exists '''
    try:
        pools = check_output(['rados', '--id', service, 'lspools'])
    except CalledProcessError:
        # Unable to query the cluster; report the pool as absent.
        return False
    return name in pools
def get_osds(service):
    '''
    Return a list of all Ceph Object Storage Daemons
    currently in the cluster
    '''
    # 'osd ls --format=json' is only available from ceph 0.56 onwards.
    version = ceph_version()
    if not version or version < '0.56':
        return None
    return json.loads(check_output(['ceph', '--id', service,
                                    'osd', 'ls', '--format=json']))
def create_pool(service, name, replicas=2):
    ''' Create a new RADOS pool sized per upstream pg guidance.

    :param service: str: ceph client id to authenticate as.
    :param name: str: name of the pool to create.
    :param replicas: int: replica count, also used to size placement groups.
    '''
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return
    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    osds = get_osds(service)
    if osds:
        # Floor division: pgnum must be an integer. Plain '/' would yield
        # a float under true division and str() would then produce an
        # invalid argument like '200.0' for 'ceph osd pool create'.
        pgnum = (len(osds) * 100 // replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'create',
        name, str(pgnum)
    ]
    check_call(cmd)
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'set', name,
        'size', str(replicas)
    ]
    check_call(cmd)
def delete_pool(service, name):
    ''' Delete a RADOS pool from ceph '''
    # Ceph refuses pool deletion without the explicit confirmation flag.
    check_call(['ceph', '--id', service,
                'osd', 'pool', 'delete',
                name, '--yes-i-really-really-mean-it'])
def _keyfile_path(service):
    ''' Return the path of the bare secret-key file for a named client. '''
    return KEYFILE.format(service)
def _keyring_path(service):
    ''' Return the path of the ceph keyring for a named client. '''
    return KEYRING.format(service)
def create_keyring(service, key):
    ''' Create a new Ceph keyring containing key'''
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        # Never clobber an existing keyring.
        log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
        return
    check_call([
        'ceph-authtool', keyring, '--create-keyring',
        '--name=client.{}'.format(service),
        '--add-key={}'.format(key),
    ])
    log('ceph: Created new ring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
    ''' Create a file containing key '''
    key_path = _keyfile_path(service)
    if not os.path.exists(key_path):
        with open(key_path, 'w') as fd:
            fd.write(key)
        log('ceph: Created new keyfile at %s.' % key_path, level=INFO)
    else:
        # Never clobber an existing key file.
        log('ceph: Keyfile exists at %s.' % key_path, level=WARNING)
def get_ceph_nodes():
    ''' Query named relation 'ceph' to determine current mon nodes '''
    return [relation_get('private-address', unit=unit, rid=r_id)
            for r_id in relation_ids('ceph')
            for unit in related_units(r_id)]
def configure(service, key, auth):
    ''' Perform basic configuration of Ceph '''
    create_keyring(service, key)
    create_key_file(service, key)
    mon_hosts = get_ceph_nodes()
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF.format(
            auth=auth,
            keyring=_keyring_path(service),
            mon_hosts=",".join(map(str, mon_hosts))))
    # Load the rbd kernel module now and on every boot.
    modprobe('rbd')
def image_mapped(name):
    ''' Determine whether a RADOS block device is mapped locally '''
    try:
        mapped = check_output(['rbd', 'showmapped'])
    except CalledProcessError:
        return False
    return name in mapped
def map_block_storage(service, pool, image):
    ''' Map a RADOS block device for local use '''
    check_call([
        'rbd', 'map', '{}/{}'.format(pool, image),
        '--user', service,
        '--secret', _keyfile_path(service),
    ])
def filesystem_mounted(fs):
    ''' Determine whether a filesystem is already mounted '''
    return any(f == fs for f, m in mounts())
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    ''' Make a new filesystem on the specified block device.

    Waits up to ``timeout`` seconds for the device node to appear (e.g.
    after an rbd map) before formatting it.

    :param blk_device: str: Full path of block device to format.
    :param fstype: str: Filesystem type passed to 'mkfs -t'.
    :param timeout: int: Seconds to wait for the device node.
    :raises IOError: with ENOENT if the device never appears.
    '''
    count = 0
    # Use the errno module directly; os.errno was an undocumented alias
    # of it and is not available on newer Python versions.
    e_noent = errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('ceph: gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        log('ceph: waiting for block device %s to appear' % blk_device,
            level=INFO)
        count += 1
        time.sleep(1)
    else:
        log('ceph: Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst):
    ''' Migrate data in data_src_dst to blk_device and then remount.

    The sequence is order-critical: the device is first mounted at a
    staging point (/mnt), the existing data copied onto it, and only then
    is the device remounted over the original directory.

    :param blk_device: str: Full path of block device receiving the data.
    :param data_src_dst: str: Directory whose contents are migrated and
        which becomes the final mount point.
    '''
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, uid, gid)
# TODO: re-use
def modprobe(module):
    ''' Load a kernel module and configure for auto-load on reboot.

    :param module: str: Name of the kernel module (e.g. 'rbd').
    '''
    log('ceph: Loading kernel module', level=INFO)
    cmd = ['modprobe', module]
    check_call(cmd)
    with open('/etc/modules', 'r+') as modules:
        # NOTE: substring check -- 'rbd' also matches e.g. 'rbd_foo'.
        if module not in modules.read():
            # Terminate the entry with a newline; appending the bare name
            # could fuse with an existing final line that lacks one, and
            # the fused name would never be loaded at boot.
            modules.write(module + '\n')
def copy_files(src, dst, symlinks=False, ignore=None):
    ''' Copy files from src to dst '''
    # Copy each top-level entry; directories recurse via copytree,
    # regular files keep metadata via copy2.
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            shutil.copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=None):
    """
    NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.

    :param system_services: list: services to stop before and restart after
        data migration (defaults to no services).
    """
    # Avoid the mutable-default-argument pitfall: a literal [] default is
    # shared across calls.
    system_services = system_services or []
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('ceph: Creating new pool {}.'.format(pool))
        create_pool(service, pool)
    if not rbd_exists(service, pool, rbd_img):
        log('ceph: Creating RBD image ({}).'.format(rbd_img))
        create_rbd_image(service, pool, rbd_img, sizemb)
    if not image_mapped(rbd_img):
        log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
        map_block_storage(service, pool, rbd_img)
    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    #      otherwise this hook will blow away existing data if its executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)
        for svc in system_services:
            if service_running(svc):
                log('ceph: Stopping services {} prior to migrating data.'
                    .format(svc))
                service_stop(svc)
        place_data_on_block_device(blk_device, mount_point)
        for svc in system_services:
            log('ceph: Starting service {} after migrating data.'
                .format(svc))
            service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None):
    '''
    Ensures a ceph keyring is created for a named service
    and optionally ensures user and group ownership.

    :param service: str: ceph client name the keyring is created for.
    :param user: str: optional owner for the keyring file.
    :param group: str: optional group for the keyring file.
    :returns: bool: False if no ceph key is available in relation state.
    '''
    key = None
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break
        if key:
            # Stop scanning once a key is found; continuing the outer
            # loop could overwrite it with None from a keyless relation.
            break
    if not key:
        return False
    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])
    return True
def ceph_version():
    ''' Retrieve the local version of ceph '''
    if not os.path.exists('/usr/bin/ceph'):
        return None
    # Output has the form 'ceph version X.Y.Z (hash)'; the version
    # string is the third whitespace-separated field.
    fields = check_output(['ceph', '-v']).split()
    if len(fields) > 3:
        return fields[2]
    return None

View File

@ -0,0 +1,62 @@
import os
import re
from subprocess import (
check_call,
check_output,
)
##################################################
# loopback device helpers.
##################################################
def loopback_devices():
    '''
    Parse through 'losetup -a' output to determine currently mapped
    loopback devices. Output is expected to look like:

        /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    mapping = {}
    for line in check_output(['losetup', '-a']).splitlines():
        if line == '':
            continue
        dev, _, backing = line.strip().split(' ')
        # The backing file is shown in parentheses; strip the trailing
        # ':' from the device name.
        mapping[dev.replace(':', '')] = \
            re.search('\((\S+)\)', backing).groups()[0]
    return mapping
def create_loopback(file_path):
    '''
    Create a loopback device for a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    # 'losetup --find' does not report which device it picked, so
    # re-scan the mapping table to locate our backing file.
    for dev, backing in loopback_devices().iteritems():
        if backing == file_path:
            return dev
def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If a loopback device is not already mapped to the file, a new one will
    be created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    # Reuse an existing mapping when present.
    for dev, backing in loopback_devices().iteritems():
        if backing == path:
            return dev
    # Create the backing file (sparse, via truncate) if it is missing.
    if not os.path.exists(path):
        check_call(['truncate', '--size', size, path])
    return create_loopback(path)

View File

@ -0,0 +1,88 @@
from subprocess import (
CalledProcessError,
check_call,
check_output,
Popen,
PIPE,
)
##################################################
# LVM helpers.
##################################################
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    vg = list_lvm_volume_group(block_device)
    if vg:
        # 'vgchange -an' deactivates all logical volumes in the group.
        cmd = ['vgchange', '-an', vg]
        check_call(cmd)
def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: boolean: True if block device is a PV, False if not.
    '''
    # pvdisplay exits non-zero for devices that are not LVM PVs.
    try:
        check_output(['pvdisplay', block_device])
    except CalledProcessError:
        return False
    return True
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    # 'pvremove -ff' prompts for confirmation; feed 'y' via stdin.
    # NOTE(review): communicate() is passed a str here, which is fine on
    # Python 2; Python 3 would need bytes or universal_newlines -- confirm
    # before porting.
    p = Popen(['pvremove', '-ff', block_device],
              stdin=PIPE)
    p.communicate(input='y\n')
def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: str: Name of volume group associated with block device or None
    '''
    vg = None
    for line in check_output(['pvdisplay', block_device]).splitlines():
        line = line.strip()
        if line.startswith('VG Name'):
            # The VG name is the last whitespace-separated field of
            # the 'VG Name <name>' line.
            vg = line.split().pop()
    return vg
def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.
    '''
    check_call(['pvcreate', block_device])
def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :param block_device: str: Full path of PV-initialized block device.
    '''
    check_call(['vgcreate', volume_group, block_device])

View File

@ -0,0 +1,25 @@
from os import stat
from stat import S_ISBLK
from subprocess import (
check_call
)
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    mode = stat(path).st_mode
    return S_ISBLK(mode)
def zap_disk(block_device):
    '''
    Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.

    WARNING: destructive -- wipes all GPT/MBR partition structures on
    the device.

    :param block_device: str: Full path of block device to clean.
    '''
    check_call(['sgdisk', '--zap-all', block_device])

View File

@ -9,6 +9,7 @@ import json
import yaml import yaml
import subprocess import subprocess
import UserDict import UserDict
from subprocess import CalledProcessError
CRITICAL = "CRITICAL" CRITICAL = "CRITICAL"
ERROR = "ERROR" ERROR = "ERROR"
@ -21,7 +22,7 @@ cache = {}
def cached(func): def cached(func):
''' Cache return values for multiple executions of func + args """Cache return values for multiple executions of func + args
For example: For example:
@ -32,7 +33,7 @@ def cached(func):
unit_get('test') unit_get('test')
will cache the result of unit_get + 'test' for future calls. will cache the result of unit_get + 'test' for future calls.
''' """
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
global cache global cache
key = str((func, args, kwargs)) key = str((func, args, kwargs))
@ -46,8 +47,8 @@ def cached(func):
def flush(key): def flush(key):
''' Flushes any entries from function cache where the """Flushes any entries from function cache where the
key is found in the function+args ''' key is found in the function+args """
flush_list = [] flush_list = []
for item in cache: for item in cache:
if key in item: if key in item:
@ -57,7 +58,7 @@ def flush(key):
def log(message, level=None): def log(message, level=None):
"Write a message to the juju log" """Write a message to the juju log"""
command = ['juju-log'] command = ['juju-log']
if level: if level:
command += ['-l', level] command += ['-l', level]
@ -66,7 +67,7 @@ def log(message, level=None):
class Serializable(UserDict.IterableUserDict): class Serializable(UserDict.IterableUserDict):
"Wrapper, an object that can be serialized to yaml or json" """Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj): def __init__(self, obj):
# wrap the object # wrap the object
@ -96,11 +97,11 @@ class Serializable(UserDict.IterableUserDict):
self.data = state self.data = state
def json(self): def json(self):
"Serialize the object to json" """Serialize the object to json"""
return json.dumps(self.data) return json.dumps(self.data)
def yaml(self): def yaml(self):
"Serialize the object to yaml" """Serialize the object to yaml"""
return yaml.dump(self.data) return yaml.dump(self.data)
@ -119,38 +120,38 @@ def execution_environment():
def in_relation_hook(): def in_relation_hook():
"Determine whether we're running in a relation hook" """Determine whether we're running in a relation hook"""
return 'JUJU_RELATION' in os.environ return 'JUJU_RELATION' in os.environ
def relation_type(): def relation_type():
"The scope for the current relation hook" """The scope for the current relation hook"""
return os.environ.get('JUJU_RELATION', None) return os.environ.get('JUJU_RELATION', None)
def relation_id(): def relation_id():
"The relation ID for the current relation hook" """The relation ID for the current relation hook"""
return os.environ.get('JUJU_RELATION_ID', None) return os.environ.get('JUJU_RELATION_ID', None)
def local_unit(): def local_unit():
"Local unit ID" """Local unit ID"""
return os.environ['JUJU_UNIT_NAME'] return os.environ['JUJU_UNIT_NAME']
def remote_unit(): def remote_unit():
"The remote unit for the current relation hook" """The remote unit for the current relation hook"""
return os.environ['JUJU_REMOTE_UNIT'] return os.environ['JUJU_REMOTE_UNIT']
def service_name(): def service_name():
"The name service group this unit belongs to" """The name service group this unit belongs to"""
return local_unit().split('/')[0] return local_unit().split('/')[0]
@cached @cached
def config(scope=None): def config(scope=None):
"Juju charm configuration" """Juju charm configuration"""
config_cmd_line = ['config-get'] config_cmd_line = ['config-get']
if scope is not None: if scope is not None:
config_cmd_line.append(scope) config_cmd_line.append(scope)
@ -163,6 +164,7 @@ def config(scope=None):
@cached @cached
def relation_get(attribute=None, unit=None, rid=None): def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
_args = ['relation-get', '--format=json'] _args = ['relation-get', '--format=json']
if rid: if rid:
_args.append('-r') _args.append('-r')
@ -174,9 +176,14 @@ def relation_get(attribute=None, unit=None, rid=None):
return json.loads(subprocess.check_output(_args)) return json.loads(subprocess.check_output(_args))
except ValueError: except ValueError:
return None return None
except CalledProcessError, e:
if e.returncode == 2:
return None
raise
def relation_set(relation_id=None, relation_settings={}, **kwargs): def relation_set(relation_id=None, relation_settings={}, **kwargs):
"""Set relation information for the current unit"""
relation_cmd_line = ['relation-set'] relation_cmd_line = ['relation-set']
if relation_id is not None: if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id)) relation_cmd_line.extend(('-r', relation_id))
@ -192,7 +199,7 @@ def relation_set(relation_id=None, relation_settings={}, **kwargs):
@cached @cached
def relation_ids(reltype=None): def relation_ids(reltype=None):
"A list of relation_ids" """A list of relation_ids"""
reltype = reltype or relation_type() reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json'] relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None: if reltype is not None:
@ -203,7 +210,7 @@ def relation_ids(reltype=None):
@cached @cached
def related_units(relid=None): def related_units(relid=None):
"A list of related units" """A list of related units"""
relid = relid or relation_id() relid = relid or relation_id()
units_cmd_line = ['relation-list', '--format=json'] units_cmd_line = ['relation-list', '--format=json']
if relid is not None: if relid is not None:
@ -213,7 +220,7 @@ def related_units(relid=None):
@cached @cached
def relation_for_unit(unit=None, rid=None): def relation_for_unit(unit=None, rid=None):
"Get the json represenation of a unit's relation" """Get the json represenation of a unit's relation"""
unit = unit or remote_unit() unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid) relation = relation_get(unit=unit, rid=rid)
for key in relation: for key in relation:
@ -225,7 +232,7 @@ def relation_for_unit(unit=None, rid=None):
@cached @cached
def relations_for_id(relid=None): def relations_for_id(relid=None):
"Get relations of a specific relation ID" """Get relations of a specific relation ID"""
relation_data = [] relation_data = []
relid = relid or relation_ids() relid = relid or relation_ids()
for unit in related_units(relid): for unit in related_units(relid):
@ -237,7 +244,7 @@ def relations_for_id(relid=None):
@cached @cached
def relations_of_type(reltype=None): def relations_of_type(reltype=None):
"Get relations of a specific type" """Get relations of a specific type"""
relation_data = [] relation_data = []
reltype = reltype or relation_type() reltype = reltype or relation_type()
for relid in relation_ids(reltype): for relid in relation_ids(reltype):
@ -249,7 +256,7 @@ def relations_of_type(reltype=None):
@cached @cached
def relation_types(): def relation_types():
"Get a list of relation types supported by this charm" """Get a list of relation types supported by this charm"""
charmdir = os.environ.get('CHARM_DIR', '') charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml')) mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf) md = yaml.safe_load(mdf)
@ -264,6 +271,7 @@ def relation_types():
@cached @cached
def relations(): def relations():
"""Get a nested dictionary of relation data for all related units"""
rels = {} rels = {}
for reltype in relation_types(): for reltype in relation_types():
relids = {} relids = {}
@ -277,15 +285,35 @@ def relations():
return rels return rels
@cached
def is_relation_made(relation, keys='private-address'):
'''
Determine whether a relation is established by checking for
presence of key(s). If a list of keys is provided, they
must all be present for the relation to be identified as made
'''
if isinstance(keys, str):
keys = [keys]
for r_id in relation_ids(relation):
for unit in related_units(r_id):
context = {}
for k in keys:
context[k] = relation_get(k, rid=r_id,
unit=unit)
if None not in context.values():
return True
return False
def open_port(port, protocol="TCP"): def open_port(port, protocol="TCP"):
"Open a service network port" """Open a service network port"""
_args = ['open-port'] _args = ['open-port']
_args.append('{}/{}'.format(port, protocol)) _args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args) subprocess.check_call(_args)
def close_port(port, protocol="TCP"): def close_port(port, protocol="TCP"):
"Close a service network port" """Close a service network port"""
_args = ['close-port'] _args = ['close-port']
_args.append('{}/{}'.format(port, protocol)) _args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args) subprocess.check_call(_args)
@ -293,6 +321,7 @@ def close_port(port, protocol="TCP"):
@cached @cached
def unit_get(attribute): def unit_get(attribute):
"""Get the unit ID for the remote unit"""
_args = ['unit-get', '--format=json', attribute] _args = ['unit-get', '--format=json', attribute]
try: try:
return json.loads(subprocess.check_output(_args)) return json.loads(subprocess.check_output(_args))
@ -301,22 +330,46 @@ def unit_get(attribute):
def unit_private_ip(): def unit_private_ip():
"""Get this unit's private IP address"""
return unit_get('private-address') return unit_get('private-address')
class UnregisteredHookError(Exception): class UnregisteredHookError(Exception):
"""Raised when an undefined hook is called"""
pass pass
class Hooks(object): class Hooks(object):
"""A convenient handler for hook functions.
Example:
hooks = Hooks()
# register a hook, taking its name from the function name
@hooks.hook()
def install():
...
# register a hook, providing a custom hook name
@hooks.hook("config-changed")
def config_changed():
...
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self): def __init__(self):
super(Hooks, self).__init__() super(Hooks, self).__init__()
self._hooks = {} self._hooks = {}
def register(self, name, function): def register(self, name, function):
"""Register a hook"""
self._hooks[name] = function self._hooks[name] = function
def execute(self, args): def execute(self, args):
"""Execute a registered hook based on args[0]"""
hook_name = os.path.basename(args[0]) hook_name = os.path.basename(args[0])
if hook_name in self._hooks: if hook_name in self._hooks:
self._hooks[hook_name]() self._hooks[hook_name]()
@ -324,6 +377,7 @@ class Hooks(object):
raise UnregisteredHookError(hook_name) raise UnregisteredHookError(hook_name)
def hook(self, *hook_names): def hook(self, *hook_names):
"""Decorator, registering them as hooks"""
def wrapper(decorated): def wrapper(decorated):
for hook_name in hook_names: for hook_name in hook_names:
self.register(hook_name, decorated) self.register(hook_name, decorated)
@ -337,4 +391,5 @@ class Hooks(object):
def charm_dir():
    """Return the root directory of the currently executing charm.

    Reads the CHARM_DIR environment variable set by juju; returns
    None when it is not set.
    """
    return os.environ.get('CHARM_DIR')

View File

@ -19,18 +19,22 @@ from hookenv import log
def service_start(service_name):
    """Start the named system service; True on success."""
    return service('start', service_name)
def service_stop(service_name):
    """Stop the named system service; True on success."""
    return service('stop', service_name)
def service_restart(service_name):
    """Restart the named system service; True on success."""
    return service('restart', service_name)
def service_reload(service_name, restart_on_failure=False): def service_reload(service_name, restart_on_failure=False):
"""Reload a system service, optionally falling back to restart if reload fails"""
service_result = service('reload', service_name) service_result = service('reload', service_name)
if not service_result and restart_on_failure: if not service_result and restart_on_failure:
service_result = service('restart', service_name) service_result = service('restart', service_name)
@ -38,11 +42,13 @@ def service_reload(service_name, restart_on_failure=False):
def service(action, service_name):
    """Run *action* against *service_name* with the 'service' command.

    Returns True when the command exits with status 0.
    """
    return subprocess.call(['service', service_name, action]) == 0
def service_running(service): def service_running(service):
"""Determine whether a system service is running"""
try: try:
output = subprocess.check_output(['service', service, 'status']) output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
@ -55,7 +61,7 @@ def service_running(service):
def adduser(username, password=None, shell='/bin/bash', system_user=False): def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user""" """Add a user to the system"""
try: try:
user_info = pwd.getpwnam(username) user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username)) log('user {0} already exists!'.format(username))
@ -138,7 +144,7 @@ def write_file(path, content, owner='root', group='root', perms=0444):
def mount(device, mountpoint, options=None, persist=False): def mount(device, mountpoint, options=None, persist=False):
'''Mount a filesystem''' """Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount'] cmd_args = ['mount']
if options is not None: if options is not None:
cmd_args.extend(['-o', options]) cmd_args.extend(['-o', options])
@ -155,7 +161,7 @@ def mount(device, mountpoint, options=None, persist=False):
def umount(mountpoint, persist=False): def umount(mountpoint, persist=False):
'''Unmount a filesystem''' """Unmount a filesystem"""
cmd_args = ['umount', mountpoint] cmd_args = ['umount', mountpoint]
try: try:
subprocess.check_output(cmd_args) subprocess.check_output(cmd_args)
@ -169,7 +175,7 @@ def umount(mountpoint, persist=False):
def mounts(): def mounts():
'''List of all mounted volumes as [[mountpoint,device],[...]]''' """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
with open('/proc/mounts') as f: with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]] # [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split() system_mounts = [m[1::-1] for m in [l.strip().split()
@ -178,7 +184,7 @@ def mounts():
def file_hash(path): def file_hash(path):
''' Generate a md5 hash of the contents of 'path' or None if not found ''' """Generate a md5 hash of the contents of 'path' or None if not found """
if os.path.exists(path): if os.path.exists(path):
h = hashlib.md5() h = hashlib.md5()
with open(path, 'r') as source: with open(path, 'r') as source:
@ -189,7 +195,7 @@ def file_hash(path):
def restart_on_change(restart_map): def restart_on_change(restart_map):
''' Restart services based on configuration files changing """Restart services based on configuration files changing
This function is used a decorator, for example This function is used a decorator, for example
@ -202,7 +208,7 @@ def restart_on_change(restart_map):
In this example, the cinder-api and cinder-volume services In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function. ceph_client_changed function.
''' """
def wrap(f): def wrap(f):
def wrapped_f(*args): def wrapped_f(*args):
checksums = {} checksums = {}
@ -220,7 +226,7 @@ def restart_on_change(restart_map):
def lsb_release(): def lsb_release():
'''Return /etc/lsb-release in a dict''' """Return /etc/lsb-release in a dict"""
d = {} d = {}
with open('/etc/lsb-release', 'r') as lsb: with open('/etc/lsb-release', 'r') as lsb:
for l in lsb: for l in lsb:
@ -230,7 +236,7 @@ def lsb_release():
def pwgen(length=None): def pwgen(length=None):
'''Generate a random pasword.''' """Generate a random pasword."""
if length is None: if length is None:
length = random.choice(range(35, 45)) length = random.choice(range(35, 45))
alphanumeric_chars = [ alphanumeric_chars = [

View File

@ -20,6 +20,32 @@ deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
PROPOSED_POCKET = """# Proposed PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
""" """
# Map of user-facing 'cloud:' source aliases onto the actual Ubuntu
# Cloud Archive pocket written into the apt sources entry.
CLOUD_ARCHIVE_POCKETS = {
    # Folsom
    'folsom': 'precise-updates/folsom',
    'precise-folsom': 'precise-updates/folsom',
    'precise-folsom/updates': 'precise-updates/folsom',
    'precise-updates/folsom': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'precise-folsom/proposed': 'precise-proposed/folsom',
    'precise-proposed/folsom': 'precise-proposed/folsom',
    # Grizzly
    'grizzly': 'precise-updates/grizzly',
    'precise-grizzly': 'precise-updates/grizzly',
    'precise-grizzly/updates': 'precise-updates/grizzly',
    'precise-updates/grizzly': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly',
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
    # Havana
    'havana': 'precise-updates/havana',
    'precise-havana': 'precise-updates/havana',
    'precise-havana/updates': 'precise-updates/havana',
    'precise-updates/havana': 'precise-updates/havana',
    'havana/proposed': 'precise-proposed/havana',
    # BUG FIX: key was misspelled 'precies-havana/proposed', which made
    # the expected 'precise-havana/proposed' alias unresolvable (unlike
    # the equivalent folsom/grizzly aliases above).
    'precise-havana/proposed': 'precise-proposed/havana',
    'precise-proposed/havana': 'precise-proposed/havana',
}
def filter_installed_packages(packages): def filter_installed_packages(packages):
@ -79,16 +105,35 @@ def apt_purge(packages, fatal=False):
subprocess.call(cmd) subprocess.call(cmd)
def apt_hold(packages, fatal=False):
    """Hold one or more packages so apt will not upgrade them.

    ``packages`` may be a single package name or an iterable of
    names.  With ``fatal`` set, a non-zero apt-mark exit raises.
    """
    cmd = ['apt-mark', 'hold']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Holding {}".format(packages))
    runner = subprocess.check_call if fatal else subprocess.call
    runner(cmd)
def add_source(source, key=None): def add_source(source, key=None):
if ((source.startswith('ppa:') or if (source.startswith('ppa:') or
source.startswith('http:'))): source.startswith('http:') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source]) subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'): elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True) fatal=True)
pocket = source.split(':')[-1] pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError('Unsupported cloud: source option %s' % pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(pocket)) apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed': elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME'] release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
@ -118,8 +163,11 @@ def configure_sources(update=False,
Note that 'null' (a.k.a. None) should not be quoted. Note that 'null' (a.k.a. None) should not be quoted.
""" """
sources = safe_load(config(sources_var)) sources = safe_load(config(sources_var))
keys = safe_load(config(keys_var)) keys = config(keys_var)
if isinstance(sources, basestring) and isinstance(keys, basestring): if keys is not None:
keys = safe_load(keys)
if isinstance(sources, basestring) and (
keys is None or isinstance(keys, basestring)):
add_source(sources, keys) add_source(sources, keys)
else: else:
if not len(sources) == len(keys): if not len(sources) == len(keys):

View File

@ -12,6 +12,7 @@ except ImportError:
apt_install("python-bzrlib") apt_install("python-bzrlib")
from bzrlib.branch import Branch from bzrlib.branch import Branch
class BzrUrlFetchHandler(BaseFetchHandler): class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs""" """Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source): def can_handle(self, source):
@ -46,4 +47,3 @@ class BzrUrlFetchHandler(BaseFetchHandler):
except OSError as e: except OSError as e:
raise UnhandledSource(e.strerror) raise UnhandledSource(e.strerror)
return dest_dir return dest_dir

View File

@ -80,7 +80,6 @@ NEUTRON_GATEWAY_PKGS = {
"nova-api-metadata" "nova-api-metadata"
], ],
NVP: [ NVP: [
"openvswitch-switch",
"neutron-dhcp-agent", "neutron-dhcp-agent",
'python-mysqldb', 'python-mysqldb',
'python-oslo.config', # Force upgrade 'python-oslo.config', # Force upgrade
@ -95,7 +94,7 @@ GATEWAY_PKGS = {
EARLY_PACKAGES = { EARLY_PACKAGES = {
OVS: ['openvswitch-datapath-dkms'], OVS: ['openvswitch-datapath-dkms'],
NVP: ['openvswitch-datapath-dkms'] NVP: []
} }
@ -396,13 +395,11 @@ def do_openstack_upgrade(configs):
def configure_ovs():
    """Configure Open vSwitch bridges for the configured plugin.

    For the OVS plugin: (re)start openvswitch if needed, then create
    the integration and external bridges, attaching the configured
    external port when one is set.  For NVP only the integration
    bridge is required.
    """
    if config('plugin') == OVS:
        if not service_running('openvswitch-switch'):
            full_restart()
        add_bridge(INT_BRIDGE)
        add_bridge(EXT_BRIDGE)
        ext_port = config('ext-port')
        if ext_port:
            add_bridge_port(EXT_BRIDGE, ext_port)
    if config('plugin') == NVP:
        add_bridge(INT_BRIDGE)

View File

@ -6,6 +6,11 @@ templating.OSConfigRenderer = MagicMock()
import quantum_utils import quantum_utils
try:
import neutronclient
except ImportError:
neutronclient = None
from test_utils import ( from test_utils import (
CharmTestCase CharmTestCase
) )
@ -60,7 +65,7 @@ class TestQuantumUtils(CharmTestCase):
self.config.return_value = 'nvp' self.config.return_value = 'nvp'
self.assertEquals( self.assertEquals(
quantum_utils.get_early_packages(), quantum_utils.get_early_packages(),
['openvswitch-datapath-dkms', 'linux-headers-2.6.18']) [])
@patch.object(quantum_utils, 'EARLY_PACKAGES') @patch.object(quantum_utils, 'EARLY_PACKAGES')
def test_get_early_packages_no_dkms(self, _early_packages): def test_get_early_packages_no_dkms(self, _early_packages):
@ -76,6 +81,7 @@ class TestQuantumUtils(CharmTestCase):
self.assertNotEqual(quantum_utils.get_packages(), []) self.assertNotEqual(quantum_utils.get_packages(), [])
def test_configure_ovs_starts_service_if_required(self): def test_configure_ovs_starts_service_if_required(self):
self.config.return_value = 'ovs'
self.service_running.return_value = False self.service_running.return_value = False
quantum_utils.configure_ovs() quantum_utils.configure_ovs()
self.assertTrue(self.full_restart.called) self.assertTrue(self.full_restart.called)
@ -96,11 +102,6 @@ class TestQuantumUtils(CharmTestCase):
]) ])
self.add_bridge_port.assert_called_with('br-ex', 'eth0') self.add_bridge_port.assert_called_with('br-ex', 'eth0')
def test_configure_ovs_nvp(self):
self.config.return_value = 'nvp'
quantum_utils.configure_ovs()
self.add_bridge.assert_called_with('br-int')
def test_do_openstack_upgrade(self): def test_do_openstack_upgrade(self):
self.config.side_effect = self.test_config.get self.config.side_effect = self.test_config.get
self.test_config.set('openstack-origin', 'cloud:precise-havana') self.test_config.set('openstack-origin', 'cloud:precise-havana')
@ -293,6 +294,8 @@ l3_agent_routers = {
class TestQuantumAgentReallocation(CharmTestCase): class TestQuantumAgentReallocation(CharmTestCase):
def setUp(self): def setUp(self):
if not neutronclient:
raise self.skipTest('Skipping, no neutronclient installed')
super(TestQuantumAgentReallocation, self).setUp(quantum_utils, super(TestQuantumAgentReallocation, self).setUp(quantum_utils,
TO_PATCH) TO_PATCH)