Merged next in

Liam Young 2014-10-15 06:49:03 +00:00
commit 3df37a3cf9
26 changed files with 946 additions and 435 deletions


@ -18,7 +18,8 @@ test:
# coreycb note: The -v should only be temporary until Amulet sends
# raise_status() messages to stderr:
# https://bugs.launchpad.net/amulet/+bug/1320357
@juju test -v -p AMULET_HTTP_PROXY
@juju test -v -p AMULET_HTTP_PROXY --timeout 900 \
00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse
sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml


@ -5,7 +5,8 @@ include:
- fetch
- contrib.openstack|inc=*
- contrib.storage
- contrib.hahelpers:
- apache
- contrib.peerstorage
- contrib.hahelpers
- payload.execd
- contrib.network.ip
- contrib.peerstorage


@ -97,6 +97,7 @@ options:
# HA configuration settings
vip:
type: string
default:
description: |
Virtual IP(s) to use to front API services in HA configuration.
.
@ -114,8 +115,21 @@ options:
description: |
Default multicast port number that will be used to communicate between
HA Cluster nodes.
haproxy-server-timeout:
type: int
default:
description: |
Server timeout configuration in ms for haproxy, used in HA configurations.
If not provided, the default value of 30000ms is used.
haproxy-client-timeout:
type: int
default:
description: |
Client timeout configuration in ms for haproxy, used in HA configurations.
If not provided, the default value of 30000ms is used.
ssl_cert:
type: string
default:
description: |
SSL certificate to install and use for API ports. Setting this value
and ssl_key will enable reverse proxying, point Nova's entry in the
@ -123,9 +137,11 @@ options:
issued by Keystone (if it is configured to do so).
ssl_key:
type: string
default:
description: SSL key to use with certificate specified as ssl_cert.
ssl_ca:
type: string
default:
description: |
SSL CA to use with the certificate and key provided - this is only
required if you are providing a privately signed ssl_cert and ssl_key.
@ -137,6 +153,7 @@ options:
# Neutron NVP and VMware NSX plugin configuration
nvp-controllers:
type: string
default:
description: Space delimited addresses of NVP/NSX controllers
nvp-username:
type: string
@ -152,12 +169,14 @@ options:
description: Name of the NVP cluster configuration to create (grizzly only)
nvp-tz-uuid:
type: string
default:
description: |
This is the UUID of the default NVP/NSX Transport zone that will be used for
creating tunneled isolated Quantum networks. It needs to be created
in NVP before starting Quantum with the nvp plugin.
nvp-l3-uuid:
type: string
default:
description: |
This is the UUID of the default NVP/NSX L3 Gateway Service.
# end of NVP/NSX configuration
@ -165,6 +184,7 @@ options:
# by default all access is over 'private-address'
os-admin-network:
type: string
default:
description: |
The IP address and netmask of the OpenStack Admin network (e.g.,
192.168.0.0/24)
@ -172,6 +192,7 @@ options:
This network will be used for admin endpoints.
os-internal-network:
type: string
default:
description: |
The IP address and netmask of the OpenStack Internal network (e.g.,
192.168.0.0/24)
@ -179,6 +200,7 @@ options:
This network will be used for internal endpoints.
os-public-network:
type: string
default:
description: |
The IP address and netmask of the OpenStack Public network (e.g.,
192.168.0.0/24)
@ -205,6 +227,7 @@ options:
* identity-service
console-access-protocol:
type: string
default:
description: |
Protocol to use when accessing virtual machine console. Supported types
are None, spice, xvpvnc, novnc and vnc (for both xvpvnc and novnc)
@ -220,3 +243,46 @@ options:
default: 'en-us'
description: |
Console keymap
debug:
default: False
type: boolean
description: Enable debug logging
verbose:
default: False
type: boolean
description: Enable verbose logging
worker-multiplier:
type: int
default: 2
description: |
The CPU core multiplier to use when configuring worker processes for
Nova and Neutron. By default, the number of workers for each daemon
is set to twice the number of CPU cores a service unit has.
cpu-allocation-ratio:
type: float
default: 16.0
description: |
The per physical core -> virtual core ratio to use in the Nova scheduler.
.
Increasing this value will increase instance density on compute nodes
at the expense of instance performance.
ram-allocation-ratio:
type: float
default: 1.5
description: |
The physical ram -> virtual ram ratio to use in the Nova scheduler.
.
Increasing this value will increase instance density on compute nodes
at the potential expense of instance performance.
prefer-ipv6:
type: boolean
default: False
description: |
If True, enables IPv6 support. The charm will expect network interfaces
to be configured with an IPv6 address. If set to False (default), IPv4
is expected.
.
NOTE: these charms do not currently support the IPv6 privacy extension. In
order for this charm to function correctly, the privacy extension must be
disabled and a non-temporary address must be configured/available on
your network interface.
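
As a rough illustration of how the worker-multiplier, cpu-allocation-ratio and
ram-allocation-ratio options above interact (the host size below is an
assumption made up for the example, not a recommendation):

# Hypothetical host: 16 physical cores, 64 GiB RAM; charm defaults as above.
physical_cores = 16
physical_ram_mb = 64 * 1024

cpu_allocation_ratio = 16.0   # default
ram_allocation_ratio = 1.5    # default
worker_multiplier = 2         # default

schedulable_vcpus = physical_cores * cpu_allocation_ratio    # 256.0 vCPUs
schedulable_ram_mb = physical_ram_mb * ram_allocation_ratio  # 98304.0 MB
workers_per_daemon = physical_cores * worker_multiplier      # 32 workers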

hooks/cell-relation-broken Symbolic link

@ -0,0 +1 @@
nova_cc_hooks.py

hooks/cell-relation-changed Symbolic link

@ -0,0 +1 @@
nova_cc_hooks.py

hooks/cell-relation-joined Symbolic link

@ -0,0 +1 @@
nova_cc_hooks.py


@ -1,297 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import commands
import os
import shutil
import time
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
ERROR
)
from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
umount,
)
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'
CEPH_CONF = """[global]
auth supported = %(auth)s
keyring = %(keyring)s
mon host = %(mon_hosts)s
log to syslog = %(use_syslog)s
err to syslog = %(use_syslog)s
clog to syslog = %(use_syslog)s
"""
def running(service):
# this local util can be dropped as soon the following branch lands
# in lp:charm-helpers
# https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
try:
output = check_output(['service', service, 'status'])
except CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def install():
ceph_dir = "/etc/ceph"
if not os.path.isdir(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
(service, pool))
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
check_call(cmd)
def pool_exists(service, name):
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
return name in out
def create_pool(service, name):
cmd = [
'rados',
'--id',
service,
'mkpool',
name
]
check_call(cmd)
def keyfile_path(service):
return KEYFILE % service
def keyring_path(service):
return KEYRING % service
def create_keyring(service, key):
keyring = keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=INFO)
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.%s' % service,
'--add-key=%s' % key
]
check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
# create a file containing the key
keyfile = keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
fd = open(keyfile, 'w')
fd.write(key)
fd.close()
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth):
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
mon_hosts = ",".join(map(str, hosts))
keyring = keyring_path(service)
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF % locals())
modprobe_kernel_module('rbd')
def image_mapped(image_name):
(rc, out) = commands.getstatusoutput('rbd showmapped')
return image_name in out
def map_block_storage(service, pool, image):
cmd = [
'rbd',
'map',
'%s/%s' % (pool, image),
'--user',
service,
'--secret',
keyfile_path(service),
]
check_call(cmd)
def filesystem_mounted(fs):
return fs in [f for m, f in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
try:
copy_files(data_src_dst, '/mnt')
except:
pass
# umount block device
umount('/mnt')
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
check_call(cmd)
# TODO: re-use
def modprobe_kernel_module(module):
log('ceph: Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
cmd = 'echo %s >> /etc/modules' % module
check_call(cmd, shell=True)
def copy_files(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
To be called from the current cluster leader.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being remounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool %s.' % pool, level=INFO)
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if its executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if running(svc):
log('Stopping services %s prior to migrating data.' % svc,
level=INFO)
service_stop(svc)
place_data_on_ceph(service, blk_device, mount_point, fstype)
for svc in system_services:
service_start(svc)


@ -6,6 +6,11 @@
# Adam Gandelman <adamg@ubuntu.com>
#
"""
Helpers for clustering and determining "cluster leadership", along with
other clustering-related utilities.
"""
import subprocess
import os
@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import (
config as config_get,
INFO,
ERROR,
WARNING,
unit_get,
)
@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
It relies on two mechanisms to determine leadership:
1. If the charm is part of a corosync cluster, call corosync to
determine leadership.
2. If the charm is not part of a corosync cluster, the leader is
determined as being "the alive unit with the lowest unit number". In
other words, the oldest surviving unit.
"""
if is_clustered():
if not is_crm_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
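
A minimal usage sketch of is_elected_leader (the hook body, resource name and
migrate_database stub below are assumptions for illustration, not part of
this change):

from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

CLUSTER_RES = 'grp_nova_vips'  # assumed corosync resource name

def migrate_database():
    """Stand-in for a charm-specific migration helper (assumption)."""
    pass

def shared_db_relation_changed():
    # Only the elected leader runs one-off work such as a schema migration;
    # every other unit simply skips it.
    if is_elected_leader(CLUSTER_RES):
        migrate_database()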
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
@ -38,7 +67,11 @@ def is_clustered():
return False
def is_leader(resource):
def is_crm_leader(resource):
"""
Returns True if the charm calling this is the elected corosync leader,
as returned by calling the external "crm" command.
"""
cmd = [
"crm", "resource",
"show", resource
@ -54,15 +87,31 @@ def is_leader(resource):
return False
def peer_units():
def is_leader(resource):
log("is_leader is deprecated. Please consider using is_crm_leader "
"instead.", level=WARNING)
return is_crm_leader(resource)
def peer_units(peer_relation="cluster"):
peers = []
for r_id in (relation_ids('cluster') or []):
for r_id in (relation_ids(peer_relation) or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def peer_ips(peer_relation='cluster', addr_key='private-address'):
'''Return a dict of peers and their private-address'''
peers = {}
for r_id in relation_ids(peer_relation):
for unit in relation_list(r_id):
peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
return peers
def oldest_peer(peers):
"""Determines who the oldest peer is by comparing unit numbers."""
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
@ -72,16 +121,9 @@ def oldest_peer(peers):
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
log("eligible_leader is deprecated. Please consider using "
"is_elected_leader instead.", level=WARNING)
return is_elected_leader(resource)
def https():


@ -0,0 +1,131 @@
from charmhelpers.core.hookenv import relation_id as current_relation_id
from charmhelpers.core.hookenv import (
is_relation_made,
relation_ids,
relation_get,
local_unit,
relation_set,
)
"""
This helper provides functions to support use of a peer relation
for basic key/value storage, with the added benefit that all storage
can be replicated across peer units.
Requirement to use:
To use this, the "peer_echo()" method has to be called from the peer
relation's relation-changed hook:
@hooks.hook("cluster-relation-changed") # Adapt this to your peer relation name
def cluster_relation_changed():
peer_echo()
Once this is done, you can use peer storage from anywhere:
@hooks.hook("some-hook")
def some_hook():
# You can store and retrieve key/values this way:
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
# There are peers available so we can work with peer storage
peer_store("mykey", "myvalue")
value = peer_retrieve("mykey")
print value
else:
print "No peers joind the relation, cannot share key/values :("
"""
def peer_retrieve(key, relation_name='cluster'):
"""Retrieve a named key from peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
return relation_get(attribute=key, rid=cluster_rid,
unit=local_unit())
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name))
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
inc_list=None, exc_list=None):
""" Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
inc_list = inc_list if inc_list else []
exc_list = exc_list if exc_list else []
peerdb_settings = peer_retrieve('-', relation_name=relation_name)
matched = {}
for k, v in peerdb_settings.items():
full_prefix = prefix + delimiter
if k.startswith(full_prefix):
new_key = k.replace(full_prefix, '')
if new_key in exc_list:
continue
if new_key in inc_list or len(inc_list) == 0:
matched[new_key] = v
return matched
def peer_store(key, value, relation_name='cluster'):
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name))
def peer_echo(includes=None):
"""Echo filtered attributes back onto the same relation for storage.
This is a requirement to use the peerstorage module - it needs to be called
from the peer relation's changed hook.
"""
rdata = relation_get()
echo_data = {}
if includes is None:
echo_data = rdata.copy()
for ex in ['private-address', 'public-address']:
if ex in echo_data:
echo_data.pop(ex)
else:
for attribute, value in rdata.iteritems():
for include in includes:
if include in attribute:
echo_data[attribute] = value
if len(echo_data) > 0:
relation_set(relation_settings=echo_data)
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
peer_store_fatal=False, relation_settings=None,
delimiter='_', **kwargs):
"""Store passed-in arguments both in argument relation and in peer storage.
It functions like doing relation_set() and peer_store() at the same time,
with the same data.
@param relation_id: the id of the relation to store the data on. Defaults
to the current relation.
@param peer_store_fatal: If set to True, the function will raise an exception
should the peer storage not be available.
relation_settings = relation_settings if relation_settings else {}
relation_set(relation_id=relation_id,
relation_settings=relation_settings,
**kwargs)
if is_relation_made(peer_relation_name):
for key, value in dict(kwargs.items() +
relation_settings.items()).iteritems():
key_prefix = relation_id or current_relation_id()
peer_store(key_prefix + delimiter + key,
value,
relation_name=peer_relation_name)
else:
if peer_store_fatal:
raise ValueError('Unable to detect '
'peer relation {}'.format(peer_relation_name))
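
A short usage sketch combining peer_echo with peer_store_and_set (hook names
and keys are assumptions for illustration; 'cluster' is the peer relation
assumed by the defaults above):

from charmhelpers.core.hookenv import Hooks
from charmhelpers.contrib.peerstorage import peer_echo, peer_store_and_set

hooks = Hooks()

@hooks.hook('cluster-relation-changed')
def cluster_relation_changed():
    # Required so that values stored by any one unit are echoed back onto
    # the relation and replicated to all peers.
    peer_echo()

@hooks.hook('shared-db-relation-joined')
def shared_db_joined(rid=None):
    # Writes the settings onto the shared-db relation and mirrors them into
    # peer storage in a single call.
    peer_store_and_set(relation_id=rid,
                       nova_database='nova',
                       nova_username='nova')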


@ -0,0 +1 @@
nova_cc_hooks.py


@ -1,4 +1,3 @@
from charmhelpers.core.hookenv import (
config, relation_ids, relation_set, log, ERROR,
unit_get, related_units, relation_get)
@ -13,6 +12,10 @@ from charmhelpers.contrib.hahelpers.cluster import (
is_clustered
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
def context_complete(ctxt):
_missing = []
@ -38,6 +41,24 @@ class ApacheSSLContext(context.ApacheSSLContext):
return super(ApacheSSLContext, self).__call__()
class NovaCellContext(context.OSContextGenerator):
interfaces = ['nova-cell']
def __call__(self):
log('Generating template context for cell')
ctxt = {}
for rid in relation_ids('cell'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
ctxt = {
'cell_type': rdata.get('cell_type'),
'cell_name': rdata.get('cell_name'),
}
if context.context_complete(ctxt):
return ctxt
return {}
class NeutronAPIContext(context.OSContextGenerator):
def __call__(self):
@ -159,10 +180,18 @@ def canonical_url(vip_setting='vip'):
scheme = 'http'
if https():
scheme = 'https'
if is_clustered():
addr = config(vip_setting)
if config('prefer-ipv6'):
if is_clustered():
addr = '[%s]' % config(vip_setting)
else:
addr = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
else:
addr = unit_get('private-address')
if is_clustered():
addr = config(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
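
For illustration only (addresses made up), the branches above produce URLs of
the following shapes:

# Hypothetical canonical_url() results:
#   prefer-ipv6 + clustered:   https://[2001:db8::10]
#   prefer-ipv6, unclustered:  http://[2001:db8::1]
#   IPv4 + clustered (vip):    https://10.0.0.10
#   IPv4, unclustered:         http://10.0.0.5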
@ -203,6 +232,7 @@ class NeutronCCContext(context.NeutronContext):
ctxt['nvp_controllers_list'] = \
_config['nvp-controllers'].split()
ctxt['nova_url'] = "{}:8774/v2".format(canonical_url())
return ctxt
@ -222,6 +252,7 @@ class IdentityServiceContext(context.IdentityServiceContext):
)
ctxt['keystone_ec2_url'] = ec2_tokens
ctxt['region'] = config('region')
return ctxt
@ -235,3 +266,18 @@ class NeutronPostgresqlDBContext(context.PostgresqlDBContext):
def __init__(self):
super(NeutronPostgresqlDBContext,
self).__init__(config('neutron-database'))
class NovaConfigContext(context.WorkerConfigContext):
def __call__(self):
ctxt = super(NovaConfigContext, self).__call__()
ctxt['cpu_allocation_ratio'] = config('cpu-allocation-ratio')
ctxt['ram_allocation_ratio'] = config('ram-allocation-ratio')
return ctxt
class NovaIPv6Context(context.BindHostContext):
def __call__(self):
ctxt = super(NovaIPv6Context, self).__call__()
ctxt['use_ipv6'] = config('prefer-ipv6')
return ctxt


@ -15,6 +15,7 @@ from charmhelpers.core.hookenv import (
charm_dir,
is_relation_made,
log,
local_unit,
ERROR,
relation_get,
relation_ids,
@ -28,15 +29,20 @@ from charmhelpers.core.host import (
restart_on_change,
service_running,
service_stop,
service_restart,
)
from charmhelpers.fetch import (
apt_install, apt_update
apt_install,
apt_update,
filter_installed_packages
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available,
os_release,
sync_db_with_multi_ipv6_addresses
)
from charmhelpers.contrib.openstack.neutron import (
@ -45,18 +51,28 @@ from charmhelpers.contrib.openstack.neutron import (
)
from nova_cc_context import (
NeutronAPIContext
NeutronAPIContext,
NovaCellContext,
)
from charmhelpers.contrib.peerstorage import (
peer_retrieve,
peer_echo,
)
from nova_cc_utils import (
api_port,
auth_token_config,
cmd_all_services,
determine_endpoints,
determine_packages,
determine_ports,
disable_services,
do_openstack_upgrade,
enable_services,
keystone_ca_cert_b64,
migrate_database,
migrate_neutron_database,
migrate_nova_database,
neutron_plugin,
save_script_rc,
services,
@ -76,12 +92,13 @@ from nova_cc_utils import (
service_guard,
guard_map,
get_topics,
services,
setup_ipv6,
)
from charmhelpers.contrib.hahelpers.cluster import (
eligible_leader,
get_hacluster_config,
is_leader,
)
from charmhelpers.payload.execd import execd_preinstall
@ -93,9 +110,14 @@ from charmhelpers.contrib.openstack.ip import (
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address
get_netmask_for_address,
get_address_in_network,
get_ipv6_addr,
is_ipv6
)
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
hooks = Hooks()
CONFIGS = register_configs()
@ -114,6 +136,9 @@ def install():
log('Installing %s to /usr/bin' % f)
shutil.copy2(f, '/usr/bin')
[open_port(port) for port in determine_ports()]
log('Disabling services until db relation joined')
disable_services()
cmd_all_services('stop')
@hooks.hook('config-changed')
@ -121,9 +146,17 @@ def install():
active=config('service-guard'))
@restart_on_change(restart_map(), stopstart=True)
def config_changed():
if config('prefer-ipv6'):
setup_ipv6()
sync_db_with_multi_ipv6_addresses(config('database'),
config('database-user'),
relation_prefix='nova')
global CONFIGS
if openstack_upgrade_available('nova-common'):
CONFIGS = do_openstack_upgrade()
[neutron_api_relation_joined(rid=rid, remote_restart=True)
for rid in relation_ids('neutron-api')]
save_script_rc()
configure_https()
CONFIGS.write_all()
@ -136,6 +169,8 @@ def config_changed():
identity_joined(rid=r_id)
for rid in relation_ids('zeromq-configuration'):
zeromq_configuration_relation_joined(rid)
[cluster_joined(rid) for rid in relation_ids('cluster')]
@hooks.hook('amqp-relation-joined')
def amqp_joined(relation_id=None):
@ -158,6 +193,25 @@ def amqp_changed():
CONFIGS.write(QUANTUM_CONF)
if network_manager() == 'neutron':
CONFIGS.write(NEUTRON_CONF)
[nova_cell_relation_joined(rid=rid)
for rid in relation_ids('cell')]
def conditional_neutron_migration():
if relation_ids('neutron-api'):
log('Not running neutron database migration as neutron-api service '
'is present.')
elif os_release('nova-common') <= 'icehouse':
log('Not running neutron database migration as migrations are handled '
'by the neutron-server process.')
else:
migrate_neutron_database()
# neutron-api service may have appeared while the migration was
# running so prod it just in case
[neutron_api_relation_joined(rid=rid, remote_restart=True)
for rid in relation_ids('neutron-api')]
if 'neutron-server' in services():
service_restart('neutron-server')
@hooks.hook('shared-db-relation-joined')
@ -170,14 +224,31 @@ def db_joined():
log(e, level=ERROR)
raise Exception(e)
relation_set(nova_database=config('database'),
nova_username=config('database-user'),
nova_hostname=unit_get('private-address'))
if network_manager() in ['quantum', 'neutron']:
# XXX: Renaming relations from quantum_* to neutron_* here.
relation_set(neutron_database=config('neutron-database'),
neutron_username=config('neutron-database-user'),
neutron_hostname=unit_get('private-address'))
config_neutron = True
else:
config_neutron = False
if config('prefer-ipv6'):
sync_db_with_multi_ipv6_addresses(config('database'),
config('database-user'),
relation_prefix='nova')
if config_neutron:
sync_db_with_multi_ipv6_addresses(config('neutron-database'),
config('neutron-database-user'),
relation_prefix='neutron')
else:
host = unit_get('private-address')
relation_set(nova_database=config('database'),
nova_username=config('database-user'),
nova_hostname=host)
if config_neutron:
# XXX: Renaming relations from quantum_* to neutron_* here.
relation_set(neutron_database=config('neutron-database'),
neutron_username=config('neutron-database-user'),
neutron_hostname=host)
@hooks.hook('pgsql-nova-db-relation-joined')
@ -215,10 +286,21 @@ def db_changed():
CONFIGS.write_all()
if eligible_leader(CLUSTER_RES):
migrate_database()
# Bugs 1353135 & 1187508. DBs can appear to be ready before the unit's
# ACL entry has been added. So, if the db supports passing a list of
# permitted units then check if we're in the list.
allowed_units = relation_get('nova_allowed_units')
if allowed_units and local_unit() not in allowed_units.split():
log('Allowed_units list provided and this unit not present')
return
migrate_nova_database()
log('Triggering remote cloud-compute restarts.')
[compute_joined(rid=rid, remote_restart=True)
for rid in relation_ids('cloud-compute')]
for rid in relation_ids('cloud-compute')]
log('Triggering remote cell restarts.')
[nova_cell_relation_joined(rid=rid, remote_restart=True)
for rid in relation_ids('cell')]
conditional_neutron_migration()
@hooks.hook('pgsql-nova-db-relation-changed')
@ -232,10 +314,11 @@ def postgresql_nova_db_changed():
CONFIGS.write_all()
if eligible_leader(CLUSTER_RES):
migrate_database()
migrate_nova_database()
log('Triggering remote cloud-compute restarts.')
[compute_joined(rid=rid, remote_restart=True)
for rid in relation_ids('cloud-compute')]
conditional_neutron_migration()
@hooks.hook('pgsql-neutron-db-relation-changed')
@ -263,8 +346,6 @@ def image_service_changed():
@hooks.hook('identity-service-relation-joined')
def identity_joined(rid=None):
if not eligible_leader(CLUSTER_RES):
return
public_url = canonical_url(CONFIGS, PUBLIC)
internal_url = canonical_url(CONFIGS, INTERNAL)
admin_url = canonical_url(CONFIGS, ADMIN)
@ -418,8 +499,6 @@ def console_settings():
def compute_joined(rid=None, remote_restart=False):
cons_settings = console_settings()
relation_set(relation_id=rid, **cons_settings)
if not eligible_leader(CLUSTER_RES):
return
rel_settings = {
'network_manager': network_manager(),
'volume_service': volume_service(),
@ -504,9 +583,6 @@ def compute_departed():
@hooks.hook('neutron-network-service-relation-joined',
'quantum-network-service-relation-joined')
def quantum_joined(rid=None):
if not eligible_leader(CLUSTER_RES):
return
rel_settings = neutron_settings()
# inform quantum about local keystone auth config
@ -520,6 +596,23 @@ def quantum_joined(rid=None):
relation_set(relation_id=rid, **rel_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
for addr_type in ADDRESS_TYPES:
address = get_address_in_network(
config('os-{}-network'.format(addr_type))
)
if address:
relation_set(
relation_id=relation_id,
relation_settings={'{}-address'.format(addr_type): address}
)
if config('prefer-ipv6'):
private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
relation_set(relation_id=relation_id,
relation_settings={'private-address': private_addr})
@hooks.hook('cluster-relation-changed',
'cluster-relation-departed')
@service_guard(guard_map(), CONFIGS,
@ -527,26 +620,45 @@ def quantum_joined(rid=None):
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
CONFIGS.write_all()
if relation_ids('cluster'):
peer_echo(includes='dbsync_state')
dbsync_state = peer_retrieve('dbsync_state')
if dbsync_state == 'complete':
enable_services()
cmd_all_services('start')
else:
log('Database sync not ready. Shutting down services')
disable_services()
cmd_all_services('stop')
@hooks.hook('ha-relation-joined')
def ha_joined():
config = get_hacluster_config()
cluster_config = get_hacluster_config()
resources = {
'res_nova_haproxy': 'lsb:haproxy',
}
resource_params = {
'res_nova_haproxy': 'op monitor interval="5s"'
}
vip_group = []
for vip in config['vip'].split():
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_nova_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_nova_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = get_iface_for_address(vip)
if iface is not None:
vip_key = 'res_nova_{}_vip'.format(iface)
resources[vip_key] = 'ocf:heartbeat:IPaddr2'
resources[vip_key] = res_nova_vip
resource_params[vip_key] = (
'params ip="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(vip=vip,
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=get_netmask_for_address(vip))
)
@ -562,8 +674,8 @@ def ha_joined():
'cl_nova_haproxy': 'res_nova_haproxy'
}
relation_set(init_services=init_services,
corosync_bindiface=config['ha-bindiface'],
corosync_mcastport=config['ha-mcastport'],
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
clones=clones)
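
As an illustration of the IPv6 branch above (interface and address values are
made up), an IPv6 VIP bound to eth0 would yield resource definitions along
these lines:

# Hypothetical resources/resource_params handed to the hacluster charm:
resources = {
    'res_nova_haproxy': 'lsb:haproxy',
    'res_nova_eth0_vip': 'ocf:heartbeat:IPv6addr',
}
resource_params = {
    'res_nova_haproxy': 'op monitor interval="5s"',
    'res_nova_eth0_vip': ('params ipv6addr="2001:db8::10" '
                          'cidr_netmask="64" nic="eth0"'),
}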
@ -583,28 +695,37 @@ def ha_changed():
if network_manager() == 'neutron':
CONFIGS.write(NEUTRON_CONF)
if not is_leader(CLUSTER_RES):
log('ha_changed: hacluster complete but we are not leader.')
return
log('Cluster configured, notifying other services and updating '
'keystone endpoint configuration')
for rid in relation_ids('identity-service'):
identity_joined(rid=rid)
@hooks.hook('shared-db-relation-broken',
'pgsql-nova-db-relation-broken')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
def db_departed():
CONFIGS.write_all()
for r_id in relation_ids('cluster'):
relation_set(relation_id=r_id, dbsync_state='incomplete')
disable_services()
cmd_all_services('stop')
@hooks.hook('amqp-relation-broken',
'cinder-volume-service-relation-broken',
'identity-service-relation-broken',
'image-service-relation-broken',
'nova-volume-service-relation-broken',
'shared-db-relation-broken',
'pgsql-nova-db-relation-broken',
'pgsql-neutron-db-relation-broken',
'quantum-network-service-relation-broken')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
def relation_broken():
CONFIGS.write_all()
[nova_cell_relation_joined(rid=rid)
for rid in relation_ids('cell')]
def configure_https():
@ -652,6 +773,8 @@ def nova_vmware_relation_changed():
@hooks.hook('upgrade-charm')
def upgrade_charm():
apt_install(filter_installed_packages(determine_packages()),
fatal=True)
for r_id in relation_ids('amqp'):
amqp_joined(relation_id=r_id)
for r_id in relation_ids('identity-service'):
@ -661,8 +784,34 @@ def upgrade_charm():
compute_changed(r_id, unit)
# remote_restart is defaulted to true as nova-cells may have started the
# nova-cell process before the db migration was run so it will need a
# kick
@hooks.hook('cell-relation-joined')
def nova_cell_relation_joined(rid=None, remote_restart=True):
rel_settings = {
'nova_url': "%s:8774/v2" % canonical_url(CONFIGS, INTERNAL)
}
if remote_restart:
rel_settings['restart_trigger'] = str(uuid.uuid4())
relation_set(relation_id=rid, **rel_settings)
@hooks.hook('cell-relation-changed')
@restart_on_change(restart_map())
def nova_cell_relation_changed():
CONFIGS.write(NOVA_CONF)
def get_cell_type():
cell_info = NovaCellContext()()
if 'cell_type' in cell_info:
return cell_info['cell_type']
return None
@hooks.hook('neutron-api-relation-joined')
def neutron_api_relation_joined(rid=None):
def neutron_api_relation_joined(rid=None, remote_restart=False):
with open('/etc/init/neutron-server.override', 'wb') as out:
out.write('manual\n')
if os.path.isfile(NEUTRON_CONF):
@ -671,8 +820,14 @@ def neutron_api_relation_joined(rid=None):
service_stop('neutron-server')
for id_rid in relation_ids('identity-service'):
identity_joined(rid=id_rid)
nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
relation_set(relation_id=rid, nova_url=nova_url)
rel_settings = {
'nova_url': canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
}
if get_cell_type():
rel_settings['cell_type'] = get_cell_type()
if remote_restart:
rel_settings['restart_trigger'] = str(uuid.uuid4())
relation_set(relation_id=rid, **rel_settings)
@hooks.hook('neutron-api-relation-changed')


@ -12,6 +12,8 @@ from charmhelpers.contrib.openstack.neutron import (
from charmhelpers.contrib.hahelpers.cluster import eligible_leader
from charmhelpers.contrib.peerstorage import peer_store
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
get_host_ip,
@ -25,6 +27,7 @@ from charmhelpers.fetch import (
apt_upgrade,
apt_update,
apt_install,
add_source
)
from charmhelpers.core.hookenv import (
@ -39,9 +42,15 @@ from charmhelpers.core.hookenv import (
)
from charmhelpers.core.host import (
service,
service_start,
service_stop,
service_running
service_running,
lsb_release
)
from charmhelpers.contrib.network.ip import (
is_ipv6
)
import nova_cc_context
@ -57,6 +66,7 @@ BASE_PACKAGES = [
'python-keystoneclient',
'python-mysqldb',
'python-psycopg2',
'python-psutil',
'uuid',
]
@ -105,12 +115,17 @@ BASE_RESOURCE_MAP = OrderedDict([
interface='nova-vmware',
service='nova',
config_file=NOVA_CONF),
nova_cc_context.NovaCellContext(),
context.SyslogContext(),
context.LogLevelContext(),
nova_cc_context.HAProxyContext(),
nova_cc_context.IdentityServiceContext(),
nova_cc_context.VolumeServiceContext(),
context.ZeroMQContext(),
context.NotificationDriverContext()],
context.NotificationDriverContext(),
nova_cc_context.NovaIPv6Context(),
nova_cc_context.NeutronCCContext(),
nova_cc_context.NovaConfigContext()],
}),
(NOVA_API_PASTE, {
'services': [s for s in BASE_SERVICES if 'api' in s],
@ -150,7 +165,9 @@ BASE_RESOURCE_MAP = OrderedDict([
nova_cc_context.IdentityServiceContext(),
nova_cc_context.NeutronCCContext(),
nova_cc_context.HAProxyContext(),
context.SyslogContext()],
context.SyslogContext(),
nova_cc_context.NovaConfigContext(),
context.BindHostContext()],
}),
(NEUTRON_DEFAULT, {
'services': ['neutron-server'],
@ -302,9 +319,9 @@ def determine_ports():
'''Assemble a list of API ports for services we are managing'''
ports = []
for services in restart_map().values():
for service in services:
for svc in services:
try:
ports.append(API_PORTS[service])
ports.append(API_PORTS[svc])
except KeyError:
pass
return list(set(ports))
@ -500,8 +517,12 @@ def _do_openstack_upgrade(new_src):
# NOTE(jamespage) upgrade with existing config files as the
# havana->icehouse migration enables new service_plugins which
# create issues with db upgrades
neutron_db_manage(['stamp', cur_os_rel])
neutron_db_manage(['upgrade', 'head'])
if relation_ids('neutron-api'):
log('Not running neutron database migration as neutron-api service '
'is present.')
else:
neutron_db_manage(['stamp', cur_os_rel])
migrate_neutron_database()
reset_os_release()
configs = register_configs(release=new_os_rel)
configs.write_all()
@ -511,7 +532,7 @@ def _do_openstack_upgrade(new_src):
ml2_migration()
if eligible_leader(CLUSTER_RES):
migrate_database()
migrate_nova_database()
[service_start(s) for s in services()]
disable_policy_rcd()
@ -538,11 +559,23 @@ def volume_service():
return 'cinder'
def migrate_database():
def migrate_nova_database():
'''Runs nova-manage to initialize a new database or migrate existing'''
log('Migrating the nova database.', level=INFO)
cmd = ['nova-manage', 'db', 'sync']
subprocess.check_output(cmd)
if relation_ids('cluster'):
log('Informing peers that dbsync is complete', level=INFO)
peer_store('dbsync_state', 'complete')
log('Enabling services', level=INFO)
enable_services()
cmd_all_services('start')
def migrate_neutron_database():
'''Runs neutron-db-manage to init a new database or migrate existing'''
log('Migrating the neutron database.', level=INFO)
neutron_db_manage(['upgrade', 'head'])
def auth_token_config(setting):
@ -647,16 +680,18 @@ def ssh_compute_add(public_key, rid=None, unit=None, user=None):
private_address = relation_get(rid=rid, unit=unit,
attribute='private-address')
hosts = [private_address]
if relation_get('hostname'):
hosts.append(relation_get('hostname'))
if not is_ip(private_address):
hosts.append(get_host_ip(private_address))
hosts.append(private_address.split('.')[0])
else:
hn = get_hostname(private_address)
hosts.append(hn)
hosts.append(hn.split('.')[0])
if not is_ipv6(private_address):
if relation_get('hostname'):
hosts.append(relation_get('hostname'))
if not is_ip(private_address):
hosts.append(get_host_ip(private_address))
hosts.append(private_address.split('.')[0])
else:
hn = get_hostname(private_address)
hosts.append(hn)
hosts.append(hn.split('.')[0])
for host in list(set(hosts)):
if not ssh_known_host_key(host, unit, user):
@ -781,7 +816,7 @@ def determine_endpoints(public_url, internal_url, admin_url):
})
# XXX: Keep these relations named quantum_*??
if is_relation_made('neutron-api'):
if relation_ids('neutron-api'):
endpoints.update({
'quantum_service': None,
'quantum_region': None,
@ -868,3 +903,43 @@ def get_topics():
if 'nova-consoleauth' in services():
topics.append('consoleauth')
return topics
def cmd_all_services(cmd):
if cmd == 'start':
for svc in services():
if not service_running(svc):
service_start(svc)
else:
for svc in services():
service(cmd, svc)
def disable_services():
for svc in services():
with open('/etc/init/{}.override'.format(svc), 'wb') as out:
out.write('exec true\n')
def enable_services():
for svc in services():
override_file = '/etc/init/{}.override'.format(svc)
if os.path.isfile(override_file):
os.remove(override_file)
def setup_ipv6():
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
if ubuntu_rel < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
# NOTE(xianghui): haproxy (1.5.3) needs to be installed from trusty-backports
# to support IPv6 addresses, so a check is required to avoid breaking
# other releases; IPv6 is only supported for >= Trusty.
if ubuntu_rel == 'trusty':
add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports'
' main')
apt_update()
apt_install('haproxy/trusty-backports', fatal=True)


@ -32,6 +32,8 @@ requires:
interface: quantum
neutron-api:
interface: neutron-api
cell:
interface: nova-cell
ha:
interface: hacluster
scope: container


@ -20,6 +20,10 @@ volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
auth_strategy=keystone
compute_driver=libvirt.LibvirtDriver
osapi_compute_workers = {{ workers }}
ec2_workers = {{ workers }}
scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
cpu_allocation_ratio = {{ cpu_allocation_ratio }}
use_syslog={{ use_syslog }}
{% if keystone_ec2_url -%}


@ -5,11 +5,12 @@
[DEFAULT]
state_path = /var/lib/neutron
lock_path = $state_path/lock
bind_host = 0.0.0.0
bind_host = {{ bind_host }}
auth_strategy = keystone
{% if notifications == 'True' -%}
notification_driver = neutron.openstack.common.notifier.rpc_notifier
{% endif -%}
api_workers = {{ workers }}
use_syslog = {{ use_syslog }}
{% if neutron_bind_port -%}


@ -1,8 +1,11 @@
# icehouse
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
verbose={{ verbose }}
debug={{ debug }}
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
@ -13,13 +16,26 @@ iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
auth_strategy=keystone
compute_driver=libvirt.LibvirtDriver
use_ipv6 = {{ use_ipv6 }}
osapi_compute_listen = {{ bind_host }}
my_ip = {{ bind_host }}
metadata_host = {{ bind_host }}
s3_listen = {{ bind_host }}
ec2_listen = {{ bind_host }}
osapi_compute_workers = {{ workers }}
ec2_workers = {{ workers }}
scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
cpu_allocation_ratio = {{ cpu_allocation_ratio }}
ram_allocation_ratio = {{ ram_allocation_ratio }}
use_syslog={{ use_syslog }}
{% if keystone_ec2_url -%}
@ -132,3 +148,7 @@ admin_password = {{ admin_password }}
[osapi_v3]
enabled=True
{% include "parts/cell" %}
[conductor]
workers = {{ workers }}

templates/parts/cell Normal file

@ -0,0 +1,6 @@
{% if cell_type -%}
[cells]
enable=True
name={{ cell_name }}
cell_type={{ cell_type }}
{% endif -%}
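
A quick sketch of what the parts/cell fragment above renders to for a parent
cell (standalone jinja2 render; the context values are assumptions borrowed
from the unit tests further down):

from jinja2 import Template

cell_part = (
    "{% if cell_type -%}\n"
    "[cells]\n"
    "enable=True\n"
    "name={{ cell_name }}\n"
    "cell_type={{ cell_type }}\n"
    "{% endif -%}"
)
print(Template(cell_part).render(cell_type='parent', cell_name='api'))
# [cells]
# enable=True
# name=api
# cell_type=parent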


@ -4,7 +4,7 @@ set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet
sudo apt-get install --yes python-glanceclient
sudo apt-get install --yes python-keystoneclient
sudo apt-get install --yes python-novaclient
sudo apt-get install --yes python-amulet \
python-glanceclient \
python-keystoneclient \
python-novaclient


@ -1,6 +1,12 @@
This directory provides Amulet tests that focus on verification of Nova Cloud
Controller deployments.
In order to run tests, you'll need charm-tools installed (in addition to
juju, of course):
sudo add-apt-repository ppa:juju/stable
sudo apt-get update
sudo apt-get install charm-tools
If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.


@ -19,9 +19,9 @@ u = OpenStackAmuletUtils(ERROR)
class NovaCCBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic nova cloud controller deployment."""
def __init__(self, series=None, openstack=None, source=None):
def __init__(self, series=None, openstack=None, source=None, stable=False):
"""Deploy the entire test environment."""
super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
super(NovaCCBasicDeployment, self).__init__(series, openstack, source, stable)
self._add_services()
self._add_relations()
self._configure_services()
@ -29,12 +29,16 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment):
self._initialize_tests()
def _add_services(self):
"""Add the service that we're testing, including the number of units,
where nova-cloud-controller is local, and the other charms are from
the charm store."""
this_service = ('nova-cloud-controller', 1)
other_services = [('mysql', 1), ('rabbitmq-server', 1),
('nova-compute', 2), ('keystone', 1), ('glance', 1)]
"""Add services
Add the services that we're testing, where nova-cc is local,
and the rest of the services are from lp branches that are
compatible with the local charm (e.g. stable or next).
"""
this_service = {'name': 'nova-cloud-controller'}
other_services = [{'name': 'mysql'}, {'name': 'rabbitmq-server'},
{'name': 'nova-compute', 'units': 2},
{'name': 'keystone'}, {'name': 'glance'}]
super(NovaCCBasicDeployment, self)._add_services(this_service,
other_services)
@ -378,19 +382,26 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment):
message = u.relation_error('glance image-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_restart_on_config_change(self):
def test_z_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed."""
is changed.
Note(coreycb): The method name with the _z_ is a little odd
but it forces the test to run last. It just makes things
easier because restarting services requires re-authorization.
"""
# NOTE(coreycb): Skipping failing test on essex until resolved.
# config-flags don't take effect on essex.
if self._get_openstack_release() == self.precise_essex:
u.log.error("Skipping failing test until resolved")
return
flags_set = 'quota_cores=20,quota_instances=40,quota_ram=102400'
flags_reset = 'quota_cores=10,quota_instances=20,quota_ram=51200'
services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore',
'nova-cert', 'nova-scheduler', 'nova-conductor']
self.d.configure('nova-cloud-controller',
{'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'})
self.d.configure('nova-cloud-controller', {'config-flags': flags_set})
pgrep_full = True
time = 20
@ -398,10 +409,14 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment):
for s in services:
if not u.service_restarted(self.nova_cc_sentry, s, conf,
pgrep_full=True, sleep_time=time):
self.d.configure('nova-cloud-controller',
{'config-flags': flags_reset})
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
time = 0
self.d.configure('nova-cloud-controller', {'config-flags': flags_reset})
def test_nova_default_config(self):
"""Verify the data in the nova config file's default section."""
# NOTE(coreycb): Currently no way to test on essex because config file


@ -24,25 +24,31 @@ class AmuletDeployment(object):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're focused on testing and other_services are the other
charms that come from the charm store.
that we're testing and other_services are the other services that
are being used in the local amulet tests.
"""
name, units = range(2)
if this_service[name] != os.path.basename(os.getcwd()):
s = this_service[name]
if this_service['name'] != os.path.basename(os.getcwd()):
s = this_service['name']
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
self.d.add(this_service[name], units=this_service[units])
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'])
for svc in other_services:
if self.series:
self.d.add(svc[name],
charm='cs:{}/{}'.format(self.series, svc[name]),
units=svc[units])
if 'location' in svc:
branch_location = svc['location']
elif self.series:
branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
else:
self.d.add(svc[name], units=svc[units])
branch_location = None
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'])
def _add_relations(self, relations):
"""Add all of the relations for the services."""
@ -57,7 +63,7 @@ class AmuletDeployment(object):
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup()
self.d.setup(timeout=900)
self.d.sentry.wait(timeout=900)
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")


@ -10,32 +10,62 @@ class OpenStackAmuletDeployment(AmuletDeployment):
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None):
def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
if self.stable:
for svc in other_services:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
for svc in other_services:
if svc['name'] in base_charms:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
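
For illustration (service names assumed), the resolution above works out
roughly as follows for a 'next' run (stable=False, current_next='trusty'):

# Hypothetical _determine_branch_locations() results:
#   mysql     -> lp:charms/mysql                 (base charm, stable branch)
#   keystone  -> lp:~openstack-charmers/charms/trusty/keystone/next
#   glance    -> lp:~openstack-charmers/charms/trusty/glance/next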
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin."""
"""Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
name = 0
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
if self.openstack:
for svc in services:
if svc[name] not in use_source:
if svc['name'] not in use_source:
config = {'openstack-origin': self.openstack}
self.d.configure(svc[name], config)
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc[name] in use_source:
if svc['name'] in use_source:
config = {'source': self.source}
self.d.configure(svc[name], config)
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""


@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils):
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(cirros_img):
if not os.path.exists(local_path):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
with open(cirros_img) as f:
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)


@ -29,12 +29,18 @@ TO_PATCH = [
'charm_dir',
'do_openstack_upgrade',
'openstack_upgrade_available',
'cmd_all_services',
'config',
'determine_packages',
'determine_ports',
'disable_services',
'enable_services',
'NovaCellContext',
'open_port',
'is_relation_made',
'local_unit',
'log',
'os_release',
'relation_get',
'relation_set',
'relation_ids',
@ -42,16 +48,21 @@ TO_PATCH = [
'ssh_known_hosts_lines',
'ssh_authorized_keys_lines',
'save_script_rc',
'service_restart',
'service_running',
'service_stop',
'services',
'execd_preinstall',
'network_manager',
'volume_service',
'unit_get',
'uuid',
'eligible_leader',
'keystone_ca_cert_b64',
'neutron_plugin',
'migrate_database',
'migrate_nova_database',
'migrate_neutron_database',
'uuid',
]
@ -86,6 +97,8 @@ class NovaCCHooksTests(CharmTestCase):
self.apt_install.assert_called_with(
['nova-scheduler', 'nova-api-ec2'], fatal=True)
self.execd_preinstall.assert_called()
self.disable_services.assert_called()
self.cmd_all_services.assert_called_with('stop')
@patch.object(hooks, 'configure_https')
def test_config_changed_no_upgrade(self, conf_https):
@ -93,11 +106,19 @@ class NovaCCHooksTests(CharmTestCase):
hooks.config_changed()
self.assertTrue(self.save_script_rc.called)
@patch.object(hooks, 'cluster_joined')
@patch.object(hooks, 'identity_joined')
@patch.object(hooks, 'neutron_api_relation_joined')
@patch.object(hooks, 'configure_https')
def test_config_changed_with_upgrade(self, conf_https):
def test_config_changed_with_upgrade(self, conf_https, neutron_api_joined,
identity_joined, cluster_joined):
self.openstack_upgrade_available.return_value = True
self.relation_ids.return_value = ['generic_rid']
hooks.config_changed()
self.assertTrue(self.do_openstack_upgrade.called)
self.assertTrue(neutron_api_joined.called)
self.assertTrue(identity_joined.called)
self.assertTrue(cluster_joined.called)
self.assertTrue(self.save_script_rc.called)
def test_compute_changed_ssh_migration(self):
@ -316,36 +337,147 @@ class NovaCCHooksTests(CharmTestCase):
configs.write = MagicMock()
hooks.postgresql_nova_db_changed()
@patch.object(hooks, 'conditional_neutron_migration')
@patch.object(hooks, 'CONFIGS')
def test_db_changed(self, configs):
def test_db_changed(self, configs, cond_neutron_mig):
self._shared_db_test(configs)
self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.migrate_nova_database.assert_called_with()
cond_neutron_mig.assert_called_with()
@patch.object(hooks, 'CONFIGS')
def test_db_changed_allowed(self, configs):
allowed_units = 'nova-cloud-controller/0 nova-cloud-controller/3'
self.test_relation.set({
'nova_allowed_units': allowed_units,
})
self.local_unit.return_value = 'nova-cloud-controller/3'
self._shared_db_test(configs)
self.assertTrue(configs.write_all.called)
self.migrate_nova_database.assert_called_with()
@patch.object(hooks, 'CONFIGS')
def test_db_changed_not_allowed(self, configs):
allowed_units = 'nova-cloud-controller/0 nova-cloud-controller/3'
self.test_relation.set({
'nova_allowed_units': allowed_units,
})
self.local_unit.return_value = 'nova-cloud-controller/1'
self._shared_db_test(configs)
self.assertTrue(configs.write_all.called)
self.assertFalse(self.migrate_nova_database.called)
@patch.object(hooks, 'CONFIGS')
def test_postgresql_db_changed(self, configs):
self._postgresql_db_test(configs)
self.assertTrue(configs.write_all.called)
self.migrate_database.assert_called_with()
self.migrate_nova_database.assert_called_with()
@patch.object(hooks, 'nova_cell_relation_joined')
@patch.object(hooks, 'compute_joined')
@patch.object(hooks, 'CONFIGS')
def test_db_changed_remote_restarts(self, configs, comp_joined,
cell_joined):
def _relation_ids(rel):
relid = {
'cloud-compute': ['nova-compute/0'],
'cell': ['nova-cell-api/0'],
'neutron-api': ['neutron-api/0'],
}
return relid[rel]
self.relation_ids.side_effect = _relation_ids
allowed_units = 'nova-cloud-controller/0'
self.test_relation.set({
'nova_allowed_units': allowed_units,
})
self.local_unit.return_value = 'nova-cloud-controller/0'
self._shared_db_test(configs)
comp_joined.assert_called_with(remote_restart=True,
rid='nova-compute/0')
cell_joined.assert_called_with(remote_restart=True,
rid='nova-cell-api/0')
self.migrate_nova_database.assert_called_with()
@patch.object(hooks, 'nova_cell_relation_joined')
@patch.object(hooks, 'CONFIGS')
def test_amqp_relation_broken(self, configs, cell_joined):
configs.write = MagicMock()
self.relation_ids.return_value = ['nova-cell-api/0']
hooks.relation_broken()
self.assertTrue(configs.write_all.called)
cell_joined.assert_called_with(rid='nova-cell-api/0')
@patch.object(hooks, 'nova_cell_relation_joined')
@patch.object(hooks, 'CONFIGS')
def test_amqp_changed_api_rel(self, configs, cell_joined):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['amqp']
configs.write = MagicMock()
self.is_relation_made.return_value = True
hooks.amqp_changed()
self.assertEquals(configs.write.call_args_list,
[call('/etc/nova/nova.conf')])
@patch.object(hooks, 'nova_cell_relation_joined')
@patch.object(hooks, 'CONFIGS')
def test_amqp_changed_noapi_rel(self, configs, cell_joined):
configs.complete_contexts = MagicMock()
configs.complete_contexts.return_value = ['amqp']
configs.write = MagicMock()
self.relation_ids.return_value = ['nova-cell-api/0']
self.is_relation_made.return_value = False
self.network_manager.return_value = 'neutron'
hooks.amqp_changed()
self.assertEquals(configs.write.call_args_list,
[call('/etc/nova/nova.conf'),
call('/etc/neutron/neutron.conf')])
cell_joined.assert_called_with(rid='nova-cell-api/0')
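Between them, the two amqp_changed tests fix which templates are rewritten: nova.conf alone when a neutron-api relation is made (that service then owns neutron.conf), and both nova.conf and neutron.conf otherwise when the network manager is neutron. A sketch of that branch, with the charm's collaborators passed in as arguments to keep it self-contained:

# Editor's sketch, not from this commit: the config selection asserted by the
# two amqp_changed tests; 'configs' stands in for the charm's CONFIGS renderer.
def write_amqp_configs(configs, have_neutron_api_relation, network_manager):
    """Rewrite nova.conf, plus neutron.conf while this charm still owns it."""
    configs.write('/etc/nova/nova.conf')
    if not have_neutron_api_relation and network_manager == 'neutron':
        configs.write('/etc/neutron/neutron.conf')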
def test_nova_cell_relation_joined(self):
self.uuid.uuid4.return_value = 'bob'
self.canonical_url.return_value = 'http://novaurl'
hooks.nova_cell_relation_joined(rid='rid',
remote_restart=True)
self.relation_set.assert_called_with(restart_trigger='bob',
nova_url='http://novaurl:8774/v2',
relation_id='rid')
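The cell test pins uuid4 and the canonical URL and then checks exactly what is published on the cell relation. A minimal sketch of a handler that would satisfy those assertions, assuming charm-helpers' relation_set and the standard uuid module; the URL is hard-coded here where the charm would derive it from its canonical_url helper:

# Editor's sketch, not from this commit: the shape of the handler checked by
# test_nova_cell_relation_joined.
import uuid

from charmhelpers.core.hookenv import relation_set


def nova_cell_relation_joined(rid=None, remote_restart=False):
    """Publish the nova API URL (and optionally a restart trigger) to a cell."""
    settings = {
        'relation_id': rid,
        'nova_url': 'http://novaurl:8774/v2',
    }
    if remote_restart:
        settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(**settings)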
@patch.object(hooks, 'CONFIGS')
def test_nova_cell_relation_changed(self, configs):
hooks.nova_cell_relation_changed()
configs.write.assert_called_with('/etc/nova/nova.conf')
def test_get_cell_type(self):
self.NovaCellContext().return_value = {
'cell_type': 'parent',
'cell_name': 'api',
}
self.assertEquals(hooks.get_cell_type(), 'parent')
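get_cell_type is only expected to read the cell type out of the cell context. A sketch under the assumption that NovaCellContext (the charm's context class mocked above, assumed importable from the charm's context module) is a callable returning a dict:

# Editor's sketch, not from this commit: the lookup asserted by
# test_get_cell_type.
def get_cell_type():
    """Return the configured cell type ('parent', 'api', ...) or None."""
    cell_info = NovaCellContext()()
    return cell_info.get('cell_type')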
@patch.object(os, 'rename')
@patch.object(os.path, 'isfile')
@patch.object(hooks, 'CONFIGS')
def test_neutron_api_relation_joined(self, configs, isfile, rename):
@patch.object(hooks, 'get_cell_type')
def test_neutron_api_relation_joined(self, get_cell_type, configs, isfile,
rename):
neutron_conf = '/etc/neutron/neutron.conf'
nova_url = 'http://novaurl:8774/v2'
isfile.return_value = True
self.service_running.return_value = True
_identity_joined = self.patch('identity_joined')
self.relation_ids.side_effect = ['relid']
self.relation_ids.return_value = ['relid']
self.canonical_url.return_value = 'http://novaurl'
get_cell_type.return_value = 'parent'
self.uuid.uuid4.return_value = 'bob'
with patch_open() as (_open, _file):
hooks.neutron_api_relation_joined()
hooks.neutron_api_relation_joined(remote_restart=True)
self.service_stop.assert_called_with('neutron-server')
rename.assert_called_with(neutron_conf, neutron_conf + '_unused')
self.assertTrue(_identity_joined.called)
self.relation_set.assert_called_with(relation_id=None,
nova_url=nova_url)
cell_type='parent',
nova_url=nova_url,
restart_trigger='bob')
@patch.object(hooks, 'CONFIGS')
def test_neutron_api_relation_changed(self, configs):
@ -456,3 +588,28 @@ class NovaCCHooksTests(CharmTestCase):
'console_keymap': 'en-us'
}
self.assertEqual(_con_sets, console_settings)
def test_conditional_neutron_migration_api_rel(self):
self.relation_ids.return_value = ['neutron-api/0']
hooks.conditional_neutron_migration()
self.log.assert_called_with(
'Not running neutron database migration as neutron-api service '
'is present.'
)
def test_conditional_neutron_migration_noapi_rel(self):
self.os_release.return_value = 'juno'
self.relation_ids.return_value = []
self.services.return_value = ['neutron-server']
hooks.conditional_neutron_migration()
self.migrate_neutron_database.assert_called_with()
self.service_restart.assert_called_with('neutron-server')
def test_conditional_neutron_migration_noapi_rel_icehouse(self):
self.os_release.return_value = 'icehouse'
self.relation_ids.return_value = []
hooks.conditional_neutron_migration()
self.log.assert_called_with(
'Not running neutron database migration as migrations are handled '
'by the neutron-server process.'
)
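Taken together, the three tests above describe conditional_neutron_migration end to end: skip with a log message when a neutron-api unit is related, skip on icehouse and earlier (neutron-server handles its own migrations there), and otherwise run the migration and restart neutron-server. A sketch of that branching, assuming charm-helpers for relation_ids, os_release and service_restart; migrate_neutron_database is the charm helper patched in these tests. The string comparison on the release name works because OpenStack release names sort alphabetically:

# Editor's sketch, not from this commit: the branching covered by the
# conditional_neutron_migration tests above.
from charmhelpers.contrib.openstack.utils import os_release
from charmhelpers.core.hookenv import log, relation_ids
from charmhelpers.core.host import service_restart


def conditional_neutron_migration():
    if relation_ids('neutron-api'):
        log('Not running neutron database migration as neutron-api service '
            'is present.')
    elif os_release('nova-common') <= 'icehouse':
        log('Not running neutron database migration as migrations are '
            'handled by the neutron-server process.')
    else:
        migrate_neutron_database()
        service_restart('neutron-server')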

View File

@ -16,11 +16,13 @@ TO_PATCH = [
'apt_update',
'apt_upgrade',
'apt_install',
'cmd_all_services',
'config',
'configure_installation_source',
'disable_policy_rcd',
'eligible_leader',
'enable_policy_rcd',
'enable_services',
'get_os_codename_install_source',
'is_relation_made',
'log',
@ -30,6 +32,7 @@ TO_PATCH = [
'neutron_plugin',
'neutron_plugin_attribute',
'os_release',
'peer_store',
'register_configs',
'relation_ids',
'remote_unit',
@ -516,7 +519,7 @@ class NovaCCUtilsTests(CharmTestCase):
def test_determine_endpoints_nova_volume(self):
self.is_relation_made.return_value = False
self.relation_ids.return_value = ['nova-volume-service/0']
self.relation_ids.side_effect = [['nova-volume-service/0'], []]
endpoints = deepcopy(BASE_ENDPOINTS)
endpoints.update({
'nova-volume_admin_url':
@ -550,7 +553,7 @@ class NovaCCUtilsTests(CharmTestCase):
def test_determine_endpoints_neutron_api_rel(self):
self.is_relation_made.return_value = True
self.relation_ids.return_value = []
self.relation_ids.side_effect = [[], ['neutron-api:1']]
self.network_manager.return_value = 'quantum'
endpoints = deepcopy(BASE_ENDPOINTS)
endpoints.update({
@ -589,16 +592,29 @@ class NovaCCUtilsTests(CharmTestCase):
_known_hosts.assert_called_with('bar', None)
@patch('subprocess.check_output')
def test_migrate_database(self, check_output):
def test_migrate_nova_database(self, check_output):
"Migrate database with nova-manage"
utils.migrate_database()
self.relation_ids.return_value = []
utils.migrate_nova_database()
check_output.assert_called_with(['nova-manage', 'db', 'sync'])
self.enable_services.assert_called()
self.cmd_all_services.assert_called_with('start')
@patch('subprocess.check_output')
def test_migrate_nova_database_cluster(self, check_output):
"Migrate database with nova-manage in a clustered env"
self.relation_ids.return_value = ['cluster:1']
utils.migrate_nova_database()
check_output.assert_called_with(['nova-manage', 'db', 'sync'])
self.peer_store.assert_called_with('dbsync_state', 'complete')
self.enable_services.assert_called()
self.cmd_all_services.assert_called_with('start')
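The two utils tests split migrate_nova_database into a single-unit path and a clustered path; in a cluster the completed sync is recorded through peer storage so the other units know it has been done. A sketch assuming the charm-helpers peerstorage API; enable_services and cmd_all_services are the charm's own helpers (both listed in TO_PATCH above):

# Editor's sketch, not from this commit: the db-sync paths covered by
# test_migrate_nova_database and test_migrate_nova_database_cluster.
import subprocess

from charmhelpers.contrib.peerstorage import peer_store
from charmhelpers.core.hookenv import relation_ids


def migrate_nova_database():
    """Run the nova db sync and, when clustered, record completion for peers."""
    subprocess.check_output(['nova-manage', 'db', 'sync'])
    if relation_ids('cluster'):
        peer_store('dbsync_state', 'complete')
    enable_services()
    cmd_all_services('start')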
@patch.object(utils, 'get_step_upgrade_source')
@patch.object(utils, 'migrate_database')
@patch.object(utils, 'migrate_nova_database')
@patch.object(utils, 'determine_packages')
def test_upgrade_grizzly_icehouse(self, determine_packages,
migrate_database,
migrate_nova_database,
get_step_upgrade_source):
"Simulate a call to do_openstack_upgrade() for grizzly->icehouse"
get_step_upgrade_source.return_value = 'cloud:precise-havana'
@ -607,6 +623,7 @@ class NovaCCUtilsTests(CharmTestCase):
'havana',
'icehouse']
self.eligible_leader.return_value = True
self.relation_ids.return_value = []
utils.do_openstack_upgrade()
expected = [call(['stamp', 'grizzly']), call(['upgrade', 'head']),
call(['stamp', 'havana']), call(['upgrade', 'head'])]
@ -618,19 +635,20 @@ class NovaCCUtilsTests(CharmTestCase):
expected = [call(release='havana'), call(release='icehouse')]
self.assertEquals(self.register_configs.call_args_list, expected)
self.assertEquals(self.ml2_migration.call_count, 1)
self.assertTrue(migrate_database.call_count, 2)
self.assertTrue(migrate_nova_database.call_count, 2)
@patch.object(utils, 'get_step_upgrade_source')
@patch.object(utils, 'migrate_database')
@patch.object(utils, 'migrate_nova_database')
@patch.object(utils, 'determine_packages')
def test_upgrade_havana_icehouse(self, determine_packages,
migrate_database,
migrate_nova_database,
get_step_upgrade_source):
"Simulate a call to do_openstack_upgrade() for havana->icehouse"
get_step_upgrade_source.return_value = None
self.os_release.return_value = 'havana'
self.get_os_codename_install_source.return_value = 'icehouse'
self.eligible_leader.return_value = True
self.relation_ids.return_value = []
utils.do_openstack_upgrade()
self.neutron_db_manage.assert_called_with(['upgrade', 'head'])
self.apt_update.assert_called_with(fatal=True)
@ -639,7 +657,28 @@ class NovaCCUtilsTests(CharmTestCase):
self.apt_install.assert_called_with(determine_packages(), fatal=True)
self.register_configs.assert_called_with(release='icehouse')
self.assertEquals(self.ml2_migration.call_count, 1)
self.assertTrue(migrate_database.call_count, 1)
self.assertTrue(migrate_nova_database.call_count, 1)
@patch.object(utils, 'get_step_upgrade_source')
@patch.object(utils, 'migrate_nova_database')
@patch.object(utils, 'determine_packages')
def test_upgrade_havana_icehouse_apirel(self, determine_packages,
migrate_nova_database,
get_step_upgrade_source):
"Simulate a call to do_openstack_upgrade() for havana->icehouse api"
get_step_upgrade_source.return_value = None
self.os_release.return_value = 'havana'
self.get_os_codename_install_source.return_value = 'icehouse'
self.eligible_leader.return_value = True
self.relation_ids.return_value = ['neutron-api/0']
utils.do_openstack_upgrade()
self.apt_update.assert_called_with(fatal=True)
self.apt_upgrade.assert_called_with(options=DPKG_OPTS, fatal=True,
dist=True)
self.apt_install.assert_called_with(determine_packages(), fatal=True)
self.register_configs.assert_called_with(release='icehouse')
self.assertEquals(self.ml2_migration.call_count, 1)
self.assertTrue(migrate_nova_database.call_count, 1)
@patch.object(utils, '_do_openstack_upgrade')
def test_upgrade_grizzly_icehouse_source(self, _do_openstack_upgrade):