This commit is contained in:
Liam Young 2014-07-29 14:42:08 +00:00
commit eeafe6583a
48 changed files with 2507 additions and 208 deletions

2
.bzrignore Normal file
View File

@ -0,0 +1,2 @@
bin
.coverage

View File

@ -2,16 +2,28 @@
PYTHON := /usr/bin/env python PYTHON := /usr/bin/env python
lint: lint:
@flake8 --exclude hooks/charmhelpers hooks unit_tests @flake8 --exclude hooks/charmhelpers hooks unit_tests tests
@charm proof @charm proof
unit_test:
@echo Starting unit tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
bin/charm_helpers_sync.py:
@mkdir -p bin
@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
> bin/charm_helpers_sync.py
test: test:
@echo Starting tests... @echo Starting Amulet tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests # coreycb note: The -v should only be temporary until Amulet sends
# raise_status() messages to stderr:
# https://bugs.launchpad.net/amulet/+bug/1320357
@juju test -v -p AMULET_HTTP_PROXY
sync: sync:
@charm-helper-sync -c charm-helpers.yaml @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
publish: lint test publish: lint unit_test
bzr push lp:charms/nova-cloud-controller bzr push lp:charms/nova-cloud-controller
bzr push lp:charms/trusty/nova-cloud-controller bzr push lp:charms/trusty/nova-cloud-controller

View File

@ -8,3 +8,4 @@ include:
- contrib.hahelpers: - contrib.hahelpers:
- apache - apache
- payload.execd - payload.execd
- contrib.network.ip

5
charm-helpers-tests.yaml Normal file
View File

@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet

View File

@ -97,15 +97,11 @@ options:
# HA configuration settings # HA configuration settings
vip: vip:
type: string type: string
description: "Virtual IP to use to front API services in ha configuration" description: |
vip_iface: Virtual IP(s) to use to front API services in HA configuration.
type: string .
default: eth0 If multiple networks are being used, a VIP should be provided for each
description: "Network Interface where to place the Virtual IP" network, separated by spaces.
vip_cidr:
type: int
default: 24
description: "Netmask that will be used for the Virtual IP"
ha-bindiface: ha-bindiface:
type: string type: string
default: eth0 default: eth0
@ -165,6 +161,48 @@ options:
description: | description: |
This is uuid of the default NVP/NSX L3 Gateway Service. This is uuid of the default NVP/NSX L3 Gateway Service.
# end of NVP/NSX configuration # end of NVP/NSX configuration
# Network configuration options
# by default all access is over 'private-address'
os-admin-network:
type: string
description: |
The IP address and netmask of the OpenStack Admin network (e.g.,
192.168.0.0/24)
.
This network will be used for admin endpoints.
os-internal-network:
type: string
description: |
The IP address and netmask of the OpenStack Internal network (e.g.,
192.168.0.0/24)
.
This network will be used for internal endpoints.
os-public-network:
type: string
description: |
The IP address and netmask of the OpenStack Public network (e.g.,
192.168.0.0/24)
.
This network will be used for public endpoints.
service-guard:
type: boolean
default: false
description: |
Ensure required relations are made and complete before allowing services
to be started
.
By default, services may be up and accepting API request from install
onwards.
.
Enabling this flag ensures that services will not be started until the
minimum 'core relations' have been made between this charm and other
charms.
.
For this charm the following relations must be made:
.
* shared-db or (pgsql-nova-db, pgsql-neutron-db)
* amqp
* identity-service
console-access-protocol: console-access-protocol:
type: string type: string
description: | description: |

View File

@ -146,12 +146,12 @@ def get_hacluster_config():
Obtains all relevant configuration from charm configuration required Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster: for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr ha-bindiface, ha-mcastport, vip
returns: dict: A dict containing settings keyed by setting name. returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing. raises: HAIncompleteConfig if settings are missing.
''' '''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] settings = ['ha-bindiface', 'ha-mcastport', 'vip']
conf = {} conf = {}
for setting in settings: for setting in settings:
conf[setting] = config_get(setting) conf[setting] = config_get(setting)
@ -170,6 +170,7 @@ def canonical_url(configs, vip_setting='vip'):
:configs : OSTemplateRenderer: A config tempating object to inspect for :configs : OSTemplateRenderer: A config tempating object to inspect for
a complete https context. a complete https context.
:vip_setting: str: Setting in charm config that specifies :vip_setting: str: Setting in charm config that specifies
VIP address. VIP address.
''' '''

View File

@ -0,0 +1,156 @@
import sys
from functools import partial
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
ERROR, log,
)
try:
import netifaces
except ImportError:
apt_install('python-netifaces')
import netifaces
try:
import netaddr
except ImportError:
apt_install('python-netaddr')
import netaddr
def _validate_cidr(network):
    """Raise ValueError unless ``network`` parses as CIDR notation."""
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        msg = "Network (%s) is not in CIDR presentation format" % network
        raise ValueError(msg)
def get_address_in_network(network, fallback=None, fatal=False):
    """
    Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    :returns str: Matching local address, the fallback, or None.
    """
    def not_found_error_out():
        # Log and hard-exit the hook; only reached when fatal=True.
        log("No IP address found in network: %s" % network,
            level=ERROR)
        sys.exit(1)

    if network is None:
        if fallback is not None:
            return fallback
        if fatal:
            not_found_error_out()
        # BUG FIX: previously this case fell through to
        # _validate_cidr(None), raising ValueError. With no network,
        # no fallback and fatal=False the function should return None,
        # consistent with the not-found path below.
        return None

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            # Only the first IPv4 address of each interface is considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)
        if network.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80...) addresses.
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        not_found_error_out()

    return None
def is_ipv6(address):
    '''Determine whether provided address is IPv6 or not'''
    try:
        parsed = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # Not parseable as an IP address at all - probably a hostname.
        return False
    return parsed.version == 6
def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    :raises ValueError: If either argument is malformed.
    """
    # Reuse the shared validator instead of duplicating its try/except;
    # it raises ValueError with the same message as before.
    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)
    # Membership test replaces the explicit True/False branches.
    return address in network
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            # Only the first IPv4 entry per interface is inspected.
            ipv4 = addresses[netifaces.AF_INET][0]
            subnet = netaddr.IPNetwork("%s/%s" % (ipv4['addr'],
                                                  ipv4['netmask']))
            if address in subnet:
                return iface if key == 'iface' else ipv4[key]
        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for ipv6 in addresses[netifaces.AF_INET6]:
                if ipv6['addr'].startswith('fe80'):
                    # Ignore link-local addresses.
                    continue
                subnet = netaddr.IPNetwork("%s/%s" % (ipv6['addr'],
                                                      ipv6['netmask']))
                if address in subnet:
                    return iface if key == 'iface' else ipv6[key]
    return None


get_iface_for_address = partial(_get_for_address, key='iface')

get_netmask_for_address = partial(_get_for_address, key='netmask')

View File

@ -0,0 +1,55 @@
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
    """This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms."""

    def __init__(self, series=None, openstack=None, source=None):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy on (passed to the base class).
        :param openstack: value applied as 'openstack-origin' config for
            OpenStack services, or None to leave defaults.
        :param source: value applied as 'source' config for the support
            services (mysql, ceph, ...), or None to leave defaults.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        # Index of the service name within each service entry; each entry
        # is presumably a (name, ...) sequence -- TODO confirm against
        # AmuletDeployment._add_services.
        name = 0
        services = other_services
        # NOTE(review): this appends to the caller's 'other_services' list
        # in place.
        services.append(this_service)
        # Support services configured via 'source' instead of
        # 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
        if self.openstack:
            for svc in services:
                if svc[name] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)
        if self.source:
            for svc in services:
                if svc[name] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        # Python 2 dict iteration; configs maps service name -> config dict.
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
        release."""
        # Enumerate known (series, origin) combinations; the values are
        # also bound as instance attributes so callers can compare the
        # returned enum against e.g. self.precise_havana by name.
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        # Raises KeyError for unrecognised series/origin combinations.
        return releases[(self.series, self.openstack)]

View File

@ -0,0 +1,209 @@
import logging
import os
import time
import urllib
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms."""

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.

        Returns an error string on mismatch, or None (implicitly) when a
        matching endpoint validated cleanly.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            # Substring match on the URLs; ports are expected as strings.
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)
        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.

        Returns an error string on mismatch, otherwise the last
        _validate_dict_data result (falsy on success).
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        # NOTE(review): 'ret' is unbound if 'expected' is empty -- would
        # raise NameError; confirm callers never pass an empty dict.
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
        data.

        Returns an error string on mismatch, otherwise the last
        validation result (falsy on success).
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                # Match tenants by name, then compare the full record.
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        # NOTE(review): 'ret' is unbound if 'expected' is empty.
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
        data.

        Returns an error string on mismatch, otherwise the last
        validation result (falsy on success).
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        # NOTE(review): 'ret' is unbound if 'expected' is empty.
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
        data.

        Returns an error string on mismatch, otherwise the last
        validation result (falsy on success).
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        # NOTE(review): 'ret' is unbound if 'expected' is empty.
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Compare by flavor name only.
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists"""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        # Derive the keystone unit's address from its shared-db relation
        # data rather than querying keystone directly.
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        # 35357 is the keystone admin API port.
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        # Reuse the already-authenticated keystone session's token.
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        # Honour an outbound proxy when fetching from cirros-cloud.net.
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)

        if not os.path.exists(cirros_img):
            # NOTE(review): 'cirros_img' includes the local 'tests/' prefix,
            # so the constructed URL contains '.../{version}/tests/...' --
            # looks wrong; confirm the download path actually resolves.
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, cirros_img)
        f.close()

        with open(cirros_img) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        return image

    def delete_image(self, glance, image):
        """Delete the specified image."""
        glance.images.delete(image)

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        # Poll for up to ~3 minutes (60 iterations x 3s) waiting for ACTIVE.
        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        # Still building after the timeout -> treat as failure.
        if status == 'BUILD':
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance."""
        nova.servers.delete(instance)

View File

@ -21,9 +21,11 @@ from charmhelpers.core.hookenv import (
relation_get, relation_get,
relation_ids, relation_ids,
related_units, related_units,
relation_set,
unit_get, unit_get,
unit_private_ip, unit_private_ip,
ERROR, ERROR,
INFO
) )
from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.cluster import (
@ -42,6 +44,8 @@ from charmhelpers.contrib.openstack.neutron import (
neutron_plugin_attribute, neutron_plugin_attribute,
) )
from charmhelpers.contrib.network.ip import get_address_in_network
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@ -134,8 +138,26 @@ class SharedDBContext(OSContextGenerator):
'Missing required charm config options. ' 'Missing required charm config options. '
'(database name and user)') '(database name and user)')
raise OSContextError raise OSContextError
ctxt = {} ctxt = {}
# NOTE(jamespage) if mysql charm provides a network upon which
# access to the database should be made, reconfigure relation
# with the service units local address and defer execution
access_network = relation_get('access-network')
if access_network is not None:
if self.relation_prefix is not None:
hostname_key = "{}_hostname".format(self.relation_prefix)
else:
hostname_key = "hostname"
access_hostname = get_address_in_network(access_network,
unit_get('private-address'))
set_hostname = relation_get(attribute=hostname_key,
unit=local_unit())
if set_hostname != access_hostname:
relation_set(relation_settings={hostname_key: access_hostname})
return ctxt # Defer any further hook execution for now....
password_setting = 'password' password_setting = 'password'
if self.relation_prefix: if self.relation_prefix:
password_setting = self.relation_prefix + '_password' password_setting = self.relation_prefix + '_password'
@ -243,23 +265,31 @@ class IdentityServiceContext(OSContextGenerator):
class AMQPContext(OSContextGenerator): class AMQPContext(OSContextGenerator):
interfaces = ['amqp']
def __init__(self, ssl_dir=None): def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
self.ssl_dir = ssl_dir self.ssl_dir = ssl_dir
self.rel_name = rel_name
self.relation_prefix = relation_prefix
self.interfaces = [rel_name]
def __call__(self): def __call__(self):
log('Generating template context for amqp') log('Generating template context for amqp')
conf = config() conf = config()
user_setting = 'rabbit-user'
vhost_setting = 'rabbit-vhost'
if self.relation_prefix:
user_setting = self.relation_prefix + '-rabbit-user'
vhost_setting = self.relation_prefix + '-rabbit-vhost'
try: try:
username = conf['rabbit-user'] username = conf[user_setting]
vhost = conf['rabbit-vhost'] vhost = conf[vhost_setting]
except KeyError as e: except KeyError as e:
log('Could not generate shared_db context. ' log('Could not generate shared_db context. '
'Missing required charm config options: %s.' % e) 'Missing required charm config options: %s.' % e)
raise OSContextError raise OSContextError
ctxt = {} ctxt = {}
for rid in relation_ids('amqp'): for rid in relation_ids(self.rel_name):
ha_vip_only = False ha_vip_only = False
for unit in related_units(rid): for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit): if relation_get('clustered', rid=rid, unit=unit):
@ -332,10 +362,12 @@ class CephContext(OSContextGenerator):
use_syslog = str(config('use-syslog')).lower() use_syslog = str(config('use-syslog')).lower()
for rid in relation_ids('ceph'): for rid in relation_ids('ceph'):
for unit in related_units(rid): for unit in related_units(rid):
mon_hosts.append(relation_get('private-address', rid=rid,
unit=unit))
auth = relation_get('auth', rid=rid, unit=unit) auth = relation_get('auth', rid=rid, unit=unit)
key = relation_get('key', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit)
ceph_addr = \
relation_get('ceph-public-address', rid=rid, unit=unit) or \
relation_get('private-address', rid=rid, unit=unit)
mon_hosts.append(ceph_addr)
ctxt = { ctxt = {
'mon_hosts': ' '.join(mon_hosts), 'mon_hosts': ' '.join(mon_hosts),
@ -369,7 +401,9 @@ class HAProxyContext(OSContextGenerator):
cluster_hosts = {} cluster_hosts = {}
l_unit = local_unit().replace('/', '-') l_unit = local_unit().replace('/', '-')
cluster_hosts[l_unit] = unit_get('private-address') cluster_hosts[l_unit] = \
get_address_in_network(config('os-internal-network'),
unit_get('private-address'))
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
for unit in related_units(rid): for unit in related_units(rid):
@ -418,12 +452,13 @@ class ApacheSSLContext(OSContextGenerator):
""" """
Generates a context for an apache vhost configuration that configures Generates a context for an apache vhost configuration that configures
HTTPS reverse proxying for one or many endpoints. Generated context HTTPS reverse proxying for one or many endpoints. Generated context
looks something like: looks something like::
{
'namespace': 'cinder', {
'private_address': 'iscsi.mycinderhost.com', 'namespace': 'cinder',
'endpoints': [(8776, 8766), (8777, 8767)] 'private_address': 'iscsi.mycinderhost.com',
} 'endpoints': [(8776, 8766), (8777, 8767)]
}
The endpoints list consists of a tuples mapping external ports The endpoints list consists of a tuples mapping external ports
to internal ports. to internal ports.
@ -541,6 +576,26 @@ class NeutronContext(OSContextGenerator):
return nvp_ctxt return nvp_ctxt
def n1kv_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
n1kv_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'n1kv',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': n1kv_config,
'vsm_ip': config('n1kv-vsm-ip'),
'vsm_username': config('n1kv-vsm-username'),
'vsm_password': config('n1kv-vsm-password'),
'restrict_policy_profiles': config(
'n1kv_restrict_policy_profiles'),
}
return n1kv_ctxt
def neutron_ctxt(self): def neutron_ctxt(self):
if https(): if https():
proto = 'https' proto = 'https'
@ -572,6 +627,8 @@ class NeutronContext(OSContextGenerator):
ctxt.update(self.ovs_ctxt()) ctxt.update(self.ovs_ctxt())
elif self.plugin in ['nvp', 'nsx']: elif self.plugin in ['nvp', 'nsx']:
ctxt.update(self.nvp_ctxt()) ctxt.update(self.nvp_ctxt())
elif self.plugin == 'n1kv':
ctxt.update(self.n1kv_ctxt())
alchemy_flags = config('neutron-alchemy-flags') alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags: if alchemy_flags:
@ -611,7 +668,7 @@ class SubordinateConfigContext(OSContextGenerator):
The subordinate interface allows subordinates to export their The subordinate interface allows subordinates to export their
configuration requirements to the principle for multiple config configuration requirements to the principle for multiple config
files and multiple services. I.e., a subordinate that has interfaces files and multiple services. I.e., a subordinate that has interfaces
to both glance and nova may export to following yaml blob as json: to both glance and nova may export to following yaml blob as json::
glance: glance:
/etc/glance/glance-api.conf: /etc/glance/glance-api.conf:
@ -630,7 +687,8 @@ class SubordinateConfigContext(OSContextGenerator):
It is then up to the principle charms to subscribe this context to It is then up to the principle charms to subscribe this context to
the service+config file it is interested in. Configuration data will the service+config file it is interested in. Configuration data will
be available in the template context, in glance's case, as: be available in the template context, in glance's case, as::
ctxt = { ctxt = {
... other context ... ... other context ...
'subordinate_config': { 'subordinate_config': {
@ -657,7 +715,7 @@ class SubordinateConfigContext(OSContextGenerator):
self.interface = interface self.interface = interface
def __call__(self): def __call__(self):
ctxt = {} ctxt = {'sections': {}}
for rid in relation_ids(self.interface): for rid in relation_ids(self.interface):
for unit in related_units(rid): for unit in related_units(rid):
sub_config = relation_get('subordinate_configuration', sub_config = relation_get('subordinate_configuration',
@ -683,14 +741,29 @@ class SubordinateConfigContext(OSContextGenerator):
sub_config = sub_config[self.config_file] sub_config = sub_config[self.config_file]
for k, v in sub_config.iteritems(): for k, v in sub_config.iteritems():
ctxt[k] = v if k == 'sections':
for section, config_dict in v.iteritems():
log("adding section '%s'" % (section))
ctxt[k][section] = config_dict
else:
ctxt[k] = v
if not ctxt: log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
ctxt['sections'] = {}
return ctxt return ctxt
class LogLevelContext(OSContextGenerator):
def __call__(self):
ctxt = {}
ctxt['debug'] = \
False if config('debug') is None else config('debug')
ctxt['verbose'] = \
False if config('verbose') is None else config('verbose')
return ctxt
class SyslogContext(OSContextGenerator): class SyslogContext(OSContextGenerator):
def __call__(self): def __call__(self):

View File

@ -0,0 +1,75 @@
from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
# Endpoint type identifiers.
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

# Maps each endpoint type to the charm config option naming its network
# (in CIDR form) and the unit-get key used as a fallback address when
# that option is unset.
_address_map = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address'
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address'
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address'
    }
}
def canonical_url(configs, endpoint_type=PUBLIC):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :configs OSTemplateRenderer: A config templating object to inspect for
        a complete https context.
    :endpoint_type str: The endpoint type to resolve.

    :returns str: Base URL for services on the current service unit.
    '''
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        # IPv6 literals must be bracketed inside a URL.
        address = "[{}]".format(address)
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    return '%s://%s' % (scheme, address)
def resolve_address(endpoint_type=PUBLIC):
    """Resolve the address this unit should advertise for endpoint_type.

    Clustered units resolve against the configured VIP(s); standalone
    units fall back to a local address within the configured network.
    Raises ValueError when no suitable address can be determined.
    """
    net_config = config(_address_map[endpoint_type]['config'])
    resolved = None
    if is_clustered():
        if net_config is None:
            # No per-endpoint network configured; assume a single,
            # simple VIP and pass it back directly.
            resolved = config('vip')
        else:
            # Pick the VIP that lives inside this endpoint's network
            # (the last matching VIP wins, as before).
            for vip in config('vip').split():
                if is_address_in_network(net_config, vip):
                    resolved = vip
    else:
        fallback = unit_get(_address_map[endpoint_type]['fallback'])
        resolved = get_address_in_network(net_config, fallback)
    if resolved is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    return resolved

View File

@ -128,6 +128,20 @@ def neutron_plugins():
'server_packages': ['neutron-server', 'server_packages': ['neutron-server',
'neutron-plugin-vmware'], 'neutron-plugin-vmware'],
'server_services': ['neutron-server'] 'server_services': ['neutron-server']
},
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
} }
} }
if release >= 'icehouse': if release >= 'icehouse':

View File

@ -27,7 +27,12 @@ listen stats :8888
{% if units -%} {% if units -%}
{% for service, ports in service_ports.iteritems() -%} {% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }} listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
balance roundrobin
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
listen {{ service }}_ipv6 :::{{ ports[0] }}
balance roundrobin balance roundrobin
{% for unit, address in units.iteritems() -%} {% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check server {{ unit }} {{ address }}:{{ ports[1] }} check

View File

@ -30,17 +30,17 @@ def get_loader(templates_dir, os_release):
loading dir. loading dir.
A charm may also ship a templates dir with this module A charm may also ship a templates dir with this module
and it will be appended to the bottom of the search list, eg: and it will be appended to the bottom of the search list, eg::
hooks/charmhelpers/contrib/openstack/templates.
:param templates_dir: str: Base template directory containing release hooks/charmhelpers/contrib/openstack/templates
sub-directories.
:param os_release : str: OpenStack release codename to construct template
loader.
:returns : jinja2.ChoiceLoader constructed with a list of :param templates_dir (str): Base template directory containing release
jinja2.FilesystemLoaders, ordered in descending sub-directories.
order by OpenStack release. :param os_release (str): OpenStack release codename to construct template
loader.
:returns: jinja2.ChoiceLoader constructed with a list of
jinja2.FilesystemLoaders, ordered in descending
order by OpenStack release.
""" """
tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
for rel in OPENSTACK_CODENAMES.itervalues()] for rel in OPENSTACK_CODENAMES.itervalues()]
@ -111,7 +111,8 @@ class OSConfigRenderer(object):
and ease the burden of managing config templates across multiple OpenStack and ease the burden of managing config templates across multiple OpenStack
releases. releases.
Basic usage: Basic usage::
# import some common context generates from charmhelpers # import some common context generates from charmhelpers
from charmhelpers.contrib.openstack import context from charmhelpers.contrib.openstack import context
@ -131,21 +132,19 @@ class OSConfigRenderer(object):
# write out all registered configs # write out all registered configs
configs.write_all() configs.write_all()
Details: **OpenStack Releases and template loading**
OpenStack Releases and template loading
---------------------------------------
When the object is instantiated, it is associated with a specific OS When the object is instantiated, it is associated with a specific OS
release. This dictates how the template loader will be constructed. release. This dictates how the template loader will be constructed.
The constructed loader attempts to load the template from several places The constructed loader attempts to load the template from several places
in the following order: in the following order:
- from the most recent OS release-specific template dir (if one exists) - from the most recent OS release-specific template dir (if one exists)
- the base templates_dir - the base templates_dir
- a template directory shipped in the charm with this helper file. - a template directory shipped in the charm with this helper file.
For the example above, '/tmp/templates' contains the following structure::
For the example above, '/tmp/templates' contains the following structure:
/tmp/templates/nova.conf /tmp/templates/nova.conf
/tmp/templates/api-paste.ini /tmp/templates/api-paste.ini
/tmp/templates/grizzly/api-paste.ini /tmp/templates/grizzly/api-paste.ini
@ -169,8 +168,8 @@ class OSConfigRenderer(object):
$CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
us to ship common templates (haproxy, apache) with the helpers. us to ship common templates (haproxy, apache) with the helpers.
Context generators **Context generators**
---------------------------------------
Context generators are used to generate template contexts during hook Context generators are used to generate template contexts during hook
execution. Doing so may require inspecting service relations, charm execution. Doing so may require inspecting service relations, charm
config, etc. When registered, a config file is associated with a list config, etc. When registered, a config file is associated with a list

View File

@ -3,7 +3,6 @@
# Common python helper functions used for OpenStack charms. # Common python helper functions used for OpenStack charms.
from collections import OrderedDict from collections import OrderedDict
import apt_pkg as apt
import subprocess import subprocess
import os import os
import socket import socket
@ -85,6 +84,8 @@ def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.''' '''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = '' rel = ''
if src is None:
return rel
if src in ['distro', 'distro-proposed']: if src in ['distro', 'distro-proposed']:
try: try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@ -132,6 +133,7 @@ def get_os_version_codename(codename):
def get_os_codename_package(package, fatal=True): def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.''' '''Derive OpenStack release codename from an installed package.'''
import apt_pkg as apt
apt.init() apt.init()
# Tell apt to build an in-memory cache to prevent race conditions (if # Tell apt to build an in-memory cache to prevent race conditions (if
@ -189,7 +191,7 @@ def get_os_version_package(pkg, fatal=True):
for version, cname in vers_map.iteritems(): for version, cname in vers_map.iteritems():
if cname == codename: if cname == codename:
return version return version
#e = "Could not determine OpenStack version for package: %s" % pkg # e = "Could not determine OpenStack version for package: %s" % pkg
# error_out(e) # error_out(e)
@ -325,6 +327,7 @@ def openstack_upgrade_available(package):
""" """
import apt_pkg as apt
src = config('openstack-origin') src = config('openstack-origin')
cur_vers = get_os_version_package(package) cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src) available_vers = get_os_version_install_source(src)

View File

@ -303,7 +303,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]): blk_device, fstype, system_services=[]):
""" """
NOTE: This function must only be called from a single service unit for NOTE: This function must only be called from a single service unit for
the same rbd_img otherwise data loss will occur. the same rbd_img otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device, Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point. and the device is formatted and mounted at the given mount_point.

View File

@ -37,6 +37,7 @@ def zap_disk(block_device):
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
def is_device_mounted(device): def is_device_mounted(device):
'''Given a device path, return True if that device is mounted, and False '''Given a device path, return True if that device is mounted, and False
if it isn't. if it isn't.

View File

@ -0,0 +1,116 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import os
class Fstab(file):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`.

    NOTE: Python 2 only -- it subclasses the Python 2 builtin ``file``
    type and opens the fstab in 'r+' (read/write) mode.
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file.

        The six fields mirror the whitespace-separated fstab columns:
        device, mountpoint, filesystem, options, and the two trailing
        numeric columns ``d`` and ``p`` (presumably dump frequency and
        fsck pass order per fstab(5) -- confirm against callers).
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem
            # fstab requires a non-empty options column; fall back to
            # the conventional "defaults" keyword.
            if not options:
                options = "defaults"
            self.options = options
            self.d = d
            self.p = p

        def __eq__(self, o):
            # Two entries are equal iff their serialized fstab lines match.
            return str(self) == str(o)

        def __str__(self):
            # Render as a single space-separated fstab line (no trailing
            # newline; callers append '\n' when writing).
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    # Default location of the system fstab: /etc/fstab.
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        """Open the fstab at `path` (default: DEFAULT_PATH) in 'r+' mode."""
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        """Parse a single fstab line into a Fstab.Entry.

        NOTE(review): a blank line splits to zero fields and raises
        TypeError (missing constructor args), which callers that only
        catch ValueError (see `entries`) will not handle -- verify the
        intended input never contains blank lines.
        """
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield a Fstab.Entry for every non-comment line.

        Lines whose parsing raises ValueError are silently skipped.
        Rewinds the file to offset 0 before reading, leaving the file
        position at EOF afterwards.
        """
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose attribute `attr` equals `value`,
        or None if no entry matches."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append `entry` to the file.

        Returns False if an entry for the same device already exists,
        otherwise returns the entry that was written.
        """
        if self.get_entry_by_attr('device', entry.device):
            return False
        # Iterating self.entries above left the file position at EOF,
        # so this write appends to the file.
        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching `entry` and rewrite the file.

        Returns True if a matching line was found and removed, False
        otherwise.
        """
        self.seek(0)
        lines = self.readlines()

        found = False
        for index, line in enumerate(lines):
            if not line.startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        # Rewrite the remaining lines from offset 0 and truncate the
        # stale tail left over from the original (longer) contents.
        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at `mountpoint` from the fstab at
        `path` (default /etc/fstab).

        Returns True on removal, False if no such entry exists.
        """
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Append a new entry to the fstab at `path`.

        Returns False if the device is already present, otherwise the
        Entry that was added.
        """
        return cls(path=path).add_entry(Fstab.Entry(device,
                                        mountpoint, filesystem,
                                        options=options))

View File

@ -25,7 +25,7 @@ cache = {}
def cached(func): def cached(func):
"""Cache return values for multiple executions of func + args """Cache return values for multiple executions of func + args
For example: For example::
@cached @cached
def unit_get(attribute): def unit_get(attribute):
@ -445,18 +445,19 @@ class UnregisteredHookError(Exception):
class Hooks(object): class Hooks(object):
"""A convenient handler for hook functions. """A convenient handler for hook functions.
Example: Example::
hooks = Hooks() hooks = Hooks()
# register a hook, taking its name from the function name # register a hook, taking its name from the function name
@hooks.hook() @hooks.hook()
def install(): def install():
... pass # your code here
# register a hook, providing a custom hook name # register a hook, providing a custom hook name
@hooks.hook("config-changed") @hooks.hook("config-changed")
def config_changed(): def config_changed():
... pass # your code here
if __name__ == "__main__": if __name__ == "__main__":
# execute a hook based on the name the program is called by # execute a hook based on the name the program is called by

View File

@ -12,11 +12,11 @@ import random
import string import string
import subprocess import subprocess
import hashlib import hashlib
import apt_pkg
from collections import OrderedDict from collections import OrderedDict
from hookenv import log from hookenv import log
from fstab import Fstab
def service_start(service_name): def service_start(service_name):
@ -35,7 +35,8 @@ def service_restart(service_name):
def service_reload(service_name, restart_on_failure=False): def service_reload(service_name, restart_on_failure=False):
"""Reload a system service, optionally falling back to restart if reload fails""" """Reload a system service, optionally falling back to restart if
reload fails"""
service_result = service('reload', service_name) service_result = service('reload', service_name)
if not service_result and restart_on_failure: if not service_result and restart_on_failure:
service_result = service('restart', service_name) service_result = service('restart', service_name)
@ -144,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444):
target.write(content) target.write(content)
def mount(device, mountpoint, options=None, persist=False): def fstab_remove(mp):
"""Remove the given mountpoint entry from /etc/fstab
"""
return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
"""Adds the given device entry to the /etc/fstab file
"""
return Fstab.add(dev, mp, fs, options=options)
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint""" """Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount'] cmd_args = ['mount']
if options is not None: if options is not None:
@ -155,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False):
except subprocess.CalledProcessError, e: except subprocess.CalledProcessError, e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False return False
if persist: if persist:
# TODO: update fstab return fstab_add(device, mountpoint, filesystem, options=options)
pass
return True return True
@ -169,9 +182,9 @@ def umount(mountpoint, persist=False):
except subprocess.CalledProcessError, e: except subprocess.CalledProcessError, e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output)) log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False return False
if persist: if persist:
# TODO: update fstab return fstab_remove(mountpoint)
pass
return True return True
@ -198,13 +211,13 @@ def file_hash(path):
def restart_on_change(restart_map, stopstart=False): def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing """Restart services based on configuration files changing
This function is used a decorator, for example This function is used a decorator, for example::
@restart_on_change({ @restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
}) })
def ceph_client_changed(): def ceph_client_changed():
... pass # your code here
In this example, the cinder-api and cinder-volume services In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the would be restarted if /etc/ceph/ceph.conf is changed by the
@ -300,12 +313,19 @@ def get_nic_hwaddr(nic):
def cmp_pkgrevno(package, revno, pkgcache=None): def cmp_pkgrevno(package, revno, pkgcache=None):
'''Compare supplied revno with the revno of the installed package '''Compare supplied revno with the revno of the installed package
1 => Installed revno is greater than supplied arg
0 => Installed revno is the same as supplied arg * 1 => Installed revno is greater than supplied arg
-1 => Installed revno is less than supplied arg * 0 => Installed revno is the same as supplied arg
* -1 => Installed revno is less than supplied arg
''' '''
import apt_pkg
if not pkgcache: if not pkgcache:
apt_pkg.init() apt_pkg.init()
# Force Apt to build its cache in memory. That way we avoid race
# conditions with other applications building the cache in the same
# place.
apt_pkg.config.set("Dir::Cache::pkgcache", "")
pkgcache = apt_pkg.Cache() pkgcache = apt_pkg.Cache()
pkg = pkgcache[package] pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)

View File

@ -13,7 +13,6 @@ from charmhelpers.core.hookenv import (
config, config,
log, log,
) )
import apt_pkg
import os import os
@ -117,6 +116,7 @@ class BaseFetchHandler(object):
def filter_installed_packages(packages): def filter_installed_packages(packages):
"""Returns a list of packages that require installation""" """Returns a list of packages that require installation"""
import apt_pkg
apt_pkg.init() apt_pkg.init()
# Tell apt to build an in-memory cache to prevent race conditions (if # Tell apt to build an in-memory cache to prevent race conditions (if
@ -235,31 +235,39 @@ def configure_sources(update=False,
sources_var='install_sources', sources_var='install_sources',
keys_var='install_keys'): keys_var='install_keys'):
""" """
Configure multiple sources from charm configuration Configure multiple sources from charm configuration.
The lists are encoded as yaml fragments in the configuration.
The frament needs to be included as a string.
Example config: Example config:
install_sources: install_sources: |
- "ppa:foo" - "ppa:foo"
- "http://example.com/repo precise main" - "http://example.com/repo precise main"
install_keys: install_keys: |
- null - null
- "a1b2c3d4" - "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted. Note that 'null' (a.k.a. None) should not be quoted.
""" """
sources = safe_load(config(sources_var)) sources = safe_load((config(sources_var) or '').strip()) or []
keys = config(keys_var) keys = safe_load((config(keys_var) or '').strip()) or None
if keys is not None:
keys = safe_load(keys) if isinstance(sources, basestring):
if isinstance(sources, basestring) and ( sources = [sources]
keys is None or isinstance(keys, basestring)):
add_source(sources, keys) if keys is None:
for source in sources:
add_source(source, None)
else: else:
if not len(sources) == len(keys): if isinstance(keys, basestring):
msg = 'Install sources and keys lists are different lengths' keys = [keys]
raise SourceConfigError(msg)
for src_num in range(len(sources)): if len(sources) != len(keys):
add_source(sources[src_num], keys[src_num]) raise SourceConfigError(
'Install sources and keys lists are different lengths')
for source, key in zip(sources, keys):
add_source(source, key)
if update: if update:
apt_update(fatal=True) apt_update(fatal=True)

View File

@ -39,7 +39,8 @@ class BzrUrlFetchHandler(BaseFetchHandler):
def install(self, source): def install(self, source):
url_parts = self.parse_url(source) url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1] branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir): if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755) mkdir(dest_dir, perms=0755)
try: try:

View File

@ -39,6 +39,7 @@ class ApacheSSLContext(context.ApacheSSLContext):
class NeutronAPIContext(context.OSContextGenerator): class NeutronAPIContext(context.OSContextGenerator):
def __call__(self): def __call__(self):
log('Generating template context from neutron api relation') log('Generating template context from neutron api relation')
ctxt = {} ctxt = {}

View File

@ -19,6 +19,7 @@ from charmhelpers.core.hookenv import (
relation_get, relation_get,
relation_ids, relation_ids,
relation_set, relation_set,
related_units,
open_port, open_port,
unit_get, unit_get,
) )
@ -45,7 +46,7 @@ from charmhelpers.contrib.openstack.neutron import (
from nova_cc_context import ( from nova_cc_context import (
NeutronAPIContext NeutronAPIContext
) )
from nova_cc_utils import ( from nova_cc_utils import (
api_port, api_port,
@ -60,8 +61,8 @@ from nova_cc_utils import (
save_script_rc, save_script_rc,
ssh_compute_add, ssh_compute_add,
ssh_compute_remove, ssh_compute_remove,
ssh_known_hosts_b64, ssh_known_hosts_lines,
ssh_authorized_keys_b64, ssh_authorized_keys_lines,
register_configs, register_configs,
restart_map, restart_map,
volume_service, volume_service,
@ -71,10 +72,11 @@ from nova_cc_utils import (
NEUTRON_CONF, NEUTRON_CONF,
QUANTUM_API_PASTE, QUANTUM_API_PASTE,
console_attributes, console_attributes,
service_guard,
guard_map,
) )
from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.cluster import (
canonical_url,
eligible_leader, eligible_leader,
get_hacluster_config, get_hacluster_config,
is_leader, is_leader,
@ -82,6 +84,16 @@ from charmhelpers.contrib.hahelpers.cluster import (
from charmhelpers.payload.execd import execd_preinstall from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.openstack.ip import (
canonical_url,
PUBLIC, INTERNAL, ADMIN
)
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address
)
hooks = Hooks() hooks = Hooks()
CONFIGS = register_configs() CONFIGS = register_configs()
@ -103,6 +115,8 @@ def install():
@hooks.hook('config-changed') @hooks.hook('config-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map(), stopstart=True) @restart_on_change(restart_map(), stopstart=True)
def config_changed(): def config_changed():
global CONFIGS global CONFIGS
@ -116,6 +130,8 @@ def config_changed():
apt_install(console_attributes('packages'), fatal=True) apt_install(console_attributes('packages'), fatal=True)
[compute_joined(rid=rid) [compute_joined(rid=rid)
for rid in relation_ids('cloud-compute')] for rid in relation_ids('cloud-compute')]
for r_id in relation_ids('identity-service'):
identity_joined(rid=r_id)
@hooks.hook('amqp-relation-joined') @hooks.hook('amqp-relation-joined')
@ -126,6 +142,8 @@ def amqp_joined(relation_id=None):
@hooks.hook('amqp-relation-changed') @hooks.hook('amqp-relation-changed')
@hooks.hook('amqp-relation-departed') @hooks.hook('amqp-relation-departed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def amqp_changed(): def amqp_changed():
if 'amqp' not in CONFIGS.complete_contexts(): if 'amqp' not in CONFIGS.complete_contexts():
@ -184,6 +202,8 @@ def pgsql_neutron_db_joined():
@hooks.hook('shared-db-relation-changed') @hooks.hook('shared-db-relation-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def db_changed(): def db_changed():
if 'shared-db' not in CONFIGS.complete_contexts(): if 'shared-db' not in CONFIGS.complete_contexts():
@ -199,6 +219,8 @@ def db_changed():
@hooks.hook('pgsql-nova-db-relation-changed') @hooks.hook('pgsql-nova-db-relation-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def postgresql_nova_db_changed(): def postgresql_nova_db_changed():
if 'pgsql-nova-db' not in CONFIGS.complete_contexts(): if 'pgsql-nova-db' not in CONFIGS.complete_contexts():
@ -214,6 +236,8 @@ def postgresql_nova_db_changed():
@hooks.hook('pgsql-neutron-db-relation-changed') @hooks.hook('pgsql-neutron-db-relation-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def postgresql_neutron_db_changed(): def postgresql_neutron_db_changed():
if network_manager() in ['neutron', 'quantum']: if network_manager() in ['neutron', 'quantum']:
@ -223,6 +247,8 @@ def postgresql_neutron_db_changed():
@hooks.hook('image-service-relation-changed') @hooks.hook('image-service-relation-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def image_service_changed(): def image_service_changed():
if 'image-service' not in CONFIGS.complete_contexts(): if 'image-service' not in CONFIGS.complete_contexts():
@ -236,11 +262,17 @@ def image_service_changed():
def identity_joined(rid=None): def identity_joined(rid=None):
if not eligible_leader(CLUSTER_RES): if not eligible_leader(CLUSTER_RES):
return return
base_url = canonical_url(CONFIGS) public_url = canonical_url(CONFIGS, PUBLIC)
relation_set(relation_id=rid, **determine_endpoints(base_url)) internal_url = canonical_url(CONFIGS, INTERNAL)
admin_url = canonical_url(CONFIGS, ADMIN)
relation_set(relation_id=rid, **determine_endpoints(public_url,
internal_url,
admin_url))
@hooks.hook('identity-service-relation-changed') @hooks.hook('identity-service-relation-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def identity_changed(): def identity_changed():
if 'identity-service' not in CONFIGS.complete_contexts(): if 'identity-service' not in CONFIGS.complete_contexts():
@ -264,6 +296,8 @@ def identity_changed():
@hooks.hook('nova-volume-service-relation-joined', @hooks.hook('nova-volume-service-relation-joined',
'cinder-volume-service-relation-joined') 'cinder-volume-service-relation-joined')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def volume_joined(): def volume_joined():
CONFIGS.write(NOVA_CONF) CONFIGS.write(NOVA_CONF)
@ -326,8 +360,8 @@ def neutron_settings():
'quantum_plugin': neutron_plugin(), 'quantum_plugin': neutron_plugin(),
'region': config('region'), 'region': config('region'),
'quantum_security_groups': config('quantum-security-groups'), 'quantum_security_groups': config('quantum-security-groups'),
'quantum_url': (canonical_url(CONFIGS) + ':' + 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
str(api_port('neutron-server'))), str(api_port('neutron-server'))),
}) })
neutron_url = urlparse(neutron_settings['quantum_url']) neutron_url = urlparse(neutron_settings['quantum_url'])
neutron_settings['quantum_host'] = neutron_url.hostname neutron_settings['quantum_host'] = neutron_url.hostname
@ -399,21 +433,63 @@ def compute_joined(rid=None, remote_restart=False):
@hooks.hook('cloud-compute-relation-changed') @hooks.hook('cloud-compute-relation-changed')
def compute_changed(): def compute_changed(rid=None, unit=None):
migration_auth = relation_get('migration_auth_type') rel_settings = relation_get(rid=rid, unit=unit)
if migration_auth == 'ssh': if 'migration_auth_type' not in rel_settings:
key = relation_get('ssh_public_key') return
if rel_settings['migration_auth_type'] == 'ssh':
key = rel_settings.get('ssh_public_key')
if not key: if not key:
log('SSH migration set but peer did not publish key.') log('SSH migration set but peer did not publish key.')
return return
ssh_compute_add(key) ssh_compute_add(key, rid=rid, unit=unit)
relation_set(known_hosts=ssh_known_hosts_b64(), index = 0
authorized_keys=ssh_authorized_keys_b64()) for line in ssh_known_hosts_lines(unit=unit):
if relation_get('nova_ssh_public_key'): relation_set(
key = relation_get('nova_ssh_public_key') relation_id=rid,
ssh_compute_add(key, user='nova') relation_settings={
relation_set(nova_known_hosts=ssh_known_hosts_b64(user='nova'), 'known_hosts_{}'.format(index): line})
nova_authorized_keys=ssh_authorized_keys_b64(user='nova')) index += 1
relation_set(relation_id=rid, known_hosts_max_index=index)
index = 0
for line in ssh_authorized_keys_lines(unit=unit):
relation_set(
relation_id=rid,
relation_settings={
'authorized_keys_{}'.format(index): line})
index += 1
relation_set(relation_id=rid, authorized_keys_max_index=index)
if 'nova_ssh_public_key' not in rel_settings:
return
if rel_settings['nova_ssh_public_key']:
ssh_compute_add(rel_settings['nova_ssh_public_key'],
rid=rid, unit=unit, user='nova')
index = 0
for line in ssh_known_hosts_lines(unit=unit, user='nova'):
relation_set(
relation_id=rid,
relation_settings={
'{}_known_hosts_{}'.format(
'nova',
index): line})
index += 1
relation_set(
relation_id=rid,
relation_settings={
'{}_known_hosts_max_index'.format('nova'): index})
index = 0
for line in ssh_authorized_keys_lines(unit=unit, user='nova'):
relation_set(
relation_id=rid,
relation_settings={
'{}_authorized_keys_{}'.format(
'nova',
index): line})
index += 1
relation_set(
relation_id=rid,
relation_settings={
'{}_authorized_keys_max_index'.format('nova'): index})
@hooks.hook('cloud-compute-relation-departed') @hooks.hook('cloud-compute-relation-departed')
@ -442,6 +518,8 @@ def quantum_joined(rid=None):
@hooks.hook('cluster-relation-changed', @hooks.hook('cluster-relation-changed',
'cluster-relation-departed') 'cluster-relation-departed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map(), stopstart=True) @restart_on_change(restart_map(), stopstart=True)
def cluster_changed(): def cluster_changed():
CONFIGS.write_all() CONFIGS.write_all()
@ -451,15 +529,28 @@ def cluster_changed():
def ha_joined(): def ha_joined():
config = get_hacluster_config() config = get_hacluster_config()
resources = { resources = {
'res_nova_vip': 'ocf:heartbeat:IPaddr2',
'res_nova_haproxy': 'lsb:haproxy', 'res_nova_haproxy': 'lsb:haproxy',
} }
vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
(config['vip'], config['vip_cidr'], config['vip_iface'])
resource_params = { resource_params = {
'res_nova_vip': vip_params,
'res_nova_haproxy': 'op monitor interval="5s"' 'res_nova_haproxy': 'op monitor interval="5s"'
} }
vip_group = []
for vip in config['vip'].split():
iface = get_iface_for_address(vip)
if iface is not None:
vip_key = 'res_nova_{}_vip'.format(iface)
resources[vip_key] = 'ocf:heartbeat:IPaddr2'
resource_params[vip_key] = (
'params ip="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(vip=vip,
iface=iface,
netmask=get_netmask_for_address(vip))
)
vip_group.append(vip_key)
if len(vip_group) > 1:
relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})
init_services = { init_services = {
'res_nova_haproxy': 'haproxy' 'res_nova_haproxy': 'haproxy'
} }
@ -498,6 +589,8 @@ def ha_changed():
'pgsql-nova-db-relation-broken', 'pgsql-nova-db-relation-broken',
'pgsql-neutron-db-relation-broken', 'pgsql-neutron-db-relation-broken',
'quantum-network-service-relation-broken') 'quantum-network-service-relation-broken')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
def relation_broken(): def relation_broken():
CONFIGS.write_all() CONFIGS.write_all()
@ -531,13 +624,15 @@ def nova_vmware_relation_joined(rid=None):
rel_settings.update({ rel_settings.update({
'quantum_plugin': neutron_plugin(), 'quantum_plugin': neutron_plugin(),
'quantum_security_groups': config('quantum-security-groups'), 'quantum_security_groups': config('quantum-security-groups'),
'quantum_url': (canonical_url(CONFIGS) + ':' + 'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
str(api_port('neutron-server')))}) str(api_port('neutron-server')))})
relation_set(relation_id=rid, **rel_settings) relation_set(relation_id=rid, **rel_settings)
@hooks.hook('nova-vmware-relation-changed') @hooks.hook('nova-vmware-relation-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def nova_vmware_relation_changed(): def nova_vmware_relation_changed():
CONFIGS.write('/etc/nova/nova.conf') CONFIGS.write('/etc/nova/nova.conf')
@ -549,6 +644,9 @@ def upgrade_charm():
amqp_joined(relation_id=r_id) amqp_joined(relation_id=r_id)
for r_id in relation_ids('identity-service'): for r_id in relation_ids('identity-service'):
identity_joined(rid=r_id) identity_joined(rid=r_id)
for r_id in relation_ids('cloud-compute'):
for unit in related_units(r_id):
compute_changed(r_id, unit)
@hooks.hook('neutron-api-relation-joined') @hooks.hook('neutron-api-relation-joined')
@ -561,11 +659,13 @@ def neutron_api_relation_joined(rid=None):
service_stop('neutron-server') service_stop('neutron-server')
for id_rid in relation_ids('identity-service'): for id_rid in relation_ids('identity-service'):
identity_joined(rid=id_rid) identity_joined(rid=id_rid)
nova_url = canonical_url(CONFIGS) + ":8774/v2" nova_url = canonical_url(CONFIGS, INTERNAL) + ":8774/v2"
relation_set(relation_id=rid, nova_url=nova_url) relation_set(relation_id=rid, nova_url=nova_url)
@hooks.hook('neutron-api-relation-changed') @hooks.hook('neutron-api-relation-changed')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def neutron_api_relation_changed(): def neutron_api_relation_changed():
CONFIGS.write(NOVA_CONF) CONFIGS.write(NOVA_CONF)
@ -576,6 +676,8 @@ def neutron_api_relation_changed():
@hooks.hook('neutron-api-relation-broken') @hooks.hook('neutron-api-relation-broken')
@service_guard(guard_map(), CONFIGS,
active=config('service-guard'))
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def neutron_api_relation_broken(): def neutron_api_relation_broken():
if os.path.isfile('/etc/init/neutron-server.override'): if os.path.isfile('/etc/init/neutron-server.override'):

View File

@ -40,14 +40,15 @@ from charmhelpers.core.hookenv import (
from charmhelpers.core.host import ( from charmhelpers.core.host import (
service_start, service_start,
service_stop,
service_running
) )
import nova_cc_context import nova_cc_context
TEMPLATES = 'templates/' TEMPLATES = 'templates/'
CLUSTER_RES = 'res_nova_vip' CLUSTER_RES = 'grp_nova_vips'
# removed from original: charm-helper-sh # removed from original: charm-helper-sh
BASE_PACKAGES = [ BASE_PACKAGES = [
@ -566,8 +567,11 @@ def keystone_ca_cert_b64():
return b64encode(_in.read()) return b64encode(_in.read())
def ssh_directory_for_unit(user=None): def ssh_directory_for_unit(unit=None, user=None):
remote_service = remote_unit().split('/')[0] if unit:
remote_service = unit.split('/')[0]
else:
remote_service = remote_unit().split('/')[0]
if user: if user:
remote_service = "{}_{}".format(remote_service, user) remote_service = "{}_{}".format(remote_service, user)
_dir = os.path.join(NOVA_SSH_DIR, remote_service) _dir = os.path.join(NOVA_SSH_DIR, remote_service)
@ -581,29 +585,29 @@ def ssh_directory_for_unit(user=None):
return _dir return _dir
def known_hosts(user=None): def known_hosts(unit=None, user=None):
return os.path.join(ssh_directory_for_unit(user), 'known_hosts') return os.path.join(ssh_directory_for_unit(unit, user), 'known_hosts')
def authorized_keys(user=None): def authorized_keys(unit=None, user=None):
return os.path.join(ssh_directory_for_unit(user), 'authorized_keys') return os.path.join(ssh_directory_for_unit(unit, user), 'authorized_keys')
def ssh_known_host_key(host, user=None): def ssh_known_host_key(host, unit=None, user=None):
cmd = ['ssh-keygen', '-f', known_hosts(user), '-H', '-F', host] cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host]
try: try:
return subprocess.check_output(cmd).strip() return subprocess.check_output(cmd).strip()
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
return None return None
def remove_known_host(host, user=None): def remove_known_host(host, unit=None, user=None):
log('Removing SSH known host entry for compute host at %s' % host) log('Removing SSH known host entry for compute host at %s' % host)
cmd = ['ssh-keygen', '-f', known_hosts(user), '-R', host] cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-R', host]
subprocess.check_call(cmd) subprocess.check_call(cmd)
def add_known_host(host, user=None): def add_known_host(host, unit=None, user=None):
'''Add variations of host to a known hosts file.''' '''Add variations of host to a known hosts file.'''
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
try: try:
@ -612,33 +616,34 @@ def add_known_host(host, user=None):
log('Could not obtain SSH host key from %s' % host, level=ERROR) log('Could not obtain SSH host key from %s' % host, level=ERROR)
raise e raise e
current_key = ssh_known_host_key(host, user) current_key = ssh_known_host_key(host, unit, user)
if current_key: if current_key:
if remote_key == current_key: if remote_key == current_key:
log('Known host key for compute host %s up to date.' % host) log('Known host key for compute host %s up to date.' % host)
return return
else: else:
remove_known_host(host, user) remove_known_host(host, unit, user)
log('Adding SSH host key to known hosts for compute node at %s.' % host) log('Adding SSH host key to known hosts for compute node at %s.' % host)
with open(known_hosts(user), 'a') as out: with open(known_hosts(unit, user), 'a') as out:
out.write(remote_key + '\n') out.write(remote_key + '\n')
def ssh_authorized_key_exists(public_key, user=None): def ssh_authorized_key_exists(public_key, unit=None, user=None):
with open(authorized_keys(user)) as keys: with open(authorized_keys(unit, user)) as keys:
return (' %s ' % public_key) in keys.read() return (' %s ' % public_key) in keys.read()
def add_authorized_key(public_key, user=None): def add_authorized_key(public_key, unit=None, user=None):
with open(authorized_keys(user), 'a') as keys: with open(authorized_keys(unit, user), 'a') as keys:
keys.write(public_key + '\n') keys.write(public_key + '\n')
def ssh_compute_add(public_key, user=None): def ssh_compute_add(public_key, rid=None, unit=None, user=None):
# If remote compute node hands us a hostname, ensure we have a # If remote compute node hands us a hostname, ensure we have a
# known hosts entry for its IP, hostname and FQDN. # known hosts entry for its IP, hostname and FQDN.
private_address = relation_get('private-address') private_address = relation_get(rid=rid, unit=unit,
attribute='private-address')
hosts = [private_address] hosts = [private_address]
if relation_get('hostname'): if relation_get('hostname'):
hosts.append(relation_get('hostname')) hosts.append(relation_get('hostname'))
@ -652,31 +657,41 @@ def ssh_compute_add(public_key, user=None):
hosts.append(hn.split('.')[0]) hosts.append(hn.split('.')[0])
for host in list(set(hosts)): for host in list(set(hosts)):
if not ssh_known_host_key(host, user): if not ssh_known_host_key(host, unit, user):
add_known_host(host, user) add_known_host(host, unit, user)
if not ssh_authorized_key_exists(public_key, user): if not ssh_authorized_key_exists(public_key, unit, user):
log('Saving SSH authorized key for compute host at %s.' % log('Saving SSH authorized key for compute host at %s.' %
private_address) private_address)
add_authorized_key(public_key, user) add_authorized_key(public_key, unit, user)
def ssh_known_hosts_b64(user=None): def ssh_known_hosts_lines(unit=None, user=None):
with open(known_hosts(user)) as hosts: known_hosts_list = []
return b64encode(hosts.read())
with open(known_hosts(unit, user)) as hosts:
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return(known_hosts_list)
def ssh_authorized_keys_b64(user=None): def ssh_authorized_keys_lines(unit=None, user=None):
with open(authorized_keys(user)) as keys: authorized_keys_list = []
return b64encode(keys.read())
with open(authorized_keys(unit, user)) as keys:
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
return(authorized_keys_list)
def ssh_compute_remove(public_key, user=None): def ssh_compute_remove(public_key, unit=None, user=None):
if not (os.path.isfile(authorized_keys(user)) or if not (os.path.isfile(authorized_keys(unit, user)) or
os.path.isfile(known_hosts(user))): os.path.isfile(known_hosts(unit, user))):
return return
with open(authorized_keys(user)) as _keys: with open(authorized_keys(unit, user)) as _keys:
keys = [k.strip() for k in _keys.readlines()] keys = [k.strip() for k in _keys.readlines()]
if public_key not in keys: if public_key not in keys:
@ -684,57 +699,83 @@ def ssh_compute_remove(public_key, user=None):
[keys.remove(key) for key in keys if key == public_key] [keys.remove(key) for key in keys if key == public_key]
with open(authorized_keys(user), 'w') as _keys: with open(authorized_keys(unit, user), 'w') as _keys:
keys = '\n'.join(keys) keys = '\n'.join(keys)
if not keys.endswith('\n'): if not keys.endswith('\n'):
keys += '\n' keys += '\n'
_keys.write(keys) _keys.write(keys)
def determine_endpoints(url): def determine_endpoints(public_url, internal_url, admin_url):
'''Generates a dictionary containing all relevant endpoints to be '''Generates a dictionary containing all relevant endpoints to be
passed to keystone as relation settings.''' passed to keystone as relation settings.'''
region = config('region') region = config('region')
os_rel = os_release('nova-common') os_rel = os_release('nova-common')
if os_rel >= 'grizzly': if os_rel >= 'grizzly':
nova_url = ('%s:%s/v2/$(tenant_id)s' % nova_public_url = ('%s:%s/v2/$(tenant_id)s' %
(url, api_port('nova-api-os-compute'))) (public_url, api_port('nova-api-os-compute')))
nova_internal_url = ('%s:%s/v2/$(tenant_id)s' %
(internal_url, api_port('nova-api-os-compute')))
nova_admin_url = ('%s:%s/v2/$(tenant_id)s' %
(admin_url, api_port('nova-api-os-compute')))
else: else:
nova_url = ('%s:%s/v1.1/$(tenant_id)s' % nova_public_url = ('%s:%s/v1.1/$(tenant_id)s' %
(url, api_port('nova-api-os-compute'))) (public_url, api_port('nova-api-os-compute')))
ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2')) nova_internal_url = ('%s:%s/v1.1/$(tenant_id)s' %
nova_volume_url = ('%s:%s/v1/$(tenant_id)s' % (internal_url, api_port('nova-api-os-compute')))
(url, api_port('nova-api-os-compute'))) nova_admin_url = ('%s:%s/v1.1/$(tenant_id)s' %
neutron_url = '%s:%s' % (url, api_port('neutron-server')) (admin_url, api_port('nova-api-os-compute')))
s3_url = '%s:%s' % (url, api_port('nova-objectstore'))
ec2_public_url = '%s:%s/services/Cloud' % (
public_url, api_port('nova-api-ec2'))
ec2_internal_url = '%s:%s/services/Cloud' % (
internal_url, api_port('nova-api-ec2'))
ec2_admin_url = '%s:%s/services/Cloud' % (admin_url,
api_port('nova-api-ec2'))
nova_volume_public_url = ('%s:%s/v1/$(tenant_id)s' %
(public_url, api_port('nova-api-os-compute')))
nova_volume_internal_url = ('%s:%s/v1/$(tenant_id)s' %
(internal_url,
api_port('nova-api-os-compute')))
nova_volume_admin_url = ('%s:%s/v1/$(tenant_id)s' %
(admin_url, api_port('nova-api-os-compute')))
neutron_public_url = '%s:%s' % (public_url, api_port('neutron-server'))
neutron_internal_url = '%s:%s' % (internal_url, api_port('neutron-server'))
neutron_admin_url = '%s:%s' % (admin_url, api_port('neutron-server'))
s3_public_url = '%s:%s' % (public_url, api_port('nova-objectstore'))
s3_internal_url = '%s:%s' % (internal_url, api_port('nova-objectstore'))
s3_admin_url = '%s:%s' % (admin_url, api_port('nova-objectstore'))
# the base endpoints # the base endpoints
endpoints = { endpoints = {
'nova_service': 'nova', 'nova_service': 'nova',
'nova_region': region, 'nova_region': region,
'nova_public_url': nova_url, 'nova_public_url': nova_public_url,
'nova_admin_url': nova_url, 'nova_admin_url': nova_admin_url,
'nova_internal_url': nova_url, 'nova_internal_url': nova_internal_url,
'ec2_service': 'ec2', 'ec2_service': 'ec2',
'ec2_region': region, 'ec2_region': region,
'ec2_public_url': ec2_url, 'ec2_public_url': ec2_public_url,
'ec2_admin_url': ec2_url, 'ec2_admin_url': ec2_admin_url,
'ec2_internal_url': ec2_url, 'ec2_internal_url': ec2_internal_url,
's3_service': 's3', 's3_service': 's3',
's3_region': region, 's3_region': region,
's3_public_url': s3_url, 's3_public_url': s3_public_url,
's3_admin_url': s3_url, 's3_admin_url': s3_admin_url,
's3_internal_url': s3_url, 's3_internal_url': s3_internal_url,
} }
if relation_ids('nova-volume-service'): if relation_ids('nova-volume-service'):
endpoints.update({ endpoints.update({
'nova-volume_service': 'nova-volume', 'nova-volume_service': 'nova-volume',
'nova-volume_region': region, 'nova-volume_region': region,
'nova-volume_public_url': nova_volume_url, 'nova-volume_public_url': nova_volume_public_url,
'nova-volume_admin_url': nova_volume_url, 'nova-volume_admin_url': nova_volume_admin_url,
'nova-volume_internal_url': nova_volume_url, 'nova-volume_internal_url': nova_volume_internal_url,
}) })
# XXX: Keep these relations named quantum_*?? # XXX: Keep these relations named quantum_*??
@ -750,9 +791,9 @@ def determine_endpoints(url):
endpoints.update({ endpoints.update({
'quantum_service': 'quantum', 'quantum_service': 'quantum',
'quantum_region': region, 'quantum_region': region,
'quantum_public_url': neutron_url, 'quantum_public_url': neutron_public_url,
'quantum_admin_url': neutron_url, 'quantum_admin_url': neutron_admin_url,
'quantum_internal_url': neutron_url, 'quantum_internal_url': neutron_internal_url,
}) })
return endpoints return endpoints
@ -762,3 +803,59 @@ def neutron_plugin():
# quantum-plugin config setting can be safely overriden # quantum-plugin config setting can be safely overriden
# as we only supported OVS in G/neutron # as we only supported OVS in G/neutron
return config('neutron-plugin') or config('quantum-plugin') return config('neutron-plugin') or config('quantum-plugin')
def guard_map():
    '''Map of services and required interfaces that must be present before
    the service should be allowed to start.

    Returns a dict of {service_name: [interface, ...]} consumed by
    service_guard() to decide which services may run.
    '''
    gmap = {}

    # Nova services: conductor only exists from grizzly onwards.
    nova_services = deepcopy(BASE_SERVICES)
    if os_release('nova-common') not in ['essex', 'folsom']:
        nova_services.append('nova-conductor')
    nova_interfaces = ['identity-service', 'amqp']
    if relation_ids('pgsql-nova-db'):
        nova_interfaces.append('pgsql-nova-db')
    else:
        nova_interfaces.append('shared-db')
    # NOTE: every nova service deliberately shares the same interface list
    # object; callers only read from it.
    for svc in nova_services:
        gmap[svc] = nova_interfaces

    net_manager = network_manager()
    if net_manager in ['neutron', 'quantum'] and \
            not is_relation_made('neutron-api'):
        neutron_interfaces = ['identity-service', 'amqp']
        if relation_ids('pgsql-neutron-db'):
            neutron_interfaces.append('pgsql-neutron-db')
        else:
            neutron_interfaces.append('shared-db')
        # Reuse the value captured above rather than calling
        # network_manager() a second time.
        if net_manager == 'quantum':
            gmap['quantum-server'] = neutron_interfaces
        else:
            gmap['neutron-server'] = neutron_interfaces

    return gmap
def service_guard(guard_map, contexts, active=False):
    '''Inhibit services in guard_map from running unless
    required interfaces are found complete in contexts.

    :param guard_map: dict of {service: [required interface, ...]}
                      (shadows the module-level guard_map() helper; the
                      caller typically passes guard_map()'s result).
    :param contexts: templating context collection exposing
                     complete_contexts().
    :param active: when False the decorator is a no-op passthrough.
    '''
    def wrap(f):
        def wrapped_f(*args):
            if active is True:
                # Collect services with at least one incomplete interface.
                # NOTE: a service may be appended once per missing
                # interface; stopping it repeatedly below is harmless.
                incomplete_services = []
                for svc in guard_map:
                    for interface in guard_map[svc]:
                        if interface not in contexts.complete_contexts():
                            incomplete_services.append(svc)
                # The wrapped hook always runs; incomplete services are
                # only stopped afterwards, and only if currently running.
                f(*args)
                for svc in incomplete_services:
                    if service_running(svc):
                        log('Service {} has unfulfilled '
                            'interface requirements, stopping.'.format(svc))
                        service_stop(svc)
            else:
                f(*args)
        return wrapped_f
    return wrap

10
tests/00-setup Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# Install the prerequisites for running the Amulet test suite.

set -ex

# The juju/stable PPA carries current amulet packages.
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
# Install amulet plus the OpenStack client libraries the tests use,
# in a single apt transaction.
sudo apt-get install --yes \
    python-amulet \
    python-glanceclient \
    python-keystoneclient \
    python-novaclient

10
tests/10-basic-precise-essex Executable file
View File

@ -0,0 +1,10 @@
#!/usr/bin/python
"""Amulet tests on a basic nova cloud controller deployment on
   precise-essex."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # precise with no cloud-archive pocket => distro essex.
    NovaCCBasicDeployment(series='precise').run_tests()

18
tests/11-basic-precise-folsom Executable file
View File

@ -0,0 +1,18 @@
#!/usr/bin/python
"""Amulet tests on a basic nova cloud controller deployment on
   precise-folsom."""

import amulet

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync'
    #                fails in shared-db-relation-changed (only fails on folsom)
    amulet.raise_status(amulet.SKIP,
                        msg="Skipping failing test until resolved")

    NovaCCBasicDeployment(series='precise',
                          openstack='cloud:precise-folsom',
                          source='cloud:precise-updates/folsom').run_tests()

12
tests/12-basic-precise-grizzly Executable file
View File

@ -0,0 +1,12 @@
#!/usr/bin/python
"""Amulet tests on a basic nova cloud controller deployment on
   precise-grizzly."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # Deploy precise with the grizzly cloud-archive pocket enabled.
    NovaCCBasicDeployment(series='precise',
                          openstack='cloud:precise-grizzly',
                          source='cloud:precise-updates/grizzly').run_tests()

12
tests/13-basic-precise-havana Executable file
View File

@ -0,0 +1,12 @@
#!/usr/bin/python
"""Amulet tests on a basic nova cloud controller deployment on
precise-havana."""
from basic_deployment import NovaCCBasicDeployment
if __name__ == '__main__':
deployment = NovaCCBasicDeployment(series='precise',
openstack='cloud:precise-havana',
source='cloud:precise-updates/havana')
deployment.run_tests()

12
tests/14-basic-precise-icehouse Executable file
View File

@ -0,0 +1,12 @@
#!/usr/bin/python
"""Amulet tests on a basic nova cloud controller deployment on
precise-icehouse."""
from basic_deployment import NovaCCBasicDeployment
if __name__ == '__main__':
deployment = NovaCCBasicDeployment(series='precise',
openstack='cloud:precise-icehouse',
source='cloud:precise-updates/icehouse')
deployment.run_tests()

10
tests/15-basic-trusty-icehouse Executable file
View File

@ -0,0 +1,10 @@
#!/usr/bin/python
"""Amulet tests on a basic nova cloud controller deployment on
   trusty-icehouse."""

from basic_deployment import NovaCCBasicDeployment

if __name__ == '__main__':
    # trusty with no cloud-archive pocket => distro icehouse.
    NovaCCBasicDeployment(series='trusty').run_tests()

47
tests/README Normal file
View File

@ -0,0 +1,47 @@
This directory provides Amulet tests that focus on verification of Nova Cloud
Controller deployments.
If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.
* To run all tests (starting with 00-setup):
make test
* To run a specific test module (or modules):
juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To run a specific test module (or modules), and keep the environment
deployed after a failure:
juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To re-run a test module against an already deployed environment (one
that was deployed by a previous call to 'juju test --set-e'):
./tests/15-basic-trusty-icehouse
For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without changing
the results beyond the initial run. This enables editing and re-running of a
test module against an already deployed environment, as described above.
Manual debugging tips:
* Set the following env vars before using the OpenStack CLI as admin:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_REGION_NAME=RegionOne
* Set the following env vars before using the OpenStack CLI as demoUser:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=demoTenant
export OS_USERNAME=demoUser
export OS_PASSWORD=password
export OS_REGION_NAME=RegionOne

520
tests/basic_deployment.py Normal file
View File

@ -0,0 +1,520 @@
#!/usr/bin/python
import amulet
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG, # flake8: noqa
ERROR
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(ERROR)
class NovaCCBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic nova cloud controller deployment."""
    def __init__(self, series=None, openstack=None, source=None):
        """Deploy the entire test environment.

        :param series: Ubuntu series to deploy on (e.g. 'precise', 'trusty').
        :param openstack: cloud-archive origin for the OpenStack charms
                          (e.g. 'cloud:precise-havana'); None uses distro.
        :param source: apt source for supporting charms
                       (e.g. 'cloud:precise-updates/havana').

        The constructor runs the whole setup pipeline in order: declare
        services, relate them, configure them, deploy, then resolve the
        sentries/clients the test methods use.
        """
        super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()
        self._initialize_tests()
def _add_services(self):
"""Add the service that we're testing, including the number of units,
where nova-cloud-controller is local, and the other charms are from
the charm store."""
this_service = ('nova-cloud-controller', 1)
other_services = [('mysql', 1), ('rabbitmq-server', 1),
('nova-compute', 2), ('keystone', 1), ('glance', 1)]
super(NovaCCBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'nova-cloud-controller:shared-db': 'mysql:shared-db',
'nova-cloud-controller:identity-service': 'keystone:identity-service',
'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute',
'nova-cloud-controller:image-service': 'glance:image-service',
'nova-compute:image-service': 'glance:image-service',
'nova-compute:shared-db': 'mysql:shared-db',
'nova-compute:amqp': 'rabbitmq-server:amqp',
'keystone:shared-db': 'mysql:shared-db',
'glance:identity-service': 'keystone:identity-service',
'glance:shared-db': 'mysql:shared-db',
'glance:amqp': 'rabbitmq-server:amqp'
}
super(NovaCCBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting'}
configs = {'keystone': keystone_config}
super(NovaCCBasicDeployment, self)._configure_services(configs)
    def _initialize_tests(self):
        """Perform final initialization before tests get run.

        Resolves sentry units for each deployed service, authenticates
        admin/demo users, and (idempotently) creates the demo tenant,
        role and user used by the instance-creation test.
        """
        # Access the sentries for inspecting service units
        self.mysql_sentry = self.d.sentry.unit['mysql/0']
        self.keystone_sentry = self.d.sentry.unit['keystone/0']
        self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
        self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
        self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
        self.glance_sentry = self.d.sentry.unit['glance/0']

        # Authenticate admin with keystone; password matches the
        # 'admin-password' set in _configure_services().
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='openstack',
                                                      tenant='admin')

        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)

        # Create a demo tenant/role/user; guarded by tenant_exists() so
        # re-running against an already-deployed environment is safe.
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
                                                  description='demo tenant',
                                                  enabled=True)
            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='demo@demo.com')

        # Authenticate demo user with keystone
        self.keystone_demo = \
            u.authenticate_keystone_user(self.keystone, user=self.demo_user,
                                         password='password',
                                         tenant=self.demo_tenant)

        # Authenticate demo user with nova-api
        self.nova_demo = u.authenticate_nova_user(self.keystone,
                                                  user=self.demo_user,
                                                  password='password',
                                                  tenant=self.demo_tenant)
def test_services(self):
"""Verify the expected services are running on the corresponding
service units."""
commands = {
self.mysql_sentry: ['status mysql'],
self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
self.nova_cc_sentry: ['status nova-api-ec2',
'status nova-api-os-compute',
'status nova-objectstore',
'status nova-cert',
'status nova-scheduler'],
self.nova_compute_sentry: ['status nova-compute',
'status nova-network',
'status nova-api'],
self.keystone_sentry: ['status keystone'],
self.glance_sentry: ['status glance-registry', 'status glance-api']
}
if self._get_openstack_release() >= self.precise_grizzly:
commands[self.nova_cc_sentry] = ['status nova-conductor']
ret = u.validate_services(commands)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
endpoint_vol = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
endpoint_id = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
if self._get_openstack_release() >= self.precise_folsom:
endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null
expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
'ec2': [endpoint_vol], 'identity': [endpoint_id]}
actual = self.keystone_demo.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_openstack_compute_api_endpoint(self):
"""Verify the openstack compute api (osapi) endpoint data."""
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '8774'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'osapi endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_ec2_api_endpoint(self):
"""Verify the EC2 api endpoint data."""
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '8773'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'EC2 endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_s3_api_endpoint(self):
"""Verify the S3 api endpoint data."""
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '3333'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'S3 endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_cc_shared_db_relation(self):
"""Verify the nova-cc to mysql shared-db relation data"""
unit = self.nova_cc_sentry
relation = ['shared-db', 'mysql:shared-db']
expected = {
'private-address': u.valid_ip,
'nova_database': 'nova',
'nova_username': 'nova',
'nova_hostname': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova-cc shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_mysql_shared_db_relation(self):
"""Verify the mysql to nova-cc shared-db relation data"""
unit = self.mysql_sentry
relation = ['shared-db', 'nova-cloud-controller:shared-db']
expected = {
'private-address': u.valid_ip,
'nova_password': u.not_null,
'db_host': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('mysql shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_cc_identity_service_relation(self):
"""Verify the nova-cc to keystone identity-service relation data"""
unit = self.nova_cc_sentry
relation = ['identity-service', 'keystone:identity-service']
expected = {
'nova_internal_url': u.valid_url,
'nova_public_url': u.valid_url,
's3_public_url': u.valid_url,
's3_service': 's3',
'ec2_admin_url': u.valid_url,
'ec2_internal_url': u.valid_url,
'nova_service': 'nova',
's3_region': 'RegionOne',
'private-address': u.valid_ip,
'nova_region': 'RegionOne',
'ec2_public_url': u.valid_url,
'ec2_region': 'RegionOne',
's3_internal_url': u.valid_url,
's3_admin_url': u.valid_url,
'nova_admin_url': u.valid_url,
'ec2_service': 'ec2'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova-cc identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_keystone_identity_service_relation(self):
"""Verify the keystone to nova-cc identity-service relation data"""
unit = self.keystone_sentry
relation = ['identity-service',
'nova-cloud-controller:identity-service']
expected = {
'service_protocol': 'http',
'service_tenant': 'services',
'admin_token': 'ubuntutesting',
'service_password': u.not_null,
'service_port': '5000',
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'https_keystone': 'False',
'auth_host': u.valid_ip,
'service_username': 's3_ec2_nova',
'service_tenant_id': u.not_null,
'service_host': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('keystone identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_cc_amqp_relation(self):
"""Verify the nova-cc to rabbitmq-server amqp relation data"""
unit = self.nova_cc_sentry
relation = ['amqp', 'rabbitmq-server:amqp']
expected = {
'username': 'nova',
'private-address': u.valid_ip,
'vhost': 'openstack'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova-cc amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_rabbitmq_amqp_relation(self):
"""Verify the rabbitmq-server to nova-cc amqp relation data"""
unit = self.rabbitmq_sentry
relation = ['amqp', 'nova-cloud-controller:amqp']
expected = {
'private-address': u.valid_ip,
'password': u.not_null,
'hostname': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('rabbitmq amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_cc_cloud_compute_relation(self):
"""Verify the nova-cc to nova-compute cloud-compute relation data"""
unit = self.nova_cc_sentry
relation = ['cloud-compute', 'nova-compute:cloud-compute']
expected = {
'volume_service': 'cinder',
'network_manager': 'flatdhcpmanager',
'ec2_host': u.valid_ip,
'private-address': u.valid_ip,
'restart_trigger': u.not_null
}
if self._get_openstack_release() == self.precise_essex:
expected['volume_service'] = 'nova-volume'
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova-cc cloud-compute', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_cloud_compute_relation(self):
"""Verify the nova-compute to nova-cc cloud-compute relation data"""
unit = self.nova_compute_sentry
relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute']
expected = {
'private-address': u.valid_ip,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova-compute cloud-compute', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_cc_image_service_relation(self):
"""Verify the nova-cc to glance image-service relation data"""
unit = self.nova_cc_sentry
relation = ['image-service', 'glance:image-service']
expected = {
'private-address': u.valid_ip,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova-cc image-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_glance_image_service_relation(self):
"""Verify the glance to nova-cc image-service relation data"""
unit = self.glance_sentry
relation = ['image-service', 'nova-cloud-controller:image-service']
expected = {
'private-address': u.valid_ip,
'glance-api-server': u.valid_url
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('glance image-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed."""
# NOTE(coreycb): Skipping failing test on essex until resolved.
# config-flags don't take effect on essex.
if self._get_openstack_release() == self.precise_essex:
u.log.error("Skipping failing test until resolved")
return
services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore',
'nova-cert', 'nova-scheduler', 'nova-conductor']
self.d.configure('nova-cloud-controller',
{'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'})
pgrep_full = True
time = 20
conf = '/etc/nova/nova.conf'
for s in services:
if not u.service_restarted(self.nova_cc_sentry, s, conf,
pgrep_full=True, sleep_time=time):
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
time = 0
    def test_nova_default_config(self):
        """Verify the data in the nova config file's default section."""
        # NOTE(coreycb): Currently no way to test on essex because config file
        # has no section headers.
        if self._get_openstack_release() == self.precise_essex:
            return

        unit = self.nova_cc_sentry
        conf = '/etc/nova/nova.conf'
        # Pull live relation data so the expected values track the
        # credentials/endpoints this particular deployment negotiated.
        rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
                                                          'nova-cloud-controller:amqp')
        glance_relation = self.glance_sentry.relation('image-service',
                                                      'nova-cloud-controller:image-service')
        mysql_relation = self.mysql_sentry.relation('shared-db',
                                                    'nova-cloud-controller:shared-db')
        # Expected SQLAlchemy connection string for the nova database.
        db_uri = "mysql://{}:{}@{}/{}".format('nova',
                                              mysql_relation['nova_password'],
                                              mysql_relation['db_host'],
                                              'nova')
        # Public identity endpoint as seen by the demo user's catalog.
        keystone_ep = self.keystone_demo.service_catalog.url_for(\
                      service_type='identity',
                      endpoint_type='publicURL')
        keystone_ec2 = "{}/ec2tokens".format(keystone_ep)

        # Expected key/value pairs in the [DEFAULT] section of nova.conf.
        expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
                    'dhcpbridge': '/usr/bin/nova-dhcpbridge',
                    'logdir': '/var/log/nova',
                    'state_path': '/var/lib/nova',
                    'lock_path': '/var/lock/nova',
                    'force_dhcp_release': 'True',
                    'iscsi_helper': 'tgtadm',
                    'libvirt_use_virtio_for_bridges': 'True',
                    'connection_type': 'libvirt',
                    'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
                    'verbose': 'True',
                    'ec2_private_dns_show_ip': 'True',
                    'api_paste_config': '/etc/nova/api-paste.ini',
                    'volumes_path': '/var/lib/nova/volumes',
                    'enabled_apis': 'ec2,osapi_compute,metadata',
                    'auth_strategy': 'keystone',
                    'compute_driver': 'libvirt.LibvirtDriver',
                    'keystone_ec2_url': keystone_ec2,
                    'sql_connection': db_uri,
                    'rabbit_userid': 'nova',
                    'rabbit_virtual_host': 'openstack',
                    'rabbit_password': rabbitmq_relation['password'],
                    'rabbit_host': rabbitmq_relation['hostname'],
                    'glance_api_servers': glance_relation['glance-api-server'],
                    'network_manager': 'nova.network.manager.FlatDHCPManager',
                    's3_listen_port': '3333',
                    'osapi_compute_listen_port': '8774',
                    'ec2_listen_port': '8773'}

        ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
        if ret:
            message = "nova config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
def test_nova_keystone_authtoken_config(self):
"""Verify the data in the nova config file's keystone_authtoken
section. This data only exists since icehouse."""
if self._get_openstack_release() < self.precise_icehouse:
return
unit = self.nova_cc_sentry
conf = '/etc/nova/nova.conf'
keystone_relation = self.keystone_sentry.relation('identity-service',
'nova-cloud-controller:identity-service')
keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'],
keystone_relation['service_port'])
expected = {'auth_uri': keystone_uri,
'auth_host': keystone_relation['service_host'],
'auth_port': keystone_relation['auth_port'],
'auth_protocol': keystone_relation['auth_protocol'],
'admin_tenant_name': keystone_relation['service_tenant'],
'admin_user': keystone_relation['service_username'],
'admin_password': keystone_relation['service_password']}
ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected)
if ret:
message = "nova config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_image_instance_create(self):
"""Create an image/instance, verify they exist, and delete them."""
# NOTE(coreycb): Skipping failing test on essex until resolved. essex
# nova API calls are getting "Malformed request url (HTTP
# 400)".
if self._get_openstack_release() == self.precise_essex:
u.log.error("Skipping failing test until resolved")
return
image = u.create_cirros_image(self.glance, "cirros-image")
if not image:
amulet.raise_status(amulet.FAIL, msg="Image create failed")
instance = u.create_instance(self.nova_demo, "cirros-image", "cirros",
"m1.tiny")
if not instance:
amulet.raise_status(amulet.FAIL, msg="Instance create failed")
found = False
for instance in self.nova_demo.servers.list():
if instance.name == 'cirros':
found = True
if instance.status != 'ACTIVE':
msg = "cirros instance is not active"
amulet.raise_status(amulet.FAIL, msg=message)
if not found:
message = "nova cirros instance does not exist"
amulet.raise_status(amulet.FAIL, msg=message)
u.delete_image(self.glance, image)
u.delete_instance(self.nova_demo, instance)

View File

View File

View File

@ -0,0 +1,58 @@
import amulet
class AmuletDeployment(object):
    """This class provides generic Amulet deployment and test runner
       methods."""

    def __init__(self, series=None):
        """Initialize the deployment environment.

        If a distro series is supplied it is recorded and passed to the
        underlying amulet.Deployment; otherwise amulet's default is used.
        """
        self.series = None

        if series:
            self.series = series
            self.d = amulet.Deployment(series=self.series)
        else:
            self.d = amulet.Deployment()

    def _add_services(self, this_service, other_services):
        """Add services to the deployment where this_service is the local
           charm that we're focused on testing and other_services are the
           other charms that come from the charm store.

        Each service is a (name, unit-count) tuple.  When a series was
        supplied, store charms are pinned to it via an explicit cs: URL.
        """
        name, units = range(2)
        self.this_service = this_service[name]
        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            if self.series:
                self.d.add(svc[name],
                           charm='cs:{}/{}'.format(self.series, svc[name]),
                           units=svc[units])
            else:
                self.d.add(svc[name], units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services.

        relations maps '<service>:<interface>' keys to their remote
        endpoint values.
        """
        for k, v in relations.iteritems():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services from a {service: config-dict} map."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing.

        A deployment timeout is converted into an amulet FAIL status; any
        other exception propagates unchanged.  (A previous redundant bare
        'except: raise' clause, which only re-raised what would propagate
        anyway, has been removed.)
        """
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()

View File

@ -0,0 +1,157 @@
import ConfigParser
import io
import logging
import re
import sys
from time import sleep
class AmuletUtils(object):
    """This class provides common utility functions that are used by Amulet
       tests."""

    def __init__(self, log_level=logging.ERROR):
        # Shared stdout logger used by all helper methods.
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = \
            log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address.

        NOTE(review): only the shape is checked; octet values above 255
        are still accepted.
        """
        if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
            return True
        else:
            return False

    def valid_url(self, url):
        """Return True if url looks like an http/https/ftp/ftps URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        if p.match(url):
            return True
        else:
            return False

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
           service units.

        commands maps a sentry unit to a list of shell commands; any
        non-zero exit status short-circuits with an error message.
        """
        for k, v in commands.iteritems():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section, expected):
        """Verify that the specified section of the config file contains
           the expected option key:value pairs.

        Returns an error message string on the first mismatch, or None if
        everything matches.
        """
        config = self._get_config(sentry_unit, config_file)

        # ConfigParser treats DEFAULT specially; has_section() is False
        # for it even though options can be read from it.
        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(section,
                       k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.

        The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or can be a function that evaluates a variable and returns a
        bool.
        """
        for k, v in expected.iteritems():
            if k in actual:
                if isinstance(v, basestring) or \
                        isinstance(v, bool) or \
                        isinstance(v, (int, long)):
                    # Plain value: must be equal.
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                # Callable matcher: must return truthy for the value.
                elif not v(actual[k]):
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if the value is not None."""
        # Bug fix: use identity comparison ('is not None') instead of the
        # previous '!= None' equality test (PEP 8 E711).
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last modification
           time of the /proc/pid directory. If pgrep_full is True, the process
           name is matched against the full command line."""
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        # pgrep -o reports the oldest matching pid.
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False):
        """Compare a service's start time vs a file's last modification time
           (such as a config file for that service) to determine if the service
           has been restarted."""
        # Give the service a moment to actually restart before sampling.
        sleep(10)
        if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
                self._get_file_mtime(sentry_unit, filename):
            return True
        else:
            return False

    def relation_error(self, name, data):
        """Format a standard relation-data error message."""
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        """Format a standard endpoint-data error message."""
        return 'unexpected endpoint data in {} - {}'.format(name, data)

View File

@ -0,0 +1,55 @@
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
    """This class inherits from AmuletDeployment and has additional support
       that is specifically for use by OpenStack charms."""

    def __init__(self, series=None, openstack=None, source=None):
        """Initialize the deployment environment.

        openstack is an openstack-origin value (e.g. 'cloud:precise-havana');
        source is the equivalent for non-OpenStack support charms.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin.

        Support charms listed in use_source take a 'source' option
        instead of 'openstack-origin'.
        """
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        # Bug fix: build a new list rather than appending this_service to
        # the caller's other_services list (the old code mutated its
        # argument as a side effect).
        services = other_services + [this_service]
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                if svc[name] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)

        if self.source:
            for svc in services:
                if svc[name] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services from a {service: config-dict} map."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
           release.

        Also exposes the enum values as attributes (self.precise_essex,
        ..., self.trusty_icehouse) for comparisons in tests.
        """
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]

View File

@ -0,0 +1,209 @@
import logging
import os
import time
import urllib
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
       that is specifically for use by OpenStack charms."""

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The ports
           are used to find the matching endpoint.

        Returns an error message string on failure, None on success.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
           expected service catalog endpoints."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: initialize ret so an empty 'expected' dict cannot raise
        # UnboundLocalError at the final return.
        ret = None
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: ret must exist even when 'expected' is empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: ret must exist even when 'expected' is empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: ret must exist even when 'expected' is empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint.

        The keystone unit's address is read from an existing relation;
        the admin API is addressed on port 35357.
        """
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        # Bug fix: the download URL was previously built from the local
        # path (including the 'tests/' prefix), producing a bad URL.  Keep
        # the remote file name and the local cache path separate.
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        return image

    def delete_image(self, glance, image):
        """Delete the specified image."""
        glance.images.delete(image)

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance.

        Polls for up to ~3 minutes; returns the instance when it reaches
        ACTIVE, or None otherwise.
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        # Bug fix: previously only a final status of 'BUILD' returned
        # None, so an instance that ended in e.g. 'ERROR' was returned as
        # if it had booted successfully.
        if status != 'ACTIVE':
            return None
        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance."""
        nova.servers.delete(instance)

View File

@ -1,4 +1,4 @@
from mock import MagicMock, patch from mock import MagicMock, patch, call
from test_utils import CharmTestCase, patch_open from test_utils import CharmTestCase, patch_open
import os import os
with patch('charmhelpers.core.hookenv.config') as config: with patch('charmhelpers.core.hookenv.config') as config:
@ -11,7 +11,11 @@ _map = utils.restart_map
utils.register_configs = MagicMock() utils.register_configs = MagicMock()
utils.restart_map = MagicMock() utils.restart_map = MagicMock()
import nova_cc_hooks as hooks with patch('nova_cc_utils.guard_map') as gmap:
with patch('charmhelpers.core.hookenv.config') as config:
config.return_value = False
gmap.return_value = {}
import nova_cc_hooks as hooks
utils.register_configs = _reg utils.register_configs = _reg
utils.restart_map = _map utils.restart_map = _map
@ -35,8 +39,8 @@ TO_PATCH = [
'relation_set', 'relation_set',
'relation_ids', 'relation_ids',
'ssh_compute_add', 'ssh_compute_add',
'ssh_known_hosts_b64', 'ssh_known_hosts_lines',
'ssh_authorized_keys_b64', 'ssh_authorized_keys_lines',
'save_script_rc', 'save_script_rc',
'service_running', 'service_running',
'service_stop', 'service_stop',
@ -100,12 +104,60 @@ class NovaCCHooksTests(CharmTestCase):
self.test_relation.set({ self.test_relation.set({
'migration_auth_type': 'ssh', 'ssh_public_key': 'fookey', 'migration_auth_type': 'ssh', 'ssh_public_key': 'fookey',
'private-address': '10.0.0.1'}) 'private-address': '10.0.0.1'})
self.ssh_known_hosts_b64.return_value = 'hosts' self.ssh_known_hosts_lines.return_value = [
self.ssh_authorized_keys_b64.return_value = 'keys' 'k_h_0', 'k_h_1', 'k_h_2']
self.ssh_authorized_keys_lines.return_value = [
'auth_0', 'auth_1', 'auth_2']
hooks.compute_changed() hooks.compute_changed()
self.ssh_compute_add.assert_called_with('fookey') self.ssh_compute_add.assert_called_with('fookey', rid=None, unit=None)
self.relation_set.assert_called_with(known_hosts='hosts', expected_relations = [
authorized_keys='keys') call(relation_settings={'authorized_keys_0': 'auth_0'},
relation_id=None),
call(relation_settings={'authorized_keys_1': 'auth_1'},
relation_id=None),
call(relation_settings={'authorized_keys_2': 'auth_2'},
relation_id=None),
call(relation_settings={'known_hosts_0': 'k_h_0'},
relation_id=None),
call(relation_settings={'known_hosts_1': 'k_h_1'},
relation_id=None),
call(relation_settings={'known_hosts_2': 'k_h_2'},
relation_id=None),
call(authorized_keys_max_index=3, relation_id=None),
call(known_hosts_max_index=3, relation_id=None)]
self.assertEquals(sorted(self.relation_set.call_args_list),
sorted(expected_relations))
    def test_compute_changed_nova_public_key(self):
        # compute_changed() with migration_auth_type 'sasl' and a
        # nova_ssh_public_key publishes the nova user's authorized_keys and
        # known_hosts entries line by line, plus a '_max_index' count for
        # each list.
        self.test_relation.set({
            'migration_auth_type': 'sasl', 'nova_ssh_public_key': 'fookey',
            'private-address': '10.0.0.1'})
        self.ssh_known_hosts_lines.return_value = [
            'k_h_0', 'k_h_1', 'k_h_2']
        self.ssh_authorized_keys_lines.return_value = [
            'auth_0', 'auth_1', 'auth_2']
        hooks.compute_changed()
        # The key must be added for the 'nova' user specifically.
        self.ssh_compute_add.assert_called_with('fookey', user='nova',
                                                rid=None, unit=None)
        expected_relations = [
            call(relation_settings={'nova_authorized_keys_0': 'auth_0'},
                 relation_id=None),
            call(relation_settings={'nova_authorized_keys_1': 'auth_1'},
                 relation_id=None),
            call(relation_settings={'nova_authorized_keys_2': 'auth_2'},
                 relation_id=None),
            call(relation_settings={'nova_known_hosts_0': 'k_h_0'},
                 relation_id=None),
            call(relation_settings={'nova_known_hosts_1': 'k_h_1'},
                 relation_id=None),
            call(relation_settings={'nova_known_hosts_2': 'k_h_2'},
                 relation_id=None),
            call(relation_settings={'nova_known_hosts_max_index': 3},
                 relation_id=None),
            call(relation_settings={'nova_authorized_keys_max_index': 3},
                 relation_id=None)]
        # Order-insensitive comparison of all relation_set invocations.
        self.assertEquals(sorted(self.relation_set.call_args_list),
                          sorted(expected_relations))
@patch.object(utils, 'config') @patch.object(utils, 'config')
@patch.object(hooks, '_auth_config') @patch.object(hooks, '_auth_config')

View File

@ -35,7 +35,9 @@ TO_PATCH = [
'remote_unit', 'remote_unit',
'_save_script_rc', '_save_script_rc',
'service_start', 'service_start',
'services' 'services',
'service_running',
'service_stop'
] ]
SCRIPTRC_ENV_VARS = { SCRIPTRC_ENV_VARS = {
@ -405,8 +407,8 @@ class NovaCCUtilsTests(CharmTestCase):
check_output.return_value = 'fookey' check_output.return_value = 'fookey'
host_key.return_value = 'fookey_old' host_key.return_value = 'fookey_old'
with patch_open() as (_open, _file): with patch_open() as (_open, _file):
utils.add_known_host('foohost') utils.add_known_host('foohost', None, None)
rm.assert_called_with('foohost', None) rm.assert_called_with('foohost', None, None)
@patch.object(utils, 'known_hosts') @patch.object(utils, 'known_hosts')
@patch.object(utils, 'remove_known_host') @patch.object(utils, 'remove_known_host')
@ -439,19 +441,19 @@ class NovaCCUtilsTests(CharmTestCase):
def test_known_hosts(self, ssh_dir): def test_known_hosts(self, ssh_dir):
ssh_dir.return_value = '/tmp/foo' ssh_dir.return_value = '/tmp/foo'
self.assertEquals(utils.known_hosts(), '/tmp/foo/known_hosts') self.assertEquals(utils.known_hosts(), '/tmp/foo/known_hosts')
ssh_dir.assert_called_with(None) ssh_dir.assert_called_with(None, None)
self.assertEquals(utils.known_hosts('bar'), '/tmp/foo/known_hosts') self.assertEquals(utils.known_hosts('bar'), '/tmp/foo/known_hosts')
ssh_dir.assert_called_with('bar') ssh_dir.assert_called_with('bar', None)
@patch.object(utils, 'ssh_directory_for_unit') @patch.object(utils, 'ssh_directory_for_unit')
def test_authorized_keys(self, ssh_dir): def test_authorized_keys(self, ssh_dir):
ssh_dir.return_value = '/tmp/foo' ssh_dir.return_value = '/tmp/foo'
self.assertEquals(utils.authorized_keys(), '/tmp/foo/authorized_keys') self.assertEquals(utils.authorized_keys(), '/tmp/foo/authorized_keys')
ssh_dir.assert_called_with(None) ssh_dir.assert_called_with(None, None)
self.assertEquals( self.assertEquals(
utils.authorized_keys('bar'), utils.authorized_keys('bar'),
'/tmp/foo/authorized_keys') '/tmp/foo/authorized_keys')
ssh_dir.assert_called_with('bar') ssh_dir.assert_called_with('bar', None)
@patch.object(utils, 'known_hosts') @patch.object(utils, 'known_hosts')
@patch('subprocess.check_call') @patch('subprocess.check_call')
@ -508,7 +510,9 @@ class NovaCCUtilsTests(CharmTestCase):
self.is_relation_made.return_value = False self.is_relation_made.return_value = False
self.relation_ids.return_value = [] self.relation_ids.return_value = []
self.assertEquals( self.assertEquals(
BASE_ENDPOINTS, utils.determine_endpoints('http://foohost.com')) BASE_ENDPOINTS, utils.determine_endpoints('http://foohost.com',
'http://foohost.com',
'http://foohost.com'))
def test_determine_endpoints_nova_volume(self): def test_determine_endpoints_nova_volume(self):
self.is_relation_made.return_value = False self.is_relation_made.return_value = False
@ -524,7 +528,9 @@ class NovaCCUtilsTests(CharmTestCase):
'nova-volume_region': 'RegionOne', 'nova-volume_region': 'RegionOne',
'nova-volume_service': 'nova-volume'}) 'nova-volume_service': 'nova-volume'})
self.assertEquals( self.assertEquals(
endpoints, utils.determine_endpoints('http://foohost.com')) endpoints, utils.determine_endpoints('http://foohost.com',
'http://foohost.com',
'http://foohost.com'))
def test_determine_endpoints_quantum_neutron(self): def test_determine_endpoints_quantum_neutron(self):
self.is_relation_made.return_value = False self.is_relation_made.return_value = False
@ -538,7 +544,9 @@ class NovaCCUtilsTests(CharmTestCase):
'quantum_region': 'RegionOne', 'quantum_region': 'RegionOne',
'quantum_service': 'quantum'}) 'quantum_service': 'quantum'})
self.assertEquals( self.assertEquals(
endpoints, utils.determine_endpoints('http://foohost.com')) endpoints, utils.determine_endpoints('http://foohost.com',
'http://foohost.com',
'http://foohost.com'))
def test_determine_endpoints_neutron_api_rel(self): def test_determine_endpoints_neutron_api_rel(self):
self.is_relation_made.return_value = True self.is_relation_made.return_value = True
@ -552,7 +560,9 @@ class NovaCCUtilsTests(CharmTestCase):
'quantum_region': None, 'quantum_region': None,
'quantum_service': None}) 'quantum_service': None})
self.assertEquals( self.assertEquals(
endpoints, utils.determine_endpoints('http://foohost.com')) endpoints, utils.determine_endpoints('http://foohost.com',
'http://foohost.com',
'http://foohost.com'))
@patch.object(utils, 'known_hosts') @patch.object(utils, 'known_hosts')
@patch('subprocess.check_output') @patch('subprocess.check_output')
@ -562,9 +572,9 @@ class NovaCCUtilsTests(CharmTestCase):
_check_output.assert_called_with( _check_output.assert_called_with(
['ssh-keygen', '-f', '/foo/known_hosts', ['ssh-keygen', '-f', '/foo/known_hosts',
'-H', '-F', 'test']) '-H', '-F', 'test'])
_known_hosts.assert_called_with(None) _known_hosts.assert_called_with(None, None)
utils.ssh_known_host_key('test', 'bar') utils.ssh_known_host_key('test', 'bar')
_known_hosts.assert_called_with('bar') _known_hosts.assert_called_with('bar', None)
@patch.object(utils, 'known_hosts') @patch.object(utils, 'known_hosts')
@patch('subprocess.check_call') @patch('subprocess.check_call')
@ -574,9 +584,9 @@ class NovaCCUtilsTests(CharmTestCase):
_check_call.assert_called_with( _check_call.assert_called_with(
['ssh-keygen', '-f', '/foo/known_hosts', ['ssh-keygen', '-f', '/foo/known_hosts',
'-R', 'test']) '-R', 'test'])
_known_hosts.assert_called_with(None) _known_hosts.assert_called_with(None, None)
utils.remove_known_host('test', 'bar') utils.remove_known_host('test', 'bar')
_known_hosts.assert_called_with('bar') _known_hosts.assert_called_with('bar', None)
@patch('subprocess.check_output') @patch('subprocess.check_output')
def test_migrate_database(self, check_output): def test_migrate_database(self, check_output):
@ -656,3 +666,115 @@ class NovaCCUtilsTests(CharmTestCase):
utils.do_openstack_upgrade() utils.do_openstack_upgrade()
expected = [call('cloud:precise-icehouse')] expected = [call('cloud:precise-icehouse')]
self.assertEquals(_do_openstack_upgrade.call_args_list, expected) self.assertEquals(_do_openstack_upgrade.call_args_list, expected)
    def test_guard_map_nova(self):
        # guard_map() with no database relation ids and a 'havana' release:
        # each nova service guards on identity-service, amqp and shared-db.
        self.relation_ids.return_value = []
        self.os_release.return_value = 'havana'
        self.assertEqual(
            {'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
             'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
             'nova-cert': ['identity-service', 'amqp', 'shared-db'],
             'nova-conductor': ['identity-service', 'amqp', 'shared-db'],
             'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
             'nova-scheduler': ['identity-service', 'amqp', 'shared-db']},
            utils.guard_map()
        )
        # For 'essex' the expected map has no nova-conductor entry.
        self.os_release.return_value = 'essex'
        self.assertEqual(
            {'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
             'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
             'nova-cert': ['identity-service', 'amqp', 'shared-db'],
             'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
             'nova-scheduler': ['identity-service', 'amqp', 'shared-db']},
            utils.guard_map()
        )
    def test_guard_map_neutron(self):
        # With the 'neutron' network manager on icehouse, guard_map() adds a
        # neutron-server entry alongside the nova services.
        self.relation_ids.return_value = []
        self.network_manager.return_value = 'neutron'
        self.os_release.return_value = 'icehouse'
        self.is_relation_made.return_value = False
        self.assertEqual(
            {'neutron-server': ['identity-service', 'amqp', 'shared-db'],
             'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
             'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
             'nova-cert': ['identity-service', 'amqp', 'shared-db'],
             'nova-conductor': ['identity-service', 'amqp', 'shared-db'],
             'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
             'nova-scheduler': ['identity-service', 'amqp', 'shared-db'], },
            utils.guard_map()
        )
        # With the older 'quantum' manager on grizzly the map contains
        # quantum-server instead of neutron-server.
        self.network_manager.return_value = 'quantum'
        self.os_release.return_value = 'grizzly'
        self.assertEqual(
            {'quantum-server': ['identity-service', 'amqp', 'shared-db'],
             'nova-api-ec2': ['identity-service', 'amqp', 'shared-db'],
             'nova-api-os-compute': ['identity-service', 'amqp', 'shared-db'],
             'nova-cert': ['identity-service', 'amqp', 'shared-db'],
             'nova-conductor': ['identity-service', 'amqp', 'shared-db'],
             'nova-objectstore': ['identity-service', 'amqp', 'shared-db'],
             'nova-scheduler': ['identity-service', 'amqp', 'shared-db'], },
            utils.guard_map()
        )
    def test_guard_map_pgsql(self):
        # When a pgsql relation exists, guard_map() replaces the shared-db
        # interface with the pgsql-nova-db / pgsql-neutron-db interfaces.
        self.relation_ids.return_value = ['pgsql:1']
        self.network_manager.return_value = 'neutron'
        self.is_relation_made.return_value = False
        self.os_release.return_value = 'icehouse'
        self.assertEqual(
            {'neutron-server': ['identity-service', 'amqp',
                                'pgsql-neutron-db'],
             'nova-api-ec2': ['identity-service', 'amqp', 'pgsql-nova-db'],
             'nova-api-os-compute': ['identity-service', 'amqp',
                                     'pgsql-nova-db'],
             'nova-cert': ['identity-service', 'amqp', 'pgsql-nova-db'],
             'nova-conductor': ['identity-service', 'amqp', 'pgsql-nova-db'],
             'nova-objectstore': ['identity-service', 'amqp',
                                  'pgsql-nova-db'],
             'nova-scheduler': ['identity-service', 'amqp',
                                'pgsql-nova-db'], },
            utils.guard_map()
        )
    def test_service_guard_inactive(self):
        '''Ensure that if disabled, service guards nothing'''
        contexts = MagicMock()

        # active=False: the decorator must be a pass-through -- neither
        # service state nor context completeness is consulted.
        @utils.service_guard({'test': ['interfacea', 'interfaceb']},
                             contexts, False)
        def dummy_func():
            pass
        dummy_func()
        self.assertFalse(self.service_running.called)
        self.assertFalse(contexts.complete_contexts.called)
    def test_service_guard_active_guard(self):
        '''Ensure services with incomplete interfaces are stopped'''
        contexts = MagicMock()
        # Only one of the two required interfaces is complete, and the
        # service is currently running -- it must be stopped.
        contexts.complete_contexts.return_value = ['interfacea']
        self.service_running.return_value = True

        @utils.service_guard({'test': ['interfacea', 'interfaceb']},
                             contexts, True)
        def dummy_func():
            pass
        dummy_func()
        self.service_running.assert_called_with('test')
        self.service_stop.assert_called_with('test')
        self.assertTrue(contexts.complete_contexts.called)
    def test_service_guard_active_release(self):
        '''Ensure services with complete interfaces are not stopped'''
        contexts = MagicMock()
        # All required interfaces are complete -- the guard must leave the
        # service untouched (no running check, no stop).
        contexts.complete_contexts.return_value = ['interfacea',
                                                   'interfaceb']

        @utils.service_guard({'test': ['interfacea', 'interfaceb']},
                             contexts, True)
        def dummy_func():
            pass
        dummy_func()
        self.assertFalse(self.service_running.called)
        self.assertFalse(self.service_stop.called)
        self.assertTrue(contexts.complete_contexts.called)

View File

@ -82,9 +82,9 @@ class TestConfig(object):
return self.config return self.config
def set(self, attr, value): def set(self, attr, value):
if attr not in self.config: if attr not in self.config:
raise KeyError raise KeyError
self.config[attr] = value self.config[attr] = value
class TestRelation(object): class TestRelation(object):