[corey.bryant,r=james-page] Add amulet tests.

James Page 2014-07-28 12:54:32 +01:00
commit 7c2e1eccfd
36 changed files with 1588 additions and 72 deletions

View File

@@ -3,15 +3,23 @@ PYTHON := /usr/bin/env python
 lint:
 	@flake8 --exclude hooks/charmhelpers hooks
-	@flake8 --exclude hooks/charmhelpers unit_tests
+	@flake8 --exclude hooks/charmhelpers unit_tests tests
 	@charm proof

-test:
-	@echo Starting tests...
+unit_test:
+	@echo Starting unit tests...
 	@$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests

+test:
+	@echo Starting Amulet tests...
+	# coreycb note: The -v should only be temporary until Amulet sends
+	# raise_status() messages to stderr:
+	#   https://bugs.launchpad.net/amulet/+bug/1320357
+	@juju test -v -p AMULET_HTTP_PROXY
+
 sync:
-	@charm-helper-sync -c charm-helpers.yaml
+	@charm-helper-sync -c charm-helpers-hooks.yaml
+	@charm-helper-sync -c charm-helpers-tests.yaml

 publish: lint test
 	bzr push lp:charms/swift-storage

charm-helpers-tests.yaml Normal file
View File

@@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet

View File

@@ -146,12 +146,12 @@ def get_hacluster_config():
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:
-        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
+        ha-bindiface, ha-mcastport, vip
     returns: dict: A dict containing settings keyed by setting name.
     raises: HAIncompleteConfig if settings are missing.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
     conf = {}
     for setting in settings:
         conf[setting] = config_get(setting)
@@ -170,6 +170,7 @@ def canonical_url(configs, vip_setting='vip'):
     :configs    : OSTemplateRenderer: A config tempating object to inspect for
                   a complete https context.
     :vip_setting: str: Setting in charm config that specifies
+                  VIP address.
     '''

View File

@@ -0,0 +1,55 @@
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms."""
def __init__(self, series=None, openstack=None, source=None):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin."""
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
name = 0
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
if self.openstack:
for svc in services:
if svc[name] not in use_source:
config = {'openstack-origin': self.openstack}
self.d.configure(svc[name], config)
if self.source:
for svc in services:
if svc[name] in use_source:
config = {'source': self.source}
self.d.configure(svc[name], config)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
self.d.configure(service, config)
def _get_openstack_release(self):
"""Return an integer representing the enum value of the openstack
release."""
self.precise_essex, self.precise_folsom, self.precise_grizzly, \
self.precise_havana, self.precise_icehouse, \
self.trusty_icehouse = range(6)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
return releases[(self.series, self.openstack)]
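For orientation, here is a minimal sketch (not part of the commit) of how a charm's Amulet test might drive this class; charm tests normally subclass it, as tests/basic_deployment.py does further down, and the series and cloud-archive pockets below mirror the tests/1x-basic-* scripts added in this change:

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)

# Deploy against the Ubuntu Cloud Archive havana pocket on precise.
d = OpenStackAmuletDeployment(series='precise',
                              openstack='cloud:precise-havana',
                              source='cloud:precise-updates/havana')
d._add_services(('swift-storage', 1), [('mysql', 1), ('keystone', 1)])
d._add_relations({'keystone:shared-db': 'mysql:shared-db'})
d._configure_services({'keystone': {'admin-password': 'openstack'}})
d._deploy()
# _get_openstack_release() also sets the precise_*/trusty_* enum attributes.
assert d._get_openstack_release() == d.precise_havana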

View File

@@ -0,0 +1,209 @@
import logging
import os
import time
import urllib
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms."""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint."""
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if admin_port in ep.adminurl and internal_port in ep.internalurl \
and public_port in ep.publicurl:
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints."""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems():
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate a list of actual tenant data vs list of expected tenant
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate a list of actual role data vs a list of expected role
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate a list of actual user data vs a list of expected user
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate a list of actual flavors vs a list of expected flavors."""
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists"""
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
service_ip = \
keystone_sentry.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
if not os.path.exists(cirros_img):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, cirros_img)
f.close()
with open(cirros_img) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
glance.images.delete(image)
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status == 'BUILD':
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
nova.servers.delete(instance)
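As a rough illustration (not part of the commit), a test that already has a keystone sentry unit could exercise these helpers as follows; the sentry lookup and credentials are assumptions that match tests/basic_deployment.py further down:

from charmhelpers.contrib.openstack.amulet.utils import (
    OpenStackAmuletUtils,
    ERROR,
)

u = OpenStackAmuletUtils(ERROR)
# keystone_sentry would be e.g. self.d.sentry.unit['keystone/0'] in a test class.
keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                         password='openstack', tenant='admin')
glance = u.authenticate_glance_admin(keystone)
image = u.create_cirros_image(glance, 'cirros-image')
if image:
    u.delete_image(glance, image)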

View File

@@ -21,9 +21,11 @@ from charmhelpers.core.hookenv import (
     relation_get,
     relation_ids,
     related_units,
+    relation_set,
     unit_get,
     unit_private_ip,
     ERROR,
+    INFO
 )

 from charmhelpers.contrib.hahelpers.cluster import (
@@ -42,6 +44,8 @@ from charmhelpers.contrib.openstack.neutron import (
     neutron_plugin_attribute,
 )

+from charmhelpers.contrib.network.ip import get_address_in_network
+
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@@ -134,8 +138,26 @@ class SharedDBContext(OSContextGenerator):
                 'Missing required charm config options. '
                 '(database name and user)')
             raise OSContextError
+
         ctxt = {}
+
+        # NOTE(jamespage) if mysql charm provides a network upon which
+        # access to the database should be made, reconfigure relation
+        # with the service units local address and defer execution
+        access_network = relation_get('access-network')
+        if access_network is not None:
+            if self.relation_prefix is not None:
+                hostname_key = "{}_hostname".format(self.relation_prefix)
+            else:
+                hostname_key = "hostname"
+            access_hostname = get_address_in_network(access_network,
+                                                     unit_get('private-address'))
+            set_hostname = relation_get(attribute=hostname_key,
+                                        unit=local_unit())
+            if set_hostname != access_hostname:
+                relation_set(relation_settings={hostname_key: access_hostname})
+                return ctxt  # Defer any further hook execution for now....
+
         password_setting = 'password'
         if self.relation_prefix:
             password_setting = self.relation_prefix + '_password'
@@ -243,23 +265,31 @@ class IdentityServiceContext(OSContextGenerator):

 class AMQPContext(OSContextGenerator):
-    interfaces = ['amqp']

-    def __init__(self, ssl_dir=None):
+    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
         self.ssl_dir = ssl_dir
+        self.rel_name = rel_name
+        self.relation_prefix = relation_prefix
+        self.interfaces = [rel_name]

     def __call__(self):
         log('Generating template context for amqp')
         conf = config()
+        user_setting = 'rabbit-user'
+        vhost_setting = 'rabbit-vhost'
+        if self.relation_prefix:
+            user_setting = self.relation_prefix + '-rabbit-user'
+            vhost_setting = self.relation_prefix + '-rabbit-vhost'
+
         try:
-            username = conf['rabbit-user']
-            vhost = conf['rabbit-vhost']
+            username = conf[user_setting]
+            vhost = conf[vhost_setting]
         except KeyError as e:
             log('Could not generate shared_db context. '
                 'Missing required charm config options: %s.' % e)
             raise OSContextError
+
         ctxt = {}
-        for rid in relation_ids('amqp'):
+        for rid in relation_ids(self.rel_name):
             ha_vip_only = False
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
@@ -332,10 +362,12 @@ class CephContext(OSContextGenerator):
         use_syslog = str(config('use-syslog')).lower()
         for rid in relation_ids('ceph'):
             for unit in related_units(rid):
-                mon_hosts.append(relation_get('private-address', rid=rid,
-                                              unit=unit))
                 auth = relation_get('auth', rid=rid, unit=unit)
                 key = relation_get('key', rid=rid, unit=unit)
+                ceph_addr = \
+                    relation_get('ceph-public-address', rid=rid, unit=unit) or \
+                    relation_get('private-address', rid=rid, unit=unit)
+                mon_hosts.append(ceph_addr)

         ctxt = {
             'mon_hosts': ' '.join(mon_hosts),
@@ -369,7 +401,9 @@ class HAProxyContext(OSContextGenerator):
         cluster_hosts = {}
         l_unit = local_unit().replace('/', '-')
-        cluster_hosts[l_unit] = unit_get('private-address')
+        cluster_hosts[l_unit] = \
+            get_address_in_network(config('os-internal-network'),
+                                   unit_get('private-address'))

         for rid in relation_ids('cluster'):
             for unit in related_units(rid):
@@ -418,12 +452,13 @@ class ApacheSSLContext(OSContextGenerator):
     """
     Generates a context for an apache vhost configuration that configures
     HTTPS reverse proxying for one or many endpoints. Generated context
-    looks something like:
-    {
-        'namespace': 'cinder',
-        'private_address': 'iscsi.mycinderhost.com',
-        'endpoints': [(8776, 8766), (8777, 8767)]
-    }
+    looks something like::
+
+        {
+            'namespace': 'cinder',
+            'private_address': 'iscsi.mycinderhost.com',
+            'endpoints': [(8776, 8766), (8777, 8767)]
+        }

     The endpoints list consists of a tuples mapping external ports
     to internal ports.
@@ -541,6 +576,26 @@ class NeutronContext(OSContextGenerator):

         return nvp_ctxt

+    def n1kv_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
+                                               self.network_manager)
+        n1kv_ctxt = {
+            'core_plugin': driver,
+            'neutron_plugin': 'n1kv',
+            'neutron_security_groups': self.neutron_security_groups,
+            'local_ip': unit_private_ip(),
+            'config': n1kv_config,
+            'vsm_ip': config('n1kv-vsm-ip'),
+            'vsm_username': config('n1kv-vsm-username'),
+            'vsm_password': config('n1kv-vsm-password'),
+            'restrict_policy_profiles': config(
+                'n1kv_restrict_policy_profiles'),
+        }
+
+        return n1kv_ctxt
+
     def neutron_ctxt(self):
         if https():
             proto = 'https'
@@ -572,6 +627,8 @@ class NeutronContext(OSContextGenerator):
             ctxt.update(self.ovs_ctxt())
         elif self.plugin in ['nvp', 'nsx']:
             ctxt.update(self.nvp_ctxt())
+        elif self.plugin == 'n1kv':
+            ctxt.update(self.n1kv_ctxt())

         alchemy_flags = config('neutron-alchemy-flags')
         if alchemy_flags:
@@ -611,7 +668,7 @@ class SubordinateConfigContext(OSContextGenerator):
     The subordinate interface allows subordinates to export their
     configuration requirements to the principle for multiple config
     files and multiple serivces. Ie, a subordinate that has interfaces
-    to both glance and nova may export to following yaml blob as json:
+    to both glance and nova may export to following yaml blob as json::

         glance:
             /etc/glance/glance-api.conf:
@@ -630,7 +687,8 @@ class SubordinateConfigContext(OSContextGenerator):
     It is then up to the principle charms to subscribe this context to
     the service+config file it is interestd in. Configuration data will
-    be available in the template context, in glance's case, as:
+    be available in the template context, in glance's case, as::
+
         ctxt = {
             ... other context ...
             'subordinate_config': {
@@ -657,7 +715,7 @@ class SubordinateConfigContext(OSContextGenerator):
         self.interface = interface

     def __call__(self):
-        ctxt = {}
+        ctxt = {'sections': {}}
         for rid in relation_ids(self.interface):
             for unit in related_units(rid):
                 sub_config = relation_get('subordinate_configuration',
@@ -683,14 +741,29 @@ class SubordinateConfigContext(OSContextGenerator):
                     sub_config = sub_config[self.config_file]
                     for k, v in sub_config.iteritems():
-                        ctxt[k] = v
+                        if k == 'sections':
+                            for section, config_dict in v.iteritems():
+                                log("adding section '%s'" % (section))
+                                ctxt[k][section] = config_dict
+                        else:
+                            ctxt[k] = v

-        if not ctxt:
-            ctxt['sections'] = {}
+        log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)

         return ctxt


+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+
+        return ctxt
+
+
 class SyslogContext(OSContextGenerator):

     def __call__(self):
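Two of the behavioural changes above are easiest to see from the calling side. A short sketch (not part of the diff) of the new AMQPContext knobs; the ssl_dir path is illustrative only, and the prefixed option names follow the prefix + '-rabbit-user' / '-rabbit-vhost' convention introduced above:

from charmhelpers.contrib.openstack.context import AMQPContext

# Reads 'nova-rabbit-user' and 'nova-rabbit-vhost' from charm config and
# walks the 'amqp' relation, via the new rel_name/relation_prefix arguments.
amqp = AMQPContext(ssl_dir='/etc/nova/ssl',  # illustrative path
                   rel_name='amqp',
                   relation_prefix='nova')
ctxt = amqp()

Similarly, SubordinateConfigContext now always seeds its context with a 'sections' key, so templates can iterate ctxt['sections'] without first checking that the key exists.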

View File

@@ -0,0 +1,75 @@
from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
_address_map = {
PUBLIC: {
'config': 'os-public-network',
'fallback': 'public-address'
},
INTERNAL: {
'config': 'os-internal-network',
'fallback': 'private-address'
},
ADMIN: {
'config': 'os-admin-network',
'fallback': 'private-address'
}
}
def canonical_url(configs, endpoint_type=PUBLIC):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:configs OSTemplateRenderer: A config tempating object to inspect for
a complete https context.
:endpoint_type str: The endpoint type to resolve.
:returns str: Base URL for services on the current service unit.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def resolve_address(endpoint_type=PUBLIC):
resolved_address = None
if is_clustered():
if config(_address_map[endpoint_type]['config']) is None:
# Assume vip is simple and pass back directly
resolved_address = config('vip')
else:
for vip in config('vip').split():
if is_address_in_network(
config(_address_map[endpoint_type]['config']),
vip):
resolved_address = vip
else:
resolved_address = get_address_in_network(
config(_address_map[endpoint_type]['config']),
unit_get(_address_map[endpoint_type]['fallback'])
)
if resolved_address is None:
raise ValueError('Unable to resolve a suitable IP address'
' based on charm state and configuration')
else:
return resolved_address
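A small usage sketch (not part of the commit); CONFIGS stands in for the charm's OSConfigRenderer instance and the port number is illustrative:

from charmhelpers.contrib.openstack.ip import (
    canonical_url,
    resolve_address,
    PUBLIC,
    INTERNAL,
)

# e.g. register the public endpoint and bind workers on the internal address
public_url = '{}:8080'.format(canonical_url(CONFIGS, PUBLIC))
internal_address = resolve_address(INTERNAL)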

View File

@@ -128,6 +128,20 @@ def neutron_plugins():
             'server_packages': ['neutron-server',
                                 'neutron-plugin-vmware'],
             'server_services': ['neutron-server']
+        },
+        'n1kv': {
+            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
+            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [['neutron-plugin-cisco']],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-cisco'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
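The new entry is consumed through neutron_plugin_attribute(), in the same way NeutronContext.n1kv_ctxt() does in the context.py hunk above; a brief sketch (not part of the diff):

from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

n1kv_config = neutron_plugin_attribute('n1kv', 'config', 'neutron')
n1kv_packages = neutron_plugin_attribute('n1kv', 'packages', 'neutron')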

View File

@@ -27,7 +27,12 @@ listen stats :8888
 {% if units -%}
 {% for service, ports in service_ports.iteritems() -%}
-listen {{ service }} 0.0.0.0:{{ ports[0] }}
+listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
+    balance roundrobin
+    {% for unit, address in units.iteritems() -%}
+    server {{ unit }} {{ address }}:{{ ports[1] }} check
+    {% endfor %}
+listen {{ service }}_ipv6 :::{{ ports[0] }}
     balance roundrobin
     {% for unit, address in units.iteritems() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check

View File

@@ -30,17 +30,17 @@ def get_loader(templates_dir, os_release):
     loading dir.

     A charm may also ship a templates dir with this module
-    and it will be appended to the bottom of the search list, eg:
-    hooks/charmhelpers/contrib/openstack/templates.
-
-    :param templates_dir: str: Base template directory containing release
-                               sub-directories.
-    :param os_release   : str: OpenStack release codename to construct template
-                               loader.
-    :returns            : jinja2.ChoiceLoader constructed with a list of
-                          jinja2.FilesystemLoaders, ordered in descending
-                          order by OpenStack release.
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+        jinja2.FilesystemLoaders, ordered in descending
+        order by OpenStack release.
     """
     tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                  for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@ class OSConfigRenderer(object):
     and ease the burden of managing config templates across multiple OpenStack
     releases.

-    Basic usage:
+    Basic usage::
+
         # import some common context generates from charmhelpers
         from charmhelpers.contrib.openstack import context
@@ -131,21 +132,19 @@ class OSConfigRenderer(object):
         # write out all registered configs
         configs.write_all()

-    Details:
-    OpenStack Releases and template loading
-    ---------------------------------------
+    **OpenStack Releases and template loading**

     When the object is instantiated, it is associated with a specific OS
     release. This dictates how the template loader will be constructed.

     The constructed loader attempts to load the template from several places
     in the following order:
     - from the most recent OS release-specific template dir (if one exists)
     - the base templates_dir
     - a template directory shipped in the charm with this helper file.

-    For the example above, '/tmp/templates' contains the following structure:
+    For the example above, '/tmp/templates' contains the following structure::
+
         /tmp/templates/nova.conf
         /tmp/templates/api-paste.ini
         /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@ class OSConfigRenderer(object):
     $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
     us to ship common templates (haproxy, apache) with the helpers.

-    Context generators
-    ---------------------------------------
+    **Context generators**
+
     Context generators are used to generate template contexts during hook
     execution. Doing so may require inspecting service relations, charm
     config, etc. When registered, a config file is associated with a list

View File

@@ -84,6 +84,8 @@ def get_os_codename_install_source(src):
     '''Derive OpenStack release codename from a given installation source.'''
     ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
     rel = ''
+    if src is None:
+        return rel
     if src in ['distro', 'distro-proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -189,7 +191,7 @@ def get_os_version_package(pkg, fatal=True):
     for version, cname in vers_map.iteritems():
         if cname == codename:
             return version
-    #e = "Could not determine OpenStack version for package: %s" % pkg
+    # e = "Could not determine OpenStack version for package: %s" % pkg
     # error_out(e)

View File

@@ -303,7 +303,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                         blk_device, fstype, system_services=[]):
     """
     NOTE: This function must only be called from a single service unit for
           the same rbd_img otherwise data loss will occur.

     Ensures given pool and RBD image exists, is mapped to a block device,
     and the device is formatted and mounted at the given mount_point.

View File

@@ -37,6 +37,7 @@ def zap_disk(block_device):
     check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])


 def is_device_mounted(device):
     '''Given a device path, return True if that device is mounted, and False
     if it isn't.

View File

@@ -25,7 +25,7 @@ cache = {}
 def cached(func):
     """Cache return values for multiple executions of func + args

-    For example:
+    For example::

         @cached
         def unit_get(attribute):
@@ -445,18 +445,19 @@ class UnregisteredHookError(Exception):
 class Hooks(object):
     """A convenient handler for hook functions.

-    Example:
+    Example::
+
         hooks = Hooks()

         # register a hook, taking its name from the function name
         @hooks.hook()
         def install():
-            ...
+            pass  # your code here

         # register a hook, providing a custom hook name
         @hooks.hook("config-changed")
         def config_changed():
-            ...
+            pass  # your code here

     if __name__ == "__main__":
         # execute a hook based on the name the program is called by

View File

@@ -211,13 +211,13 @@ def file_hash(path):
 def restart_on_change(restart_map, stopstart=False):
     """Restart services based on configuration files changing

-    This function is used a decorator, for example
+    This function is used a decorator, for example::

         @restart_on_change({
             '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
             })
         def ceph_client_changed():
-            ...
+            pass  # your code here

     In this example, the cinder-api and cinder-volume services
     would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -313,13 +313,19 @@ def get_nic_hwaddr(nic):
 def cmp_pkgrevno(package, revno, pkgcache=None):
     '''Compare supplied revno with the revno of the installed package

-    1 => Installed revno is greater than supplied arg
-    0 => Installed revno is the same as supplied arg
-    -1 => Installed revno is less than supplied arg
+    *  1 => Installed revno is greater than supplied arg
+    *  0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
     '''
     import apt_pkg
     if not pkgcache:
         apt_pkg.init()
+        # Force Apt to build its cache in memory. That way we avoid race
+        # conditions with other applications building the cache in the same
+        # place.
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
         pkgcache = apt_pkg.Cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
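For reference, a hedged sketch (not part of the diff) of how a charm hook typically interprets the -1/0/1 result; the package name and version are only examples:

from charmhelpers.core.host import cmp_pkgrevno

if cmp_pkgrevno('swift', '1.10.0') >= 0:
    # Installed swift is 1.10.0 or newer.
    pass  # enable newer-release behaviour here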

View File

@@ -235,31 +235,39 @@ def configure_sources(update=False,
                       sources_var='install_sources',
                       keys_var='install_keys'):
     """
-    Configure multiple sources from charm configuration
+    Configure multiple sources from charm configuration.
+
+    The lists are encoded as yaml fragments in the configuration.
+    The frament needs to be included as a string.

     Example config:
-        install_sources:
+        install_sources: |
           - "ppa:foo"
           - "http://example.com/repo precise main"
-        install_keys:
+        install_keys: |
           - null
           - "a1b2c3d4"

     Note that 'null' (a.k.a. None) should not be quoted.
     """
-    sources = safe_load(config(sources_var))
-    keys = config(keys_var)
-    if keys is not None:
-        keys = safe_load(keys)
-    if isinstance(sources, basestring) and (
-            keys is None or isinstance(keys, basestring)):
-        add_source(sources, keys)
+    sources = safe_load((config(sources_var) or '').strip()) or []
+    keys = safe_load((config(keys_var) or '').strip()) or None
+
+    if isinstance(sources, basestring):
+        sources = [sources]
+
+    if keys is None:
+        for source in sources:
+            add_source(source, None)
     else:
-        if not len(sources) == len(keys):
-            msg = 'Install sources and keys lists are different lengths'
-            raise SourceConfigError(msg)
-        for src_num in range(len(sources)):
-            add_source(sources[src_num], keys[src_num])
+        if isinstance(keys, basestring):
+            keys = [keys]
+
+        if len(sources) != len(keys):
+            raise SourceConfigError(
+                'Install sources and keys lists are different lengths')
+
+        for source, key in zip(sources, keys):
+            add_source(source, key)
+
     if update:
         apt_update(fatal=True)
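To illustrate the intended call pattern (not part of the diff): with install_sources and install_keys set to the YAML fragments shown in the docstring, a hook only needs:

from charmhelpers.fetch import configure_sources

# Parses install_sources/install_keys from charm config, adds each source
# with its matching key, then runs apt_update(fatal=True).
configure_sources(update=True)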

tests/00-setup Executable file
View File

@@ -0,0 +1,11 @@
#!/bin/bash
set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet
sudo apt-get install --yes python-swiftclient
sudo apt-get install --yes python-glanceclient
sudo apt-get install --yes python-keystoneclient
sudo apt-get install --yes python-novaclient

tests/10-basic-precise-essex Executable file
View File

@@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic swift-storage deployment on precise-essex."""
from basic_deployment import SwiftStorageBasicDeployment
if __name__ == '__main__':
deployment = SwiftStorageBasicDeployment(series='precise')
deployment.run_tests()

tests/11-basic-precise-folsom Executable file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic swift-storage deployment on precise-folsom."""
from basic_deployment import SwiftStorageBasicDeployment
if __name__ == '__main__':
deployment = SwiftStorageBasicDeployment(series='precise',
openstack='cloud:precise-folsom',
source='cloud:precise-updates/folsom')
deployment.run_tests()

tests/12-basic-precise-grizzly Executable file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic swift-storage deployment on precise-grizzly."""
from basic_deployment import SwiftStorageBasicDeployment
if __name__ == '__main__':
deployment = SwiftStorageBasicDeployment(series='precise',
openstack='cloud:precise-grizzly',
source='cloud:precise-updates/grizzly')
deployment.run_tests()

tests/13-basic-precise-havana Executable file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic swift-storage deployment on precise-havana."""
from basic_deployment import SwiftStorageBasicDeployment
if __name__ == '__main__':
deployment = SwiftStorageBasicDeployment(series='precise',
openstack='cloud:precise-havana',
source='cloud:precise-updates/havana')
deployment.run_tests()

tests/14-basic-precise-icehouse Executable file
View File

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic swift-storage deployment on precise-icehouse."""
from basic_deployment import SwiftStorageBasicDeployment
if __name__ == '__main__':
deployment = SwiftStorageBasicDeployment(series='precise',
openstack='cloud:precise-icehouse',
source='cloud:precise-updates/icehouse')
deployment.run_tests()

tests/15-basic-trusty-icehouse Executable file
View File

@@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic swift-storage deployment on trusty-icehouse."""
from basic_deployment import SwiftStorageBasicDeployment
if __name__ == '__main__':
deployment = SwiftStorageBasicDeployment(series='trusty')
deployment.run_tests()

tests/README Normal file
View File

@@ -0,0 +1,52 @@
This directory provides Amulet tests that focus on verification of swift-storage
deployments.
If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.
* To run all tests (starting with 00-setup):
make test
* To run a specific test module (or modules):
juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To run a specific test module (or modules), and keep the environment
deployed after a failure:
juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To re-run a test module against an already deployed environment (one
that was deployed by a previous call to 'juju test --set-e'):
./tests/15-basic-trusty-icehouse
For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without changing
the results beyond the initial run. This enables editing and re-running of a
test module against an already deployed environment, as described above.
Manual debugging tips:
* Set the following env vars before using the OpenStack CLI as admin:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_REGION_NAME=RegionOne
* Set the following env vars before using the OpenStack CLI as demoUser:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=demoTenant
export OS_USERNAME=demoUser
export OS_PASSWORD=password
export OS_REGION_NAME=RegionOne
* Sample swift command:
swift -A $OS_AUTH_URL --os-tenant-name services --os-username swift \
--os-password password list
(where tenant/user names and password are in swift-proxy's nova.conf file)

tests/basic_deployment.py Normal file
View File

@@ -0,0 +1,450 @@
#!/usr/bin/python
import amulet
import swiftclient
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG, # flake8: noqa
ERROR
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(ERROR)
class SwiftStorageBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic swift-storage deployment."""
def __init__(self, series, openstack=None, source=None):
"""Deploy the entire test environment."""
super(SwiftStorageBasicDeployment, self).__init__(series, openstack,
source)
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
self._initialize_tests()
def _add_services(self):
"""Add the service that we're testing, including the number of units,
where swift-storage is local, and the other charms are from
the charm store."""
this_service = ('swift-storage', 1)
other_services = [('mysql', 1),
('keystone', 1), ('glance', 1), ('swift-proxy', 1)]
super(SwiftStorageBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'keystone:shared-db': 'mysql:shared-db',
'swift-proxy:identity-service': 'keystone:identity-service',
'swift-storage:swift-storage': 'swift-proxy:swift-storage',
'glance:identity-service': 'keystone:identity-service',
'glance:shared-db': 'mysql:shared-db',
'glance:object-store': 'swift-proxy:object-store'
}
super(SwiftStorageBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting'}
swift_proxy_config = {'zone-assignment': 'manual',
'replicas': '1',
'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae',
'use-https': 'no'}
swift_storage_config = {'zone': '1',
'block-device': 'vdb',
'overwrite': 'true'}
configs = {'keystone': keystone_config,
'swift-proxy': swift_proxy_config,
'swift-storage': swift_storage_config}
super(SwiftStorageBasicDeployment, self)._configure_services(configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.mysql_sentry = self.d.sentry.unit['mysql/0']
self.keystone_sentry = self.d.sentry.unit['keystone/0']
self.glance_sentry = self.d.sentry.unit['glance/0']
self.swift_proxy_sentry = self.d.sentry.unit['swift-proxy/0']
self.swift_storage_sentry = self.d.sentry.unit['swift-storage/0']
# Authenticate admin with keystone
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
user='admin',
password='openstack',
tenant='admin')
# Authenticate admin with glance endpoint
self.glance = u.authenticate_glance_admin(self.keystone)
# Authenticate swift user
keystone_relation = self.keystone_sentry.relation('identity-service',
'swift-proxy:identity-service')
ep = self.keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
self.swift = swiftclient.Connection(authurl=ep,
user=keystone_relation['service_username'],
key=keystone_relation['service_password'],
tenant_name=keystone_relation['service_tenant'],
auth_version='2.0')
# Create a demo tenant/role/user
self.demo_tenant = 'demoTenant'
self.demo_role = 'demoRole'
self.demo_user = 'demoUser'
if not u.tenant_exists(self.keystone, self.demo_tenant):
tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
description='demo tenant',
enabled=True)
self.keystone.roles.create(name=self.demo_role)
self.keystone.users.create(name=self.demo_user,
password='password',
tenant_id=tenant.id,
email='demo@demo.com')
# Authenticate demo user with keystone
self.keystone_demo = \
u.authenticate_keystone_user(self.keystone, user=self.demo_user,
password='password',
tenant=self.demo_tenant)
def test_services(self):
"""Verify the expected services are running on the corresponding
service units."""
swift_storage_services = ['status swift-account',
'status swift-account-auditor',
'status swift-account-reaper',
'status swift-account-replicator',
'status swift-container',
'status swift-container-auditor',
'status swift-container-replicator',
'status swift-container-updater',
'status swift-object',
'status swift-object-auditor',
'status swift-object-replicator',
'status swift-object-updater']
if self._get_openstack_release() >= self.precise_icehouse:
swift_storage_services.append('status swift-container-sync')
commands = {
self.mysql_sentry: ['status mysql'],
self.keystone_sentry: ['status keystone'],
self.glance_sentry: ['status glance-registry', 'status glance-api'],
self.swift_proxy_sentry: ['status swift-proxy'],
self.swift_storage_sentry: swift_storage_services
}
ret = u.validate_services(commands)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_users(self):
"""Verify all existing roles."""
user1 = {'name': 'demoUser',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'demo@demo.com'}
user2 = {'name': 'admin',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost'}
user3 = {'name': 'glance',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': u'juju@localhost'}
user4 = {'name': 'swift',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': u'juju@localhost'}
expected = [user1, user2, user3, user4]
actual = self.keystone.users.list()
ret = u.validate_user_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
endpoint_vol = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
endpoint_id = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
if self._get_openstack_release() >= self.precise_folsom:
endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null
expected = {'image': [endpoint_id], 'object-store': [endpoint_id],
'identity': [endpoint_id]}
actual = self.keystone_demo.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_openstack_object_store_endpoint(self):
"""Verify the swift object-store endpoint data."""
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '8080'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'object-store endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_swift_storage_swift_storage_relation(self):
"""Verify the swift-storage to swift-proxy swift-storage relation
data."""
unit = self.swift_storage_sentry
relation = ['swift-storage', 'swift-proxy:swift-storage']
expected = {
'account_port': '6002',
'zone': '1',
'object_port': '6000',
'container_port': '6001',
'private-address': u.valid_ip,
'device': 'vdb'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('swift-storage swift-storage', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_swift_proxy_swift_storage_relation(self):
"""Verify the swift-proxy to swift-storage swift-storage relation
data."""
unit = self.swift_proxy_sentry
relation = ['swift-storage', 'swift-storage:swift-storage']
expected = {
'private-address': u.valid_ip,
'trigger': u.not_null,
'rings_url': u.valid_url,
'swift_hash': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('swift-proxy swift-storage', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed."""
# NOTE(coreycb): Skipping failing test on until resolved. This test
# fails because the config file's last mod time is
# slightly after the process' last mod time.
if self._get_openstack_release() >= self.precise_essex:
u.log.error("Skipping failing test until resolved")
return
services = {'swift-account-server': 'account-server.conf',
'swift-account-auditor': 'account-server.conf',
'swift-account-reaper': 'account-server.conf',
'swift-account-replicator': 'account-server.conf',
'swift-container-server': 'container-server.conf',
'swift-container-auditor': 'container-server.conf',
'swift-container-replicator': 'container-server.conf',
'swift-container-updater': 'container-server.conf',
'swift-object-server': 'object-server.conf',
'swift-object-auditor': 'object-server.conf',
'swift-object-replicator': 'object-server.conf',
'swift-object-updater': 'object-server.conf'}
if self._get_openstack_release() >= self.precise_icehouse:
services['swift-container-sync'] = 'container-server.conf'
self.d.configure('swift-storage',
{'object-server-threads-per-disk': '2'})
time = 20
for s, conf in services.iteritems():
config = '/etc/swift/{}'.format(conf)
if not u.service_restarted(self.swift_storage_sentry, s, config,
pgrep_full=True, sleep_time=time):
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
time = 0
self.d.configure('swift-storage',
{'object-server-threads-per-disk': '4'})
def test_swift_config(self):
"""Verify the data in the swift-hash section of the swift config
file."""
unit = self.swift_storage_sentry
conf = '/etc/swift/swift.conf'
swift_proxy_relation = self.swift_proxy_sentry.relation('swift-storage',
'swift-storage:swift-storage')
expected = {
'swift_hash_path_suffix': swift_proxy_relation['swift_hash']
}
ret = u.validate_config_data(unit, conf, 'swift-hash', expected)
if ret:
message = "swift config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_account_server_config(self):
"""Verify the data in the account server config file."""
unit = self.swift_storage_sentry
conf = '/etc/swift/account-server.conf'
expected = {
'DEFAULT': {
'bind_ip': '0.0.0.0',
'bind_port': '6002',
'workers': '1'
},
'pipeline:main': {
'pipeline': 'recon account-server'
},
'filter:recon': {
'use': 'egg:swift#recon',
'recon_cache_path': '/var/cache/swift'
},
'app:account-server': {
'use': 'egg:swift#account'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "account server config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_container_server_config(self):
"""Verify the data in the container server config file."""
unit = self.swift_storage_sentry
conf = '/etc/swift/container-server.conf'
expected = {
'DEFAULT': {
'bind_ip': '0.0.0.0',
'bind_port': '6001',
'workers': '1'
},
'pipeline:main': {
'pipeline': 'recon container-server'
},
'filter:recon': {
'use': 'egg:swift#recon',
'recon_cache_path': '/var/cache/swift'
},
'app:container-server': {
'use': 'egg:swift#container',
'allow_versions': 'true'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "container server config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_object_server_config(self):
"""Verify the data in the object server config file."""
unit = self.swift_storage_sentry
conf = '/etc/swift/object-server.conf'
expected = {
'DEFAULT': {
'bind_ip': '0.0.0.0',
'bind_port': '6000',
'workers': '1'
},
'pipeline:main': {
'pipeline': 'recon object-server'
},
'filter:recon': {
'use': 'egg:swift#recon',
'recon_cache_path': '/var/cache/swift'
},
'app:object-server': {
'use': 'egg:swift#object',
'threads_per_disk': '4'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "object server config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_image_create(self):
"""Create an instance in glance, which is backed by swift, and validate
that some of the metadata for the image match in glance and swift."""
# NOTE(coreycb): Skipping failing test on folsom until resolved. On
# folsom only, uploading an image to glance gets 400 Bad
# Request - Error uploading image: (error): [Errno 111]
# ECONNREFUSED (HTTP 400)
if self._get_openstack_release() == self.precise_folsom:
u.log.error("Skipping failing test until resolved")
return
# Create glance image
image = u.create_cirros_image(self.glance, "cirros-image")
if not image:
amulet.raise_status(amulet.FAIL, msg="Image create failed")
# Validate that cirros image exists in glance and get its checksum/size
images = list(self.glance.images.list())
if len(images) != 1:
msg = "Expected 1 glance image, found {}".format(len(images))
amulet.raise_status(amulet.FAIL, msg=msg)
if images[0].name != 'cirros-image':
message = "cirros image does not exist"
amulet.raise_status(amulet.FAIL, msg=message)
glance_image_md5 = image.checksum
glance_image_size = image.size
# Validate that swift object's checksum/size match that from glance
headers, containers = self.swift.get_account()
if len(containers) != 1:
msg = "Expected 1 swift container, found {}".format(len(containers))
amulet.raise_status(amulet.FAIL, msg=msg)
container_name = containers[0].get('name')
headers, objects = self.swift.get_container(container_name)
if len(objects) != 1:
msg = "Expected 1 swift object, found {}".format(len(objects))
amulet.raise_status(amulet.FAIL, msg=msg)
swift_object_size = objects[0].get('bytes')
swift_object_md5 = objects[0].get('hash')
if glance_image_size != swift_object_size:
msg = "Glance image size {} != swift object size {}".format( \
glance_image_size, swift_object_size)
amulet.raise_status(amulet.FAIL, msg=msg)
if glance_image_md5 != swift_object_md5:
msg = "Glance image hash {} != swift object hash {}".format( \
glance_image_md5, swift_object_md5)
amulet.raise_status(amulet.FAIL, msg=msg)
# Cleanup
u.delete_image(self.glance, image)

View File

@@ -0,0 +1,58 @@
import amulet
class AmuletDeployment(object):
"""This class provides generic Amulet deployment and test runner
methods."""
def __init__(self, series=None):
"""Initialize the deployment environment."""
self.series = None
if series:
self.series = series
self.d = amulet.Deployment(series=self.series)
else:
self.d = amulet.Deployment()
def _add_services(self, this_service, other_services):
"""Add services to the deployment where this_service is the local charm
that we're focused on testing and other_services are the other
charms that come from the charm store."""
name, units = range(2)
self.this_service = this_service[name]
self.d.add(this_service[name], units=this_service[units])
for svc in other_services:
if self.series:
self.d.add(svc[name],
charm='cs:{}/{}'.format(self.series, svc[name]),
units=svc[units])
else:
self.d.add(svc[name], units=svc[units])
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in relations.iteritems():
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
self.d.configure(service, config)
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup()
self.d.sentry.wait()
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
except:
raise
def run_tests(self):
"""Run all of the methods that are prefixed with 'test_'."""
for test in dir(self):
if test.startswith('test_'):
getattr(self, test)()

View File

@@ -0,0 +1,157 @@
import ConfigParser
import io
import logging
import re
import sys
from time import sleep
class AmuletUtils(object):
"""This class provides common utility functions that are used by Amulet
tests."""
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = \
log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def valid_ip(self, ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
else:
return False
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
if p.match(url):
return True
else:
return False
def validate_services(self, commands):
"""Verify the specified services are running on the corresponding
service units."""
for k, v in commands.iteritems():
for cmd in v:
output, code = k.run(cmd)
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
config = ConfigParser.ConfigParser()
config.readfp(io.StringIO(file_contents))
return config
def validate_config_data(self, sentry_unit, config_file, section, expected):
"""Verify that the specified section of the config file contains
the expected option key:value pairs."""
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
return "section [{}] does not exist".format(section)
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
if config.get(section, k) != expected[k]:
return "section [{}] {}:{} != expected {}:{}".format(section,
k, config.get(section, k), k, expected[k])
return None
def _validate_dict_data(self, expected, actual):
"""Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or a function that evaluates a variable and returns a
        bool."""
for k, v in expected.iteritems():
if k in actual:
if isinstance(v, basestring) or \
isinstance(v, bool) or \
isinstance(v, (int, long)):
if v != actual[k]:
return "{}:{}".format(k, actual[k])
elif not v(actual[k]):
return "{}:{}".format(k, actual[k])
else:
return "key '{}' does not exist".format(k)
return None
def validate_relation_data(self, sentry_unit, relation, expected):
"""Validate actual relation data based on expected relation data."""
actual = sentry_unit.relation(relation[0], relation[1])
self.log.debug('actual: {}'.format(repr(actual)))
return self._validate_dict_data(expected, actual)
def _validate_list_data(self, expected, actual):
"""Compare expected list vs actual list data."""
for e in expected:
if e not in actual:
return "expected item {} not found in actual list".format(e)
return None
    def not_null(self, string):
        """Return True if string is not None."""
        return string is not None
def _get_file_mtime(self, sentry_unit, filename):
"""Get last modification time of file."""
return sentry_unit.file_stat(filename)['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
"""Determine start time of the process based on the last modification
time of the /proc/pid directory. If pgrep_full is True, the process
name is matched against the full command line."""
if pgrep_full:
cmd = 'pgrep -o -f {}'.format(service)
else:
cmd = 'pgrep -o {}'.format(service)
proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=False):
"""Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted."""
sleep(10)
if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
self._get_file_mtime(sentry_unit, filename):
return True
else:
return False
def relation_error(self, name, data):
return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)
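
As a rough illustration of how these helpers are meant to be called from a test, the sketch below checks a running service and a config file section. The unit name, command and config path are made up for the example and are not defined by this commit.

import amulet

from charmhelpers.contrib.amulet.utils import AmuletUtils

u = AmuletUtils()


def check_unit(deployment):
    unit = deployment.d.sentry.unit['my-charm/0']

    # Each sentry unit maps to the commands expected to exit 0.
    ret = u.validate_services({unit: ['status my-service']})
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)

    # Compare a config file section against expected key/value pairs.
    expected = {'bind-port': '6000', 'workers': '1'}
    ret = u.validate_config_data(unit, '/etc/my-service/my.conf',
                                 'DEFAULT', expected)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)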

View File

@@ -0,0 +1,55 @@
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms."""
def __init__(self, series=None, openstack=None, source=None):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin."""
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
name = 0
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
if self.openstack:
for svc in services:
if svc[name] not in use_source:
config = {'openstack-origin': self.openstack}
self.d.configure(svc[name], config)
if self.source:
for svc in services:
if svc[name] in use_source:
config = {'source': self.source}
self.d.configure(svc[name], config)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
self.d.configure(service, config)
def _get_openstack_release(self):
"""Return an integer representing the enum value of the openstack
release."""
self.precise_essex, self.precise_folsom, self.precise_grizzly, \
self.precise_havana, self.precise_icehouse, \
self.trusty_icehouse = range(6)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
return releases[(self.series, self.openstack)]
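
A hedged sketch of how a charm test might build on this class and use the release enum above to gate a check on the deployed OpenStack version; the charm name, series and origin below are placeholders, not values fixed by this commit.

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)


class MyOpenStackDeployment(OpenStackAmuletDeployment):
    """Hypothetical deployment against cloud:precise-havana."""

    def __init__(self, series='precise', openstack='cloud:precise-havana',
                 source=None):
        super(MyOpenStackDeployment, self).__init__(series, openstack,
                                                    source)
        self._add_services(('my-charm', 1), [('mysql', 1), ('keystone', 1)])
        self._add_relations({'keystone:shared-db': 'mysql:shared-db'})
        self._deploy()

    def test_havana_only_behaviour(self):
        # _get_openstack_release() also defines the self.precise_* members.
        if self._get_openstack_release() < self.precise_havana:
            return  # nothing to check before Havana
        # ... assertions that only apply to Havana and later ...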

View File

@@ -0,0 +1,209 @@
import logging
import os
import time
import urllib
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms."""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint."""
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if admin_port in ep.adminurl and internal_port in ep.internalurl \
and public_port in ep.publicurl:
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints."""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems():
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate a list of actual tenant data vs list of expected tenant
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate a list of actual role data vs a list of expected role
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate a list of actual user data vs a list of expected user
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate a list of actual flavors vs a list of expected flavors."""
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists"""
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
service_ip = \
keystone_sentry.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
if not os.path.exists(cirros_img):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, cirros_img)
f.close()
with open(cirros_img) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
glance.images.delete(image)
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status == 'BUILD':
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
nova.servers.delete(instance)
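
Putting the pieces together, a test built on these utilities might authenticate against the deployed keystone, boot a cirros instance and tear it down again. The following is only an illustrative sketch; the unit name, credentials and flavor are assumptions, not values defined by this commit.

import amulet

from charmhelpers.contrib.openstack.amulet.utils import (
    OpenStackAmuletUtils,
    DEBUG,
)

u = OpenStackAmuletUtils(DEBUG)


def exercise_cloud(deployment):
    keystone_unit = deployment.d.sentry.unit['keystone/0']
    keystone = u.authenticate_keystone_admin(keystone_unit, user='admin',
                                             password='openstack',
                                             tenant='admin')
    glance = u.authenticate_glance_admin(keystone)
    nova = u.authenticate_nova_user(keystone, user='admin',
                                    password='openstack', tenant='admin')

    image = u.create_cirros_image(glance, 'cirros-test-image')
    instance = u.create_instance(nova, 'cirros-test-image',
                                 'cirros-test-instance', 'm1.tiny')
    if instance is None:
        amulet.raise_status(amulet.FAIL, msg='instance never reached ACTIVE')

    u.delete_instance(nova, instance)
    u.delete_image(glance, image)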