update amulet tests for vivid:kilo; re-sync tests/charmhelpers

Ryan Beisner 2015-06-09 17:19:00 +00:00
parent cf855bbf77
commit 2ec28a3708
7 changed files with 277 additions and 141 deletions:
hooks/charmhelpers/contrib/hahelpers
hooks/charmhelpers/contrib/openstack
tests/00-setup
tests/basic_deployment.py
tests/charmhelpers/contrib/amulet
tests/charmhelpers/contrib/openstack/amulet
tests/tests.yaml

@@ -44,6 +44,7 @@ from charmhelpers.core.hookenv import (
ERROR,
WARNING,
unit_get,
is_leader as juju_is_leader
)
from charmhelpers.core.decorators import (
retry_on_exception,
@@ -63,17 +64,30 @@ class CRMResourceNotFound(Exception):
pass
class CRMDCNotFound(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
It relies on three mechanisms to determine leadership:
- 1. If the charm is part of a corosync cluster, call corosync to
+ 1. If juju is sufficiently new and leadership election is supported,
+ the is_leader command will be used.
+ 2. If the charm is part of a corosync cluster, call corosync to
determine leadership.
- 2. If the charm is not part of a corosync cluster, the leader is
+ 3. If the charm is not part of a corosync cluster, the leader is
determined as being "the alive unit with the lowest unit number". In
other words, the oldest surviving unit.
"""
try:
return juju_is_leader()
except NotImplementedError:
log('Juju leadership election feature not enabled'
', using fallback support',
level=WARNING)
if is_clustered():
if not is_crm_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
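Taken together, the new logic prefers Juju-native leadership and only then
falls back to the older checks. A minimal sketch of the resulting decision
chain (assuming the neighbouring charmhelpers helpers oldest_peer and
peer_units; not the verbatim function body):

def is_elected_leader_sketch(resource):
    # 1. Prefer Juju's native leadership election where supported.
    try:
        return juju_is_leader()
    except NotImplementedError:
        # Older juju: fall through to the cluster-based checks.
        log('Juju leadership election feature not enabled, '
            'using fallback support', level=WARNING)
    # 2. In a corosync cluster, the CRM decides.
    if is_clustered():
        return is_crm_leader(resource)
    # 3. Otherwise the oldest surviving unit (lowest unit number) leads.
    return oldest_peer(peer_units())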
@@ -106,8 +120,9 @@ def is_crm_dc():
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError:
- return False
+ except subprocess.CalledProcessError as ex:
+ raise CRMDCNotFound(str(ex))
current_dc = ''
for line in status.split('\n'):
if line.startswith('Current DC'):
@@ -115,10 +130,14 @@ def is_crm_dc():
current_dc = line.split(':')[1].split()[0]
if current_dc == get_unit_hostname():
return True
elif current_dc == 'NONE':
raise CRMDCNotFound('Current DC: NONE')
return False
- @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
+ @retry_on_exception(5, base_delay=2,
+ exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
"""
Returns True if the charm calling this is the elected corosync leader,
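With the widened exc_type above, a CRMDCNotFound now re-enters the retry
loop instead of failing fast. A rough sketch of the decorator's assumed
semantics (the real charmhelpers.core.decorators implementation may differ
in detail):

import time

def retry_on_exception_sketch(num_retries, base_delay=0, exc_type=Exception):
    """Retry the wrapped call, doubling the delay after each failure."""
    def wrap(f):
        def wrapped(*args, **kwargs):
            delay = base_delay
            for attempt in range(num_retries):
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if attempt == num_retries - 1:
                        raise  # retries exhausted: propagate
                    time.sleep(delay)
                    delay *= 2
        return wrapped
    return wrap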

@@ -17,6 +17,7 @@
from charmhelpers.core.hookenv import (
config,
unit_get,
service_name,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
@@ -26,8 +27,6 @@ from charmhelpers.contrib.network.ip import (
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
- from functools import partial
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
@@ -35,15 +34,18 @@ ADMIN = 'admin'
ADDRESS_MAP = {
PUBLIC: {
'config': 'os-public-network',
- 'fallback': 'public-address'
+ 'fallback': 'public-address',
+ 'override': 'os-public-hostname',
},
INTERNAL: {
'config': 'os-internal-network',
- 'fallback': 'private-address'
+ 'fallback': 'private-address',
+ 'override': 'os-internal-hostname',
},
ADMIN: {
'config': 'os-admin-network',
- 'fallback': 'private-address'
+ 'fallback': 'private-address',
+ 'override': 'os-admin-hostname',
}
}
@@ -57,15 +59,50 @@ def canonical_url(configs, endpoint_type=PUBLIC):
:param endpoint_type: str endpoint type to resolve.
:returns: str base URL for services on the current service unit.
"""
- scheme = 'http'
- if 'https' in configs.complete_contexts():
- scheme = 'https'
+ scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
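Illustrative results (addresses assumed for the example, not taken from the
source):

# With a complete https context and an IPv4 address resolved:
#     canonical_url(configs, PUBLIC)    # -> 'https://10.5.100.1'
# IPv6 addresses are bracketed so a port can be appended safely:
#     canonical_url(configs, INTERNAL)  # -> 'http://[2001:db8::10]'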
def _get_scheme(configs):
"""Returns the scheme to use for the url (either http or https)
depending upon whether https is in the configs value.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:returns: either 'http' or 'https' depending on whether https is
configured within the configs context.
"""
scheme = 'http'
if configs and 'https' in configs.complete_contexts():
scheme = 'https'
return scheme
def _get_address_override(endpoint_type=PUBLIC):
"""Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present.
"""
override_key = ADDRESS_MAP[endpoint_type]['override']
addr_override = config(override_key)
if not addr_override:
return None
else:
return addr_override.format(service_name=service_name())
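For example, with a hypothetical deployment of a service named 'heat' and a
templated hostname configured:

# juju set heat os-public-hostname='{service_name}.example.com'
_get_address_override(PUBLIC)   # -> 'heat.example.com'
# With no os-public-hostname set, the function returns None and
# resolve_address() falls through to VIP/network resolution.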
def resolve_address(endpoint_type=PUBLIC):
"""Return unit address depending on net config.
@@ -77,7 +114,10 @@ def resolve_address(endpoint_type=PUBLIC):
:param endpoint_type: Network endpoint type
"""
- resolved_address = None
+ resolved_address = _get_address_override(endpoint_type)
+ if resolved_address:
+ return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
@@ -109,38 +149,3 @@
"clustered=%s)" % (net_type, clustered))
return resolved_address
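In outline, resolve_address now applies this precedence (a sketch of the
flow, not the full function):

def resolve_address_outline(endpoint_type=PUBLIC):
    # 1. An explicit os-*-hostname override always wins.
    override = _get_address_override(endpoint_type)
    if override:
        return override
    # 2. When clustered with a 'vip' config, pick the VIP that falls
    #    inside the configured os-*-network.
    # 3. Otherwise use the unit address on that network, or the fallback
    #    address ('public-address' / 'private-address') from unit_get.
    return None  # steps 2 and 3 elided in this sketch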
- def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
- override=None):
- """Returns the correct endpoint URL to advertise to Keystone.
- This method provides the correct endpoint URL which should be advertised to
- the keystone charm for endpoint creation. This method allows for the url to
- be overridden to force a keystone endpoint to have specific URL for any of
- the defined scopes (admin, internal, public).
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param url_template: str format string for creating the url template. Only
- two values will be passed - the scheme+hostname
- returned by the canonical_url and the port.
- :param endpoint_type: str endpoint type to resolve.
- :param override: str the name of the config option which overrides the
- endpoint URL defined by the charm itself. None will
- disable any overrides (default).
- """
- if override:
- # Return any user-defined overrides for the keystone endpoint URL.
- user_value = config(override)
- if user_value:
- return user_value.strip()
- return url_template % (canonical_url(configs, endpoint_type), port)
- public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
- internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
- admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)

@@ -5,6 +5,7 @@ set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet \
+ python-distro-info \
python-glanceclient \
python-keystoneclient \
python-novaclient

@@ -127,7 +127,6 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
self.glance_sentry = self.d.sentry.unit['glance/0']
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
@@ -178,20 +177,20 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
- commands = {
- self.heat_sentry: ['status heat-api',
- 'status heat-api-cfn',
- 'status heat-engine'],
- self.mysql_sentry: ['status mysql'],
- self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
- self.nova_compute_sentry: ['status nova-compute',
- 'status nova-network',
- 'status nova-api'],
- self.keystone_sentry: ['status keystone'],
- self.glance_sentry: ['status glance-registry', 'status glance-api']
+ service_names = {
+ self.heat_sentry: ['heat-api',
+ 'heat-api-cfn',
+ 'heat-engine'],
+ self.mysql_sentry: ['mysql'],
+ self.rabbitmq_sentry: ['rabbitmq-server'],
+ self.nova_compute_sentry: ['nova-compute',
+ 'nova-network',
+ 'nova-api'],
+ self.keystone_sentry: ['keystone'],
+ self.glance_sentry: ['glance-registry', 'glance-api']
}
- ret = u.validate_services(commands)
+ ret = u.validate_services_by_name(service_names)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
@@ -209,10 +208,15 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
if self._get_openstack_release() >= self.precise_folsom:
endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null
- expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
- 'ec2': [endpoint_vol], 'identity': [endpoint_id]}
- actual = self.keystone.service_catalog.get_endpoints()
+ expected = {'compute': [endpoint_vol], 'orchestration': [endpoint_vol],
+ 'image': [endpoint_vol], 'identity': [endpoint_id]}
+ if self._get_openstack_release() <= self.trusty_juno:
+ # Before Kilo
+ expected['s3'] = [endpoint_vol]
+ expected['ec2'] = [endpoint_vol]
+ actual = self.keystone.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
@@ -221,7 +225,14 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
"""Verify the heat api endpoint data."""
u.log.debug('Checking api endpoint data...')
endpoints = self.keystone.endpoints.list()
- admin_port = internal_port = public_port = '3333'
+ if self._get_openstack_release() <= self.trusty_juno:
+ # Before Kilo
+ admin_port = internal_port = public_port = '3333'
+ else:
+ # Kilo and later
+ admin_port = internal_port = public_port = '8004'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
@@ -352,28 +363,14 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
},
}
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- expected.update(
- {
- 'oslo_messaging_rabbit': {
- 'rabbit_userid': 'heat',
- 'rabbit_virtual_host': 'openstack',
- 'rabbit_password': rmq_rel['password'],
- 'rabbit_host': rmq_rel['hostname']
- }
- }
- )
- else:
- # Juno or earlier
- expected['DEFAULT'].update(
- {
- 'rabbit_userid': 'heat',
- 'rabbit_virtual_host': 'openstack',
- 'rabbit_password': rmq_rel['password'],
- 'rabbit_host': rmq_rel['hostname']
- }
- )
+ expected['DEFAULT'].update(
+ {
+ 'rabbit_userid': 'heat',
+ 'rabbit_virtual_host': 'openstack',
+ 'rabbit_password': rmq_rel['password'],
+ 'rabbit_host': rmq_rel['hostname']
+ }
+ )
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
@@ -490,9 +487,9 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
# /!\ Heat stacks reach a COMPLETE status even when nova cannot
# find resources (a valid hypervisor) to fit the instance, in
# which case the heat stack self-deletes! Confirm anyway...
- ret = u.thing_reaches_status(self.heat.stacks, _stack_id,
- expected_stat="COMPLETE",
- msg="Stack status wait")
+ ret = u.resource_reaches_status(self.heat.stacks, _stack_id,
+ expected_stat="COMPLETE",
+ msg="Stack status wait")
_stacks = list(self.heat.stacks.list())
u.log.debug('All stacks: {}'.format(_stacks))
if not ret:
@@ -531,9 +528,9 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
amulet.raise_status(amulet.FAIL, msg=msg)
# Confirm nova instance reaches ACTIVE status.
- ret = u.thing_reaches_status(self.nova.servers, _server_id,
- expected_stat="ACTIVE",
- msg="nova instance")
+ ret = u.resource_reaches_status(self.nova.servers, _server_id,
+ expected_stat="ACTIVE",
+ msg="nova instance")
if not ret:
msg = 'Nova compute instance failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
@@ -541,18 +538,18 @@ class HeatBasicDeployment(OpenStackAmuletDeployment):
def test_490_heat_stack_delete(self):
"""Delete a heat stack, verify."""
u.log.debug('Deleting heat stack...')
- u.delete_thing(self.heat.stacks, STACK_NAME, msg="heat stack")
+ u.delete_resource(self.heat.stacks, STACK_NAME, msg="heat stack")
def test_491_image_delete(self):
"""Delete that image."""
u.log.debug('Deleting glance image...')
image = self.nova.images.find(name=IMAGE_NAME)
- u.delete_thing(self.nova.images, image, msg="glance image")
+ u.delete_resource(self.nova.images, image, msg="glance image")
def test_492_keypair_delete(self):
"""Delete that keypair."""
u.log.debug('Deleting keypair...')
- u.delete_thing(self.nova.keypairs, KEYPAIR_NAME, msg="nova keypair")
+ u.delete_resource(self.nova.keypairs, KEYPAIR_NAME, msg="nova keypair")
def test_900_heat_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config

@@ -15,14 +15,14 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
+ import distro_info
import io
import logging
import re
- import six
import sys
import time
+ import six
class AmuletUtils(object):
"""Amulet utilities.
@@ -33,6 +33,7 @@ class AmuletUtils(object):
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
self.ubuntu_releases = self.get_ubuntu_releases()
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
@@ -70,17 +71,125 @@ class AmuletUtils(object):
else:
return False
def get_ubuntu_release_from_sentry(self, sentry_unit):
"""Get Ubuntu release codename from sentry unit"""
msg = None
cmd = 'lsb_release -cs'
release, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} lsb_release: {}'.format(
sentry_unit.info['unit_name'], release))
else:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, release, code))
if release not in self.ubuntu_releases:
msg = ("Release ({}) not found in Ubuntu releases "
"({})".format(release, self.ubuntu_releases))
return release, msg
def normalize_service_check_command(self, series, cmd):
"""Normalize a service check command with init system logic,
providing backward compatibility for tests which presume
a specific init system is present.
"""
# NOTE(beisner): this work-around is intended to be a temporary
# unblocker of vivid, wily and later tests. See deprecation
# warning on validate_services().
systemd_switch = self.ubuntu_releases.index('vivid')
# Preserve sudo usage and strip it out if present
if cmd.startswith('sudo '):
sudo_if_sudo, cmd = cmd[:5], cmd[5:]
else:
sudo_if_sudo = ''
# Guess the service name
cmd_words = list(set(cmd.split()))
for remove_items in ['status', 'service']:
if remove_items in cmd_words:
cmd_words.remove(remove_items)
service_name = cmd_words[0]
self.log.debug('Service name: {}'.format(service_name))
if (cmd.startswith('status') and
self.ubuntu_releases.index(series) >= systemd_switch):
# systemd init expected, but upstart command found
self.log.debug('Correcting for an upstart command '
'on a systemd release')
return '{}{} {} {}'.format(sudo_if_sudo, 'service',
service_name, 'status')
elif (cmd.startswith('service') and
self.ubuntu_releases.index(series) < systemd_switch):
# upstart init expected, but systemd command found
self.log.debug('Correcting for a systemd command on '
'an upstart release')
return '{}{} {}'.format(sudo_if_sudo, 'status', service_name)
return cmd
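Assumed behaviour by way of example, where u is an AmuletUtils instance and
vivid is the first systemd series:

# An upstart-style command against a systemd series is rewritten:
u.normalize_service_check_command('vivid', 'status mysql')
# -> 'service mysql status'
# A systemd-style command against an upstart series is rewritten back,
# preserving any sudo prefix:
u.normalize_service_check_command('trusty',
                                  'sudo service rabbitmq-server status')
# -> 'sudo status rabbitmq-server'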
def validate_services(self, commands):
"""Validate services.
Verify the specified services are running on the corresponding
service units.
"""
self.log.debug('Checking status of system services...')
# /!\ DEPRECATION WARNING (beisner):
# This method is present to preserve functionality
# of older tests which presume upstart init system, until they are
# rewritten to use validate_services_by_name().
self.log.warn('/!\\ DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
for k, v in six.iteritems(commands):
for cmd in v:
- output, code = k.run(cmd)
+ # Ask unit for its Ubuntu release codename
+ release, ret = self.get_ubuntu_release_from_sentry(k)
+ if ret:
+ return ret
+ # Conditionally correct for init system assumptions
+ cmd_normalized = self.normalize_service_check_command(release,
+ cmd)
+ self.log.debug('Command, normalized with init logic: '
+ '{}'.format(cmd_normalized))
+ output, code = k.run(cmd_normalized)
+ self.log.debug('{} `{}` returned '
+ '{}'.format(k.info['unit_name'],
+ cmd_normalized, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def validate_services_by_name(self, sentry_services):
"""Validate system service status by service name, automatically
detecting init system based on Ubuntu release codename."""
self.log.debug('Checking status of system services...')
# Point at which systemd became a thing
systemd_switch = self.ubuntu_releases.index('vivid')
for sentry_unit, services_list in six.iteritems(sentry_services):
# Get lsb_release codename from unit
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
if ret:
return ret
for service_name in services_list:
if self.ubuntu_releases.index(release) >= systemd_switch:
# init is systemd
cmd = 'sudo service {} status'.format(service_name)
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
@@ -104,6 +213,9 @@ class AmuletUtils(object):
Verify that the specified section of the config file contains
the expected option key:value pairs.
"""
self.log.debug('Validating config file data ({} in {} on {})'
'...'.format(section, config_file,
sentry_unit.info['unit_name']))
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
@@ -321,3 +433,10 @@ class AmuletUtils(object):
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)
def get_ubuntu_releases(self):
"""Return a list of all Ubuntu releases in order of release."""
_d = distro_info.UbuntuDistroInfo()
_release_list = _d.all
self.log.debug('Ubuntu release list: {}'.format(_release_list))
return _release_list
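The init-system checks above depend on this list being ordered oldest to
newest, e.g. (abridged, assumed output):

# _d.all -> ['warty', 'hoary', ..., 'trusty', 'utopic', 'vivid', 'wily']
# so ubuntu_releases.index('trusty') < ubuntu_releases.index('vivid'),
# marking trusty as upstart and vivid as systemd.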

@@ -25,7 +25,6 @@ import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
- from time import sleep
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
@@ -53,6 +52,7 @@ class OpenStackAmuletUtils(AmuletUtils):
are used to find the matching endpoint.
"""
self.log.debug('Validating endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
@@ -258,7 +258,7 @@ class OpenStackAmuletUtils(AmuletUtils):
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_thing instead of delete_image.')
+ 'delete_resource instead of delete_image.')
self.log.debug('Deleting glance image ({})...'.format(image))
num_before = len(list(glance.images.list()))
glance.images.delete(image)
@@ -306,7 +306,7 @@ class OpenStackAmuletUtils(AmuletUtils):
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_thing instead of delete_instance.')
+ 'delete_resource instead of delete_instance.')
self.log.debug('Deleting instance ({})...'.format(instance))
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
@@ -325,41 +325,30 @@ class OpenStackAmuletUtils(AmuletUtils):
return True
- # NOTE(beisner):
- # Rather than having a delete_XYZ method for each of the numerous
- # openstack types/objects/things, use delete_thing and pass a pointer.
- #
- # Similarly, instead of having wait/check/timeout/confirm loops
- # built into numerous methods, use thing_reaches_status + a pointer.
- #
- # Not an homage to Dr. Seuss. "Thing" is used due to conflict with
- # other more suitable names such as instance or object, both of
- # which may be confused with nova instance or swift object rather than a
- # python object or instance. See heat amulet test for usage examples.
- def delete_thing(self, thing, thing_id, msg="thing", max_wait=120):
- """Delete one openstack object/thing, such as one instance, keypair,
+ def delete_resource(self, resource, resource_id,
+ msg="resource", max_wait=120):
+ """Delete one openstack resource, such as one instance, keypair,
image, volume, stack, etc., and confirm deletion within max wait time.
- :param thing: pointer to openstack object type, ex:glance_client.images
- :param thing_id: unique name or id for the openstack object/thing
+ :param resource: pointer to os resource type, ex:glance_client.images
+ :param resource_id: unique name or id for the openstack resource
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, otherwise False
"""
- num_before = len(list(thing.list()))
- thing.delete(thing_id)
+ num_before = len(list(resource.list()))
+ resource.delete(resource_id)
tries = 0
- num_after = len(list(thing.list()))
- while num_after != (num_before - 1) and tries < (max_wait/4):
+ num_after = len(list(resource.list()))
+ while num_after != (num_before - 1) and tries < (max_wait / 4):
self.log.debug('{} delete check: '
'{} [{}:{}] {}'.format(msg, tries,
num_before,
num_after,
- thing_id))
+ resource_id))
time.sleep(4)
- num_after = len(list(thing.list()))
+ num_after = len(list(resource.list()))
tries += 1
self.log.debug('{}: expected, actual count = {}, '
@@ -371,39 +360,40 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.error('{} delete timed out'.format(msg))
return False
- def thing_reaches_status(self, thing, thing_id, expected_stat='available',
- msg='thing', max_wait=120):
- """Wait for an openstack object/thing's status to reach an
+ def resource_reaches_status(self, resource, resource_id,
+ expected_stat='available',
+ msg='resource', max_wait=120):
+ """Wait for an openstack resource's status to reach an
expected status within a specified time. Useful to confirm that
nova instances, cinder vols, snapshots, glance images, heat stacks
- and other objects/things eventually reach the expected status.
+ and other resources eventually reach the expected status.
- :param thing: pointer to openstack object type, ex: heat_client.stacks
- :param thing_id: unique id for the openstack object/thing
- :param expected_stat: status to expect object/thing to reach
+ :param resource: pointer to os resource type, ex: heat_client.stacks
+ :param resource_id: unique id for the openstack resource
+ :param expected_stat: status to expect resource to reach
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, False if status is not reached
"""
tries = 0
- thing_stat = thing.get(thing_id).status
- while thing_stat != expected_stat and tries < (max_wait/4):
+ resource_stat = resource.get(resource_id).status
+ while resource_stat != expected_stat and tries < (max_wait / 4):
self.log.debug('{} status check: '
'{} [{}:{}] {}'.format(msg, tries,
- thing_stat,
+ resource_stat,
expected_stat,
- thing_id))
- sleep(4)
- thing_stat = thing.get(thing_id).status
+ resource_id))
+ time.sleep(4)
+ resource_stat = resource.get(resource_id).status
tries += 1
self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, thing_stat, expected_stat))
+ '{}'.format(msg, resource_stat, expected_stat))
- if thing_stat == expected_stat:
+ if resource_stat == expected_stat:
return True
else:
self.log.debug('{} never reached expected status: '
- '{}'.format(thing_id, expected_stat))
+ '{}'.format(resource_id, expected_stat))
return False
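Together the two generic helpers replace the per-type delete/wait methods;
usage as in the heat test earlier in this commit (identifiers from that
test):

ret = u.resource_reaches_status(self.heat.stacks, _stack_id,
                                expected_stat='COMPLETE',
                                msg='Stack status wait')
u.delete_resource(self.nova.keypairs, KEYPAIR_NAME, msg='nova keypair')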

@@ -8,3 +8,8 @@ sources:
- ppa:juju/stable
packages:
- - amulet
+ - python-amulet
+ - python-distro-info
+ - python-glanceclient
+ - python-keystoneclient
+ - python-novaclient