[beisner,r=corey.bryant] Add basic amulet tests.

Corey Bryant 2015-06-12 09:50:03 -04:00
commit aff3411853
24 changed files with 2029 additions and 14 deletions


@@ -2,13 +2,17 @@
PYTHON := /usr/bin/env python
lint:
@echo -n "Running flake8 tests: "
@flake8 --exclude hooks/charmhelpers hooks
@flake8 unit_tests
@echo "OK"
@echo -n "Running charm proof: "
@echo Lint inspections and charm proof...
@flake8 --exclude hooks/charmhelpers hooks tests unit_tests
@charm proof
@echo "OK"
test:
@# Bundletester expects unit tests here.
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
functional_test:
@echo Starting all functional, lint and unit tests...
@juju test -v -p AMULET_HTTP_PROXY --timeout 2700
bin/charm_helpers_sync.py:
@mkdir -p bin
@@ -16,9 +20,9 @@ bin/charm_helpers_sync.py:
> bin/charm_helpers_sync.py
sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
unit_test:
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
all: unit_test lint
publish: lint unit_test
bzr push lp:charms/heat
bzr push lp:charms/trusty/heat

charm-helpers-tests.yaml (new Normal file, 5 lines)

@@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet


@@ -64,6 +64,10 @@ class CRMResourceNotFound(Exception):
pass
class CRMDCNotFound(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
@@ -116,8 +120,9 @@ def is_crm_dc():
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
except subprocess.CalledProcessError:
return False
except subprocess.CalledProcessError as ex:
raise CRMDCNotFound(str(ex))
current_dc = ''
for line in status.split('\n'):
if line.startswith('Current DC'):
@@ -125,10 +130,14 @@ def is_crm_dc():
current_dc = line.split(':')[1].split()[0]
if current_dc == get_unit_hostname():
return True
elif current_dc == 'NONE':
raise CRMDCNotFound('Current DC: NONE')
return False
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
@retry_on_exception(5, base_delay=2,
exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
"""
Returns True if the charm calling this is the elected corosync leader,
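The change above makes is_crm_leader() retry when the CRM DC is not yet known, instead of treating that transient state as "not leader". For readers unfamiliar with the retry_on_exception decorator applied here, a minimal sketch of how such a decorator behaves is shown below; it is illustrative only and is not the actual charm-helpers implementation (the real delay and logging details differ).

import functools
import time

def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    # Illustrative sketch: call the wrapped function, retrying up to
    # num_retries times when one of exc_type is raised, sleeping
    # base_delay seconds between attempts, then re-raising.
    def wrap(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            for attempt in range(num_retries + 1):
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if attempt == num_retries:
                        raise
                    time.sleep(base_delay)
        return wrapped
    return wrap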

tests/00-setup (new Executable file, 11 lines)

@@ -0,0 +1,11 @@
#!/bin/bash
set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet \
python-distro-info \
python-glanceclient \
python-keystoneclient \
python-novaclient


@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic heat deployment on precise-icehouse."""
from basic_deployment import HeatBasicDeployment
if __name__ == '__main__':
deployment = HeatBasicDeployment(series='precise',
openstack='cloud:precise-icehouse',
source='cloud:precise-updates/icehouse')
deployment.run_tests()


@@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic heat deployment on trusty-icehouse."""
from basic_deployment import HeatBasicDeployment
if __name__ == '__main__':
deployment = HeatBasicDeployment(series='trusty')
deployment.run_tests()

tests/016-basic-trusty-juno (new Executable file, 11 lines)

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic heat deployment on trusty-juno."""
from basic_deployment import HeatBasicDeployment
if __name__ == '__main__':
deployment = HeatBasicDeployment(series='trusty',
openstack='cloud:trusty-juno',
source='cloud:trusty-updates/juno')
deployment.run_tests()

tests/017-basic-trusty-kilo (new Executable file, 11 lines)

@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic heat deployment on trusty-kilo."""
from basic_deployment import HeatBasicDeployment
if __name__ == '__main__':
deployment = HeatBasicDeployment(series='trusty',
openstack='cloud:trusty-kilo',
source='cloud:trusty-updates/kilo')
deployment.run_tests()

tests/018-basic-utopic-juno (new Executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic heat deployment on utopic-juno."""
from basic_deployment import HeatBasicDeployment
if __name__ == '__main__':
deployment = HeatBasicDeployment(series='utopic')
deployment.run_tests()

tests/019-basic-vivid-kilo (new Executable file, 9 lines)

@@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic heat deployment on vivid-kilo."""
from basic_deployment import HeatBasicDeployment
if __name__ == '__main__':
deployment = HeatBasicDeployment(series='vivid')
deployment.run_tests()

tests/README (new Normal file, 76 lines)

@@ -0,0 +1,76 @@
This directory provides Amulet tests that focus on verification of heat
deployments.
test_* methods are called in lexical sort order.
Test name convention to ensure desired test order:
1xx service and endpoint checks
2xx relation checks
3xx config checks
4xx functional checks
9xx restarts and other final checks
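For example, tests/basic_deployment.py follows this convention with test_100_services, test_110_service_catalog, test_200_heat_mysql_shared_db_relation, test_300_heat_config, test_400_heat_resource_types_list and test_900_heat_restart_on_config_change.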
Common uses of heat relations in deployments:
- [ heat, mysql ]
- [ heat, keystone ]
- [ heat, rabbitmq-server ]
More detailed relations of heat service in a common deployment:
relations:
amqp:
- rabbitmq-server
identity-service:
- keystone
shared-db:
- mysql
In order to run tests, you'll need charm-tools installed (in addition to
juju, of course):
sudo add-apt-repository ppa:juju/stable
sudo apt-get update
sudo apt-get install charm-tools
If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
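For example (the proxy URL below is only a placeholder; use the address of your own proxy):
export AMULET_HTTP_PROXY=http://squid.example.com:3128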
The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.
* To run all tests (starting with 00-setup):
make test
* To run a specific test module (or modules):
juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
* To run a specific test module (or modules), and keep the environment
deployed after a failure:
juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
* To re-run a test module against an already deployed environment (one
that was deployed by a previous call to 'juju test --set-e'):
./tests/015-basic-trusty-icehouse
For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without changing
the results beyond the initial run. This enables editing and re-running of a
test module against an already deployed environment, as described above.
Manual debugging tips:
* Set the following env vars before using the OpenStack CLI as admin:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_REGION_NAME=RegionOne
* Set the following env vars before using the OpenStack CLI as demoUser:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=demoTenant
export OS_USERNAME=demoUser
export OS_PASSWORD=password
export OS_REGION_NAME=RegionOne

tests/basic_deployment.py (new Normal file, 606 lines)

@@ -0,0 +1,606 @@
#!/usr/bin/python
"""
Basic heat functional test.
"""
import amulet
import time
from heatclient.common import template_utils
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
# ERROR,
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
# Resource and name constants
IMAGE_NAME = 'cirros-image-1'
KEYPAIR_NAME = 'testkey'
STACK_NAME = 'hello_world'
RESOURCE_TYPE = 'server'
TEMPLATE_REL_PATH = 'tests/files/hot_hello_world.yaml'
class HeatBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic heat deployment."""
def __init__(self, series=None, openstack=None, source=None, git=False,
stable=False):
"""Deploy the entire test environment."""
super(HeatBasicDeployment, self).__init__(series, openstack,
source, stable)
self.git = git
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
self._initialize_tests()
def _add_services(self):
"""Add services
Add the services that we're testing, where heat is local,
and the rest of the services are from lp branches that are
compatible with the local charm (e.g. stable or next).
"""
this_service = {'name': 'heat'}
other_services = [{'name': 'keystone'},
{'name': 'rabbitmq-server'},
{'name': 'mysql'},
{'name': 'glance'},
{'name': 'nova-cloud-controller'},
{'name': 'nova-compute'}]
super(HeatBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'heat:amqp': 'rabbitmq-server:amqp',
'heat:identity-service': 'keystone:identity-service',
'heat:shared-db': 'mysql:shared-db',
'nova-compute:image-service': 'glance:image-service',
'nova-compute:shared-db': 'mysql:shared-db',
'nova-compute:amqp': 'rabbitmq-server:amqp',
'nova-cloud-controller:shared-db': 'mysql:shared-db',
'nova-cloud-controller:identity-service':
'keystone:identity-service',
'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
'nova-cloud-controller:cloud-compute':
'nova-compute:cloud-compute',
'nova-cloud-controller:image-service': 'glance:image-service',
'keystone:shared-db': 'mysql:shared-db',
'glance:identity-service': 'keystone:identity-service',
'glance:shared-db': 'mysql:shared-db',
'glance:amqp': 'rabbitmq-server:amqp'
}
super(HeatBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
nova_config = {'config-flags': 'auto_assign_floating_ip=False',
'enable-live-migration': 'False'}
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting'}
configs = {'nova-compute': nova_config, 'keystone': keystone_config}
super(HeatBasicDeployment, self)._configure_services(configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.heat_sentry = self.d.sentry.unit['heat/0']
self.mysql_sentry = self.d.sentry.unit['mysql/0']
self.keystone_sentry = self.d.sentry.unit['keystone/0']
self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
self.glance_sentry = self.d.sentry.unit['glance/0']
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Let things settle a bit before moving forward
time.sleep(30)
# Authenticate admin with keystone
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
user='admin',
password='openstack',
tenant='admin')
# Authenticate admin with glance endpoint
self.glance = u.authenticate_glance_admin(self.keystone)
# Authenticate admin with nova endpoint
self.nova = u.authenticate_nova_user(self.keystone,
user='admin',
password='openstack',
tenant='admin')
# Authenticate admin with heat endpoint
self.heat = u.authenticate_heat_admin(self.keystone)
def _image_create(self):
"""Create an image to be used by the heat template, verify it exists"""
u.log.debug('Creating glance image ({})...'.format(IMAGE_NAME))
# Create a new image
image_new = u.create_cirros_image(self.glance, IMAGE_NAME)
# Confirm image is created and has status of 'active'
if not image_new:
message = 'glance image create failed'
amulet.raise_status(amulet.FAIL, msg=message)
# Verify new image name
images_list = list(self.glance.images.list())
if images_list[0].name != IMAGE_NAME:
message = ('glance image create failed or unexpected '
'image name {}'.format(images_list[0].name))
amulet.raise_status(amulet.FAIL, msg=message)
def _keypair_create(self):
"""Create a keypair to be used by the heat template,
or get a keypair if it exists."""
self.keypair = u.create_or_get_keypair(self.nova,
keypair_name=KEYPAIR_NAME)
if not self.keypair:
msg = 'Failed to create or get keypair.'
amulet.raise_status(amulet.FAIL, msg=msg)
u.log.debug("Keypair: {} {}".format(self.keypair.id,
self.keypair.fingerprint))
def _stack_create(self):
"""Create a heat stack from a basic heat template, verify its status"""
u.log.debug('Creating heat stack...')
t_url = u.file_to_url(TEMPLATE_REL_PATH)
r_req = self.heat.http_client.raw_request
u.log.debug('template url: {}'.format(t_url))
t_files, template = template_utils.get_template_contents(t_url, r_req)
env_files, env = template_utils.process_environment_and_files(
env_path=None)
fields = {
'stack_name': STACK_NAME,
'timeout_mins': '15',
'disable_rollback': False,
'parameters': {
'admin_pass': 'Ubuntu',
'key_name': KEYPAIR_NAME,
'image': IMAGE_NAME
},
'template': template,
'files': dict(list(t_files.items()) + list(env_files.items())),
'environment': env
}
# Create the stack.
try:
_stack = self.heat.stacks.create(**fields)
u.log.debug('Stack data: {}'.format(_stack))
_stack_id = _stack['stack']['id']
u.log.debug('Creating new stack, ID: {}'.format(_stack_id))
except Exception as e:
# Generally, an api or cloud config error if this is hit.
msg = 'Failed to create heat stack: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
# Confirm stack reaches COMPLETE status.
# /!\ Heat stacks reach a COMPLETE status even when nova cannot
# find resources (a valid hypervisor) to fit the instance, in
# which case the heat stack self-deletes! Confirm anyway...
ret = u.resource_reaches_status(self.heat.stacks, _stack_id,
expected_stat="COMPLETE",
msg="Stack status wait")
_stacks = list(self.heat.stacks.list())
u.log.debug('All stacks: {}'.format(_stacks))
if not ret:
msg = 'Heat stack failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
# Confirm stack still exists.
try:
_stack = self.heat.stacks.get(STACK_NAME)
except Exception as e:
# Generally, a resource availability issue if this is hit.
msg = 'Failed to get heat stack: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
# Confirm stack name.
u.log.debug('Expected, actual stack name: {}, '
'{}'.format(STACK_NAME, _stack.stack_name))
if STACK_NAME != _stack.stack_name:
msg = 'Stack name mismatch, {} != {}'.format(STACK_NAME,
_stack.stack_name)
amulet.raise_status(amulet.FAIL, msg=msg)
def _stack_resource_compute(self):
"""Confirm that the stack has created a subsequent nova
compute resource, and confirm its status."""
u.log.debug('Confirming heat stack resource status...')
# Confirm existence of a heat-generated nova compute resource.
_resource = self.heat.resources.get(STACK_NAME, RESOURCE_TYPE)
_server_id = _resource.physical_resource_id
if _server_id:
u.log.debug('Heat template spawned nova instance, '
'ID: {}'.format(_server_id))
else:
msg = 'Stack failed to spawn a nova compute resource (instance).'
amulet.raise_status(amulet.FAIL, msg=msg)
# Confirm nova instance reaches ACTIVE status.
ret = u.resource_reaches_status(self.nova.servers, _server_id,
expected_stat="ACTIVE",
msg="nova instance")
if not ret:
msg = 'Nova compute instance failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
def _stack_delete(self):
"""Delete a heat stack, verify."""
u.log.debug('Deleting heat stack...')
u.delete_resource(self.heat.stacks, STACK_NAME, msg="heat stack")
def _image_delete(self):
"""Delete that image."""
u.log.debug('Deleting glance image...')
image = self.nova.images.find(name=IMAGE_NAME)
u.delete_resource(self.nova.images, image, msg="glance image")
def _keypair_delete(self):
"""Delete that keypair."""
u.log.debug('Deleting keypair...')
u.delete_resource(self.nova.keypairs, KEYPAIR_NAME, msg="nova keypair")
def test_100_services(self):
"""Verify the expected services are running on the corresponding
service units."""
service_names = {
self.heat_sentry: ['heat-api',
'heat-api-cfn',
'heat-engine'],
self.mysql_sentry: ['mysql'],
self.rabbitmq_sentry: ['rabbitmq-server'],
self.nova_compute_sentry: ['nova-compute',
'nova-network',
'nova-api'],
self.keystone_sentry: ['keystone'],
self.glance_sentry: ['glance-registry', 'glance-api']
}
ret = u.validate_services_by_name(service_names)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_110_service_catalog(self):
"""Verify that the service catalog endpoint data is valid."""
u.log.debug('Checking service catalog endpoint data...')
endpoint_vol = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
endpoint_id = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
if self._get_openstack_release() >= self.precise_folsom:
endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null
expected = {'compute': [endpoint_vol], 'orchestration': [endpoint_vol],
'image': [endpoint_vol], 'identity': [endpoint_id]}
if self._get_openstack_release() <= self.trusty_juno:
# Before Kilo
expected['s3'] = [endpoint_vol]
expected['ec2'] = [endpoint_vol]
actual = self.keystone.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_120_heat_endpoint(self):
"""Verify the heat api endpoint data."""
u.log.debug('Checking api endpoint data...')
endpoints = self.keystone.endpoints.list()
if self._get_openstack_release() <= self.trusty_juno:
# Before Kilo
admin_port = internal_port = public_port = '3333'
else:
# Kilo and later
admin_port = internal_port = public_port = '8004'
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
message = 'heat endpoint: {}'.format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_200_heat_mysql_shared_db_relation(self):
"""Verify the heat:mysql shared-db relation data"""
u.log.debug('Checking heat:mysql shared-db relation data...')
unit = self.heat_sentry
relation = ['shared-db', 'mysql:shared-db']
expected = {
'private-address': u.valid_ip,
'heat_database': 'heat',
'heat_username': 'heat',
'heat_hostname': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('heat:mysql shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_201_mysql_heat_shared_db_relation(self):
"""Verify the mysql:heat shared-db relation data"""
u.log.debug('Checking mysql:heat shared-db relation data...')
unit = self.mysql_sentry
relation = ['shared-db', 'heat:shared-db']
expected = {
'private-address': u.valid_ip,
'db_host': u.valid_ip,
'heat_allowed_units': 'heat/0',
'heat_password': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('mysql:heat shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_202_heat_keystone_identity_relation(self):
"""Verify the heat:keystone identity-service relation data"""
u.log.debug('Checking heat:keystone identity-service relation data...')
unit = self.heat_sentry
relation = ['identity-service', 'keystone:identity-service']
expected = {
'heat_service': 'heat',
'heat_region': 'RegionOne',
'heat_public_url': u.valid_url,
'heat_admin_url': u.valid_url,
'heat_internal_url': u.valid_url,
'heat-cfn_service': 'heat-cfn',
'heat-cfn_region': 'RegionOne',
'heat-cfn_public_url': u.valid_url,
'heat-cfn_admin_url': u.valid_url,
'heat-cfn_internal_url': u.valid_url
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('heat:keystone identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_203_keystone_heat_identity_relation(self):
"""Verify the keystone:heat identity-service relation data"""
u.log.debug('Checking keystone:heat identity-service relation data...')
unit = self.keystone_sentry
relation = ['identity-service', 'heat:identity-service']
expected = {
'service_protocol': 'http',
'service_tenant': 'services',
'admin_token': 'ubuntutesting',
'service_password': u.not_null,
'service_port': '5000',
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'auth_host': u.valid_ip,
'service_username': 'heat-cfn_heat',
'service_tenant_id': u.not_null,
'service_host': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('keystone:heat identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_204_heat_rmq_amqp_relation(self):
"""Verify the heat:rabbitmq-server amqp relation data"""
u.log.debug('Checking heat:rabbitmq-server amqp relation data...')
unit = self.heat_sentry
relation = ['amqp', 'rabbitmq-server:amqp']
expected = {
'username': u.not_null,
'private-address': u.valid_ip,
'vhost': 'openstack'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('heat:rabbitmq-server amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_205_rmq_heat_amqp_relation(self):
"""Verify the rabbitmq-server:heat amqp relation data"""
u.log.debug('Checking rabbitmq-server:heat amqp relation data...')
unit = self.rabbitmq_sentry
relation = ['amqp', 'heat:amqp']
expected = {
'private-address': u.valid_ip,
'password': u.not_null,
'hostname': u.valid_ip,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('rabbitmq-server:heat amqp', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_300_heat_config(self):
"""Verify the data in the heat config file."""
u.log.debug('Checking heat config file data...')
unit = self.heat_sentry
conf = '/etc/heat/heat.conf'
ks_rel = self.keystone_sentry.relation('identity-service',
'heat:identity-service')
rmq_rel = self.rabbitmq_sentry.relation('amqp',
'heat:amqp')
mysql_rel = self.mysql_sentry.relation('shared-db',
'heat:shared-db')
u.log.debug('keystone:heat relation: {}'.format(ks_rel))
u.log.debug('rabbitmq:heat relation: {}'.format(rmq_rel))
u.log.debug('mysql:heat relation: {}'.format(mysql_rel))
db_uri = "mysql://{}:{}@{}/{}".format('heat',
mysql_rel['heat_password'],
mysql_rel['db_host'],
'heat')
auth_uri = '{}://{}:{}/v2.0'.format(ks_rel['service_protocol'],
ks_rel['service_host'],
ks_rel['service_port'])
expected = {
'DEFAULT': {
'use_syslog': 'False',
'debug': 'False',
'verbose': 'False',
'log_dir': '/var/log/heat',
'instance_driver': 'heat.engine.nova',
'plugin_dirs': '/usr/lib64/heat,/usr/lib/heat',
'environment_dir': '/etc/heat/environment.d',
'deferred_auth_method': 'password',
'host': 'heat',
'rabbit_userid': 'heat',
'rabbit_virtual_host': 'openstack',
'rabbit_password': rmq_rel['password'],
'rabbit_host': rmq_rel['hostname']
},
'keystone_authtoken': {
'auth_uri': auth_uri,
'auth_host': ks_rel['service_host'],
'auth_port': ks_rel['auth_port'],
'auth_protocol': ks_rel['auth_protocol'],
'admin_tenant_name': 'services',
'admin_user': 'heat-cfn_heat',
'admin_password': ks_rel['service_password'],
'signing_dir': '/var/cache/heat'
},
'database': {
'connection': db_uri
},
'heat_api': {
'bind_port': '7994'
},
'heat_api_cfn': {
'bind_port': '7990'
},
'paste_deploy': {
'api_paste_config': '/etc/heat/api-paste.ini'
},
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "heat config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_400_heat_resource_types_list(self):
"""Check default heat resource list behavior, also confirm
heat functionality."""
u.log.debug('Checking default heat resource list...')
try:
types = list(self.heat.resource_types.list())
if type(types) is list:
u.log.debug('Resource type list check is ok.')
else:
msg = 'Resource type list is not a list!'
u.log.error('{}'.format(msg))
raise
if len(types) > 0:
u.log.debug('Resource type list is populated '
'({}, ok).'.format(len(types)))
else:
msg = 'Resource type list length is zero!'
u.log.error(msg)
raise
except:
msg = 'Resource type list failed.'
u.log.error(msg)
raise
def test_402_heat_stack_list(self):
"""Check default heat stack list behavior, also confirm
heat functionality."""
u.log.debug('Checking default heat stack list...')
try:
stacks = list(self.heat.stacks.list())
if type(stacks) is list:
u.log.debug("Stack list check is ok.")
else:
msg = 'Stack list returned something other than a list.'
u.log.error(msg)
raise
except:
msg = 'Heat stack list failed.'
u.log.error(msg)
raise
def test_410_heat_stack_create_delete(self):
"""Create a heat stack from template, confirm that a corresponding
nova compute resource is spawned, delete stack."""
self._image_create()
self._keypair_create()
self._stack_create()
self._stack_resource_compute()
self._stack_delete()
self._image_delete()
self._keypair_delete()
def test_900_heat_restart_on_config_change(self):
"""Verify that the specified services are restarted when the config
is changed."""
sentry = self.heat_sentry
juju_service = 'heat'
# Expected default and alternate values
set_default = {'use-syslog': 'False'}
set_alternate = {'use-syslog': 'True'}
# Config file affected by juju set config change
conf_file = '/etc/heat/heat.conf'
# Services which are expected to restart upon config change
services = ['heat-api',
'heat-api-cfn',
'heat-engine']
# Make config change, check for service restarts
u.log.debug('Making config change on {}...'.format(juju_service))
self.d.configure(juju_service, set_alternate)
sleep_time = 30
for s in services:
u.log.debug("Checking that service restarted: {}".format(s))
if not u.service_restarted(sentry, s,
conf_file, sleep_time=sleep_time):
self.d.configure(juju_service, set_default)
msg = "service {} didn't restart after config change".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
sleep_time = 0
self.d.configure(juju_service, set_default)


@@ -0,0 +1,38 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa


@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -0,0 +1,93 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import os
import six
class AmuletDeployment(object):
"""Amulet deployment.
This class provides generic Amulet deployment and test runner
methods.
"""
def __init__(self, series=None):
"""Initialize the deployment environment."""
self.series = None
if series:
self.series = series
self.d = amulet.Deployment(series=self.series)
else:
self.d = amulet.Deployment()
def _add_services(self, this_service, other_services):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're testing and other_services are the other services that
are being used in the local amulet tests.
"""
if this_service['name'] != os.path.basename(os.getcwd()):
s = this_service['name']
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'])
for svc in other_services:
if 'location' in svc:
branch_location = svc['location']
elif self.series:
branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
else:
branch_location = None
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'])
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in six.iteritems(relations):
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup(timeout=900)
self.d.sentry.wait(timeout=900)
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
except Exception:
raise
def run_tests(self):
"""Run all of the methods that are prefixed with 'test_'."""
for test in dir(self):
if test.startswith('test_'):
getattr(self, test)()
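A note on ordering: run_tests() walks dir(self), and Python's dir() returns attribute names in alphabetical order, which is what makes the numeric test_NNN prefixes control execution order. A tiny standalone illustration (the class and method names below are made up):

class Demo(object):
    def test_200_second(self):
        pass
    def test_100_first(self):
        pass

# dir() sorts names, so test_100_first is visited before test_200_second
print([n for n in dir(Demo) if n.startswith('test_')])
# ['test_100_first', 'test_200_second']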


@@ -0,0 +1,408 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import distro_info
import io
import logging
import os
import re
import six
import sys
import time
import urlparse
class AmuletUtils(object):
"""Amulet utilities.
This class provides common utility functions that are used by Amulet
tests.
"""
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
self.ubuntu_releases = self.get_ubuntu_releases()
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def valid_ip(self, ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
else:
return False
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
if p.match(url):
return True
else:
return False
def get_ubuntu_release_from_sentry(self, sentry_unit):
"""Get Ubuntu release codename from sentry unit.
:param sentry_unit: amulet sentry/service unit pointer
:returns: list of strings - release codename, failure message
"""
msg = None
cmd = 'lsb_release -cs'
release, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} lsb_release: {}'.format(
sentry_unit.info['unit_name'], release))
else:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, release, code))
if release not in self.ubuntu_releases:
msg = ("Release ({}) not found in Ubuntu releases "
"({})".format(release, self.ubuntu_releases))
return release, msg
def validate_services(self, commands):
"""Validate that lists of commands succeed on service units. Can be
used to verify system services are running on the corresponding
service units.
:param commands: dict with sentry keys and arbitrary command list values
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# /!\ DEPRECATION WARNING (beisner):
# New and existing tests should be rewritten to use
# validate_services_by_name() as it is aware of init systems.
self.log.warn('/!\\ DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
for k, v in six.iteritems(commands):
for cmd in v:
output, code = k.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(k.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def validate_services_by_name(self, sentry_services):
"""Validate system service status by service name, automatically
detecting init system based on Ubuntu release codename.
:param sentry_services: dict with sentry keys and svc list values
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# Point at which systemd became a thing
systemd_switch = self.ubuntu_releases.index('vivid')
for sentry_unit, services_list in six.iteritems(sentry_services):
# Get lsb_release codename from unit
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
if ret:
return ret
for service_name in services_list:
if (self.ubuntu_releases.index(release) >= systemd_switch or
service_name == "rabbitmq-server"):
# init is systemd
cmd = 'sudo service {} status'.format(service_name)
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
# NOTE(beisner): by default, ConfigParser does not handle options
# with no value, such as the flags used in the mysql my.cnf file.
# https://bugs.python.org/issue7005
config = ConfigParser.ConfigParser(allow_no_value=True)
config.readfp(io.StringIO(file_contents))
return config
def validate_config_data(self, sentry_unit, config_file, section,
expected):
"""Validate config file data.
Verify that the specified section of the config file contains
the expected option key:value pairs.
"""
self.log.debug('Validating config file data ({} in {} on {})'
'...'.format(section, config_file,
sentry_unit.info['unit_name']))
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
return "section [{}] does not exist".format(section)
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
if config.get(section, k) != expected[k]:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, config.get(section, k), k, expected[k])
return None
def _validate_dict_data(self, expected, actual):
"""Validate dictionary data.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
self.log.debug('actual: {}'.format(repr(actual)))
self.log.debug('expected: {}'.format(repr(expected)))
for k, v in six.iteritems(expected):
if k in actual:
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
if v != actual[k]:
return "{}:{}".format(k, actual[k])
elif not v(actual[k]):
return "{}:{}".format(k, actual[k])
else:
return "key '{}' does not exist".format(k)
return None
def validate_relation_data(self, sentry_unit, relation, expected):
"""Validate actual relation data based on expected relation data."""
actual = sentry_unit.relation(relation[0], relation[1])
return self._validate_dict_data(expected, actual)
def _validate_list_data(self, expected, actual):
"""Compare expected list vs actual list data."""
for e in expected:
if e not in actual:
return "expected item {} not found in actual list".format(e)
return None
def not_null(self, string):
if string is not None:
return True
else:
return False
def _get_file_mtime(self, sentry_unit, filename):
"""Get last modification time of file."""
return sentry_unit.file_stat(filename)['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
"""Get process' start time.
Determine start time of the process based on the last modification
time of the /proc/pid directory. If pgrep_full is True, the process
name is matched against the full command line.
"""
if pgrep_full:
cmd = 'pgrep -o -f {}'.format(service)
else:
cmd = 'pgrep -o {}'.format(service)
cmd = cmd + ' | grep -v pgrep || exit 0'
cmd_out = sentry_unit.run(cmd)
self.log.debug('CMDout: ' + str(cmd_out))
if cmd_out[0]:
self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
proc_dir = '/proc/{}'.format(cmd_out[0].strip())
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=False, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
return True
else:
return False
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=False, sleep_time=20,
retry_count=2):
"""Check if service was been started after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
Returns:
bool: True if service found and its start time is newer than mtime,
False if service is older than mtime or if service was
not found.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
while retry_count > 0 and not proc_start_time:
self.log.debug('No pid file found for service %s, will retry %i '
'more times' % (service, retry_count))
time.sleep(30)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
retry_count = retry_count - 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did '
'not start')
return False
if proc_start_time >= mtime:
self.log.debug('proc start time is newer than provided mtime'
'(%s >= %s)' % (proc_start_time, mtime))
return True
else:
self.log.warn('proc start time (%s) is older than provided mtime '
'(%s), service did not restart' % (proc_start_time,
mtime))
return False
def config_updated_since(self, sentry_unit, filename, mtime,
sleep_time=20):
"""Check if file was modified after a given time.
Args:
sentry_unit (sentry): The sentry unit to check the file mtime on
filename (string): The file to check mtime of
mtime (float): The epoch time to check against
sleep_time (int): Seconds to sleep before checking the file mtime
Returns:
bool: True if file was modified more recently than mtime, False if
file was modified before mtime.
"""
self.log.debug('Checking %s updated since %s' % (filename, mtime))
time.sleep(sleep_time)
file_mtime = self._get_file_mtime(sentry_unit, filename)
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s)' % (file_mtime, mtime))
return True
else:
self.log.warn('File mtime %s is older than provided mtime %s'
% (file_mtime, mtime))
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=False,
sleep_time=20, retry_count=2):
"""Check service and file were updated after mtime
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
filename (string): The file to check mtime of
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
Typical Usage:
u = OpenStackAmuletUtils(ERROR)
...
mtime = u.get_sentry_time(self.cinder_sentry)
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
if not u.validate_service_config_changed(self.cinder_sentry,
mtime,
'cinder-api',
'/etc/cinder/cinder.conf'):
amulet.raise_status(amulet.FAIL, msg='update failed')
Returns:
bool: True if both service and file were updated/restarted after
mtime, False if service is older than mtime or if service was
not found or if filename was modified before mtime.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
service_restart = self.service_restarted_since(sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=0,
retry_count=retry_count)
config_update = self.config_updated_since(sentry_unit, filename, mtime,
sleep_time=0)
return service_restart and config_update
def get_sentry_time(self, sentry_unit):
"""Return current epoch time on a sentry"""
cmd = "date +'%s'"
return float(sentry_unit.run(cmd)[0])
def relation_error(self, name, data):
return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)
def get_ubuntu_releases(self):
"""Return a list of all Ubuntu releases in order of release."""
_d = distro_info.UbuntuDistroInfo()
_release_list = _d.all
self.log.debug('Ubuntu release list: {}'.format(_release_list))
return _release_list
def file_to_url(self, file_rel_path):
"""Convert a relative file path to a file URL."""
_abs_path = os.path.abspath(file_rel_path)
return urlparse.urlparse(_abs_path, scheme='file').geturl()


@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@@ -0,0 +1,151 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb']
if self.series in ['precise', 'trusty']:
base_series = self.series
else:
base_series = self.current_next
if self.stable:
for svc in other_services:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
for svc in other_services:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
# Openstack subordinate charms do not expose an origin option as that
# is controlled by the principal charm
ignore = ['neutron-openvswitch']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + ignore:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in ignore:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty) = range(12)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
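To make the string handling in _get_openstack_release_string() concrete, this is roughly what the parsing does for a cloud archive origin (the values below are example inputs only):

series = 'trusty'
openstack = 'cloud:trusty-kilo'
os_origin = openstack.split(':')[1]                         # 'trusty-kilo'
release = os_origin.split('%s-' % series)[1].split('/')[0]  # 'kilo'
# With openstack=None the series lookup applies instead, e.g. 'trusty' -> 'icehouse'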


@@ -0,0 +1,413 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import six
import time
import urllib
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charm tests.
"""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
self.log.debug('Validating endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('Validating service catalog endpoint data...')
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate tenant data.
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('Validating tenant data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate role data.
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('Validating role data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('Validating user data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate flavor data.
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('Validating flavor data...')
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
self.log.debug('Authenticating keystone user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
self.log.debug('Authenticating glance admin...')
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_heat_admin(self, keystone):
"""Authenticates the admin user with heat."""
self.log.debug('Authenticating heat admin...')
ep = keystone.service_catalog.url_for(service_type='orchestration',
endpoint_type='publicURL')
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
self.log.debug('Creating glance image ({})...'.format(image_name))
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(local_path):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
        with open(local_path, 'rb') as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
count = 1
status = image.status
while status != 'active' and count < 10:
time.sleep(3)
image = glance.images.get(image.id)
status = image.status
self.log.debug('image status: {}'.format(status))
count += 1
if status != 'active':
self.log.error('image creation timed out')
return None
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_image.')
self.log.debug('Deleting glance image ({})...'.format(image))
num_before = len(list(glance.images.list()))
glance.images.delete(image)
count = 1
num_after = len(list(glance.images.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(glance.images.list()))
self.log.debug('number of images: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('image deletion timed out')
return False
return True
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
self.log.debug('Creating instance '
'({}|{}|{})'.format(instance_name, image_name, flavor))
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status != 'ACTIVE':
self.log.error('instance creation timed out')
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_instance.')
self.log.debug('Deleting instance ({})...'.format(instance))
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
count = 1
num_after = len(list(nova.servers.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(nova.servers.list()))
self.log.debug('number of instances: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('instance deletion timed out')
return False
return True
def create_or_get_keypair(self, nova, keypair_name="testkey"):
"""Create a new keypair, or return pointer if it already exists."""
try:
_keypair = nova.keypairs.get(keypair_name)
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
        except Exception:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
def delete_resource(self, resource, resource_id,
msg="resource", max_wait=120):
"""Delete one openstack resource, such as one instance, keypair,
image, volume, stack, etc., and confirm deletion within max wait time.
        :param resource: pointer to os resource type, ex: glance_client.images
:param resource_id: unique name or id for the openstack resource
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, otherwise False
"""
num_before = len(list(resource.list()))
resource.delete(resource_id)
tries = 0
num_after = len(list(resource.list()))
while num_after != (num_before - 1) and tries < (max_wait / 4):
self.log.debug('{} delete check: '
'{} [{}:{}] {}'.format(msg, tries,
num_before,
num_after,
resource_id))
time.sleep(4)
num_after = len(list(resource.list()))
tries += 1
self.log.debug('{}: expected, actual count = {}, '
'{}'.format(msg, num_before - 1, num_after))
if num_after == (num_before - 1):
return True
else:
self.log.error('{} delete timed out'.format(msg))
return False
def resource_reaches_status(self, resource, resource_id,
expected_stat='available',
msg='resource', max_wait=120):
"""Wait for an openstack resources status to reach an
expected status within a specified time. Useful to confirm that
nova instances, cinder vols, snapshots, glance images, heat stacks
and other resources eventually reach the expected status.
:param resource: pointer to os resource type, ex: heat_client.stacks
:param resource_id: unique id for the openstack resource
:param expected_stat: status to expect resource to reach
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, False if status is not reached
"""
tries = 0
resource_stat = resource.get(resource_id).status
while resource_stat != expected_stat and tries < (max_wait / 4):
self.log.debug('{} status check: '
'{} [{}:{}] {}'.format(msg, tries,
resource_stat,
expected_stat,
resource_id))
time.sleep(4)
resource_stat = resource.get(resource_id).status
tries += 1
        self.log.debug('{}: expected, actual status = {}, '
                       '{}'.format(msg, expected_stat, resource_stat))
if resource_stat == expected_stat:
return True
else:
            self.log.error('{} never reached expected status: '
                           '{}'.format(resource_id, expected_stat))
return False
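
Taken together, these helpers are normally driven from the charm's basic
deployment test. The following is a minimal usage sketch only, not part of the
synced code: u (an OpenstackAmuletUtils instance), keystone_sentry and the
admin credentials are assumed to come from the surrounding deployment class.

# Sketch only: u, keystone_sentry and the credentials are assumed to be
# provided by the deployment under test.
keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                         password='openstack', tenant='admin')
glance = u.authenticate_glance_admin(keystone)
nova = u.authenticate_nova_user(keystone, user='admin',
                                password='openstack', tenant='admin')

# Upload a cirros image and boot a tiny instance from it.
image = u.create_cirros_image(glance, 'cirros-test-image')
instance = u.create_instance(nova, 'cirros-test-image', 'amulet-test',
                             'm1.tiny')

# The generic helpers replace the deprecated delete_image/delete_instance.
u.resource_reaches_status(nova.servers, instance.id, expected_stat='ACTIVE',
                          msg='nova instance')
u.delete_resource(nova.servers, instance.id, msg='nova instance')
u.delete_resource(glance.images, image.id, msg='glance image')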

View File

@@ -0,0 +1,66 @@
#
# This is a hello world HOT template just defining a single compute
# server.
#
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single server.
Contains just base features to verify base HOT support.
parameters:
key_name:
type: string
description: Name of an existing key pair to use for the server
constraints:
- custom_constraint: nova.keypair
flavor:
type: string
description: Flavor for the server to be created
default: m1.tiny
constraints:
- custom_constraint: nova.flavor
image:
type: string
description: Image ID or image name to use for the server
constraints:
- custom_constraint: glance.image
admin_pass:
type: string
description: Admin password
hidden: true
constraints:
- length: { min: 6, max: 8 }
description: Password length must be between 6 and 8 characters
- allowed_pattern: "[a-zA-Z0-9]+"
description: Password must consist of characters and numbers only
- allowed_pattern: "[A-Z]+[a-zA-Z0-9]*"
description: Password must start with an uppercase character
db_port:
type: number
description: Database port number
default: 50000
constraints:
- range: { min: 40000, max: 60000 }
description: Port number must be between 40000 and 60000
resources:
server:
type: OS::Nova::Server
properties:
key_name: { get_param: key_name }
image: { get_param: image }
flavor: { get_param: flavor }
admin_pass: { get_param: admin_pass }
user_data:
str_replace:
template: |
#!/bin/bash
echo db_port
params:
db_port: { get_param: db_port }
outputs:
server_networks:
description: The networks of the deployed server
value: { get_attr: [server, networks] }
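
As a rough illustration, an amulet test could exercise this template through
python-heatclient roughly as follows. This is a sketch under assumptions: the
template path, stack name and parameter values are examples, and heat is taken
to be the client returned by authenticate_heat_admin() above.

# Sketch only; the template path and parameter values are example assumptions,
# chosen to satisfy the constraints defined in the template.
with open('tests/files/hot_hello_world.yaml') as f:
    template = f.read()

stack = heat.stacks.create(stack_name='hello-world',
                           template=template,
                           parameters={'key_name': 'testkey',
                                       'flavor': 'm1.tiny',
                                       'image': 'cirros-test-image',
                                       'admin_pass': 'Passw0rd'})
stack_id = stack['stack']['id']

# resource_reaches_status() from the utils above can then poll heat.stacks
# with stack_id until the stack reports the expected status.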

15
tests/tests.yaml Normal file
View File

@@ -0,0 +1,15 @@
bootstrap: true
reset: true
virtualenv: true
makefile:
- lint
- unit_test
sources:
- ppa:juju/stable
packages:
- amulet
- python-amulet
- python-distro-info
- python-glanceclient
- python-keystoneclient
- python-novaclient