[coreycb,r=james-page] Add amulet tests for swift
20 Makefile
@@ -3,16 +3,24 @@ PYTHON := /usr/bin/env python
 
 lint:
 	@flake8 --exclude hooks/charmhelpers --ignore=E125 hooks
-	@flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests
+	@flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests tests
 	@charm proof
 
-test:
-	@echo Starting tests...
+unit_test:
+	@echo Starting unit tests...
 	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
 
-sync:
-	@charm-helper-sync -c charm-helpers.yaml
+test:
+	@echo Starting Amulet tests...
+	# coreycb note: The -v should only be temporary until Amulet sends
+	# raise_status() messages to stderr:
+	# https://bugs.launchpad.net/amulet/+bug/1320357
+	@juju test -v -p AMULET_HTTP_PROXY
+
+sync:
+	@charm-helper-sync -c charm-helpers-hooks.yaml
+	@charm-helper-sync -c charm-helpers-tests.yaml
 
-publish: lint test
+publish: lint unit_test
 	bzr push lp:charms/swift-proxy
 	bzr push lp:charms/trusty/swift-proxy
5 charm-helpers-tests.yaml Normal file
@@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
    - contrib.amulet
    - contrib.openstack.amulet
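
The modules listed above are what the new Amulet tests import once `make sync`
has copied charm-helpers into tests/charmhelpers. A minimal sketch of that
usage, assuming the sync has already been run from the charm root:

    # Only importable after `make sync` populates tests/charmhelpers.
    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )
    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        ERROR,
    )

    u = OpenStackAmuletUtils(ERROR)  # shared helper object, as in basic_deployment.py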
@@ -62,6 +62,15 @@ def peer_units():
     return peers
 
 
+def peer_ips(peer_relation='cluster', addr_key='private-address'):
+    '''Return a dict of peers and their private-address'''
+    peers = {}
+    for r_id in relation_ids(peer_relation):
+        for unit in relation_list(r_id):
+            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
+    return peers
+
+
 def oldest_peer(peers):
     local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
     for peer in peers:
@@ -4,8 +4,11 @@ from charmhelpers.contrib.amulet.deployment import (
 
 
 class OpenStackAmuletDeployment(AmuletDeployment):
-    """This class inherits from AmuletDeployment and has additional support
-    that is specifically for use by OpenStack charms."""
+    """OpenStack amulet deployment.
+
+    This class inherits from AmuletDeployment and has additional support
+    that is specifically for use by OpenStack charms.
+    """
 
     def __init__(self, series=None, openstack=None, source=None):
         """Initialize the deployment environment."""
@@ -40,11 +43,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):
-        """Return an integer representing the enum value of the openstack
-        release."""
-        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
-            self.precise_havana, self.precise_icehouse, \
-            self.trusty_icehouse = range(6)
+        """Get openstack release.
+
+        Return an integer representing the enum value of the openstack
+        release.
+        """
+        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
+         self.precise_havana, self.precise_icehouse,
+         self.trusty_icehouse) = range(6)
         releases = {
             ('precise', None): self.precise_essex,
             ('precise', 'cloud:precise-folsom'): self.precise_folsom,
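
The release enums set up here are how the new swift-proxy tests gate
release-specific checks. A sketch of the pattern (it mirrors
tests/basic_deployment.py; constructing the deployment object actually deploys
a test environment, so this is illustrative rather than something to paste in):

    from basic_deployment import SwiftProxyBasicDeployment

    deployment = SwiftProxyBasicDeployment(series='trusty')
    if deployment._get_openstack_release() >= deployment.precise_icehouse:
        # icehouse and later also run swift-container-sync on the storage unit
        pass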
@@ -16,8 +16,11 @@ ERROR = logging.ERROR
 
 
 class OpenStackAmuletUtils(AmuletUtils):
-    """This class inherits from AmuletUtils and has additional support
-    that is specifically for use by OpenStack charms."""
+    """OpenStack amulet utilities.
+
+    This class inherits from AmuletUtils and has additional support
+    that is specifically for use by OpenStack charms.
+    """
 
     def __init__(self, log_level=ERROR):
         """Initialize the deployment environment."""
@@ -25,13 +28,17 @@ class OpenStackAmuletUtils(AmuletUtils):
 
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                                public_port, expected):
-        """Validate actual endpoint data vs expected endpoint data. The ports
-        are used to find the matching endpoint."""
+        """Validate endpoint data.
+
+        Validate actual endpoint data vs expected endpoint data. The ports
+        are used to find the matching endpoint.
+        """
         found = False
         for ep in endpoints:
             self.log.debug('endpoint: {}'.format(repr(ep)))
-            if admin_port in ep.adminurl and internal_port in ep.internalurl \
-                    and public_port in ep.publicurl:
+            if (admin_port in ep.adminurl and
+                    internal_port in ep.internalurl and
+                    public_port in ep.publicurl):
                 found = True
                 actual = {'id': ep.id,
                           'region': ep.region,
@@ -47,8 +54,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return 'endpoint not found'
 
     def validate_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate a list of actual service catalog endpoints vs a list of
-        expected service catalog endpoints."""
+        """Validate service catalog endpoint data.
+
+        Validate a list of actual service catalog endpoints vs a list of
+        expected service catalog endpoints.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for k, v in expected.iteritems():
             if k in actual:
@@ -60,8 +70,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_tenant_data(self, expected, actual):
-        """Validate a list of actual tenant data vs list of expected tenant
-        data."""
+        """Validate tenant data.
+
+        Validate a list of actual tenant data vs list of expected tenant
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -78,8 +91,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_role_data(self, expected, actual):
-        """Validate a list of actual role data vs a list of expected role
-        data."""
+        """Validate role data.
+
+        Validate a list of actual role data vs a list of expected role
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -95,8 +111,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_user_data(self, expected, actual):
-        """Validate a list of actual user data vs a list of expected user
-        data."""
+        """Validate user data.
+
+        Validate a list of actual user data vs a list of expected user
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -114,21 +133,24 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_flavor_data(self, expected, actual):
-        """Validate a list of actual flavors vs a list of expected flavors."""
+        """Validate flavor data.
+
+        Validate a list of actual flavors vs a list of expected flavors.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         act = [a.name for a in actual]
         return self._validate_list_data(expected, act)
 
     def tenant_exists(self, keystone, tenant):
-        """Return True if tenant exists"""
+        """Return True if tenant exists."""
         return tenant in [t.name for t in keystone.tenants.list()]
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant):
         """Authenticates admin user with the keystone admin endpoint."""
-        service_ip = \
-            keystone_sentry.relation('shared-db',
-                                     'mysql:shared-db')['private-address']
+        unit = keystone_sentry
+        service_ip = unit.relation('shared-db',
+                                   'mysql:shared-db')['private-address']
         ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
         return keystone_client.Client(username=user, password=password,
                                       tenant_name=tenant, auth_url=ep)
@@ -177,12 +199,40 @@ class OpenStackAmuletUtils(AmuletUtils):
             image = glance.images.create(name=image_name, is_public=True,
                                          disk_format='qcow2',
                                          container_format='bare', data=f)
+        count = 1
+        status = image.status
+        while status != 'active' and count < 10:
+            time.sleep(3)
+            image = glance.images.get(image.id)
+            status = image.status
+            self.log.debug('image status: {}'.format(status))
+            count += 1
+
+        if status != 'active':
+            self.log.error('image creation timed out')
+            return None
+
         return image
 
     def delete_image(self, glance, image):
         """Delete the specified image."""
+        num_before = len(list(glance.images.list()))
         glance.images.delete(image)
 
+        count = 1
+        num_after = len(list(glance.images.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(glance.images.list()))
+            self.log.debug('number of images: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('image deletion timed out')
+            return False
+
+        return True
+
     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
         image = nova.images.find(name=image_name)
@@ -199,11 +249,27 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('instance status: {}'.format(status))
             count += 1
 
-        if status == 'BUILD':
+        if status != 'ACTIVE':
+            self.log.error('instance creation timed out')
             return None
 
         return instance
 
     def delete_instance(self, nova, instance):
         """Delete the specified instance."""
+        num_before = len(list(nova.servers.list()))
         nova.servers.delete(instance)
+
+        count = 1
+        num_after = len(list(nova.servers.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(nova.servers.list()))
+            self.log.debug('number of instances: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('instance deletion timed out')
+            return False
+
+        return True
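
These validators are driven from the new swift-proxy tests; a condensed sketch
of the calling pattern (adapted from tests/basic_deployment.py, assuming `u` is
an OpenStackAmuletUtils instance and `keystone` an authenticated admin client):

    import amulet

    endpoints = keystone.endpoints.list()
    expected = {'id': u.not_null,
                'region': 'RegionOne',
                'adminurl': u.valid_url,
                'internalurl': u.valid_url,
                'publicurl': u.valid_url,
                'service_id': u.not_null}
    # swift-proxy publishes its object-store endpoint on port 8080
    ret = u.validate_endpoint_data(endpoints, '8080', '8080', '8080', expected)
    if ret:
        amulet.raise_status(amulet.FAIL,
                            msg='object-store endpoint: {}'.format(ret))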
@@ -753,6 +753,17 @@ class SubordinateConfigContext(OSContextGenerator):
         return ctxt
 
 
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+        return ctxt
+
+
 class SyslogContext(OSContextGenerator):
 
     def __call__(self):
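
The `False if config(...) is None else config(...)` pattern above simply maps
unset charm options to False while passing explicit values through; a tiny
self-contained illustration (the _coerce helper exists only for this example):

    def _coerce(value):
        # Unset (None) becomes False; anything explicitly set passes through.
        return False if value is None else value

    assert _coerce(None) is False
    assert _coerce(True) is True
    assert _coerce(False) is False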
11 tests/00-setup Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

set -ex

sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes python-amulet
sudo apt-get install --yes python-swiftclient
sudo apt-get install --yes python-glanceclient
sudo apt-get install --yes python-keystoneclient
sudo apt-get install --yes python-novaclient
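
A quick way to confirm 00-setup pulled in everything the tests import is an
import check like the following (illustrative; run with the same python2 the
test scripts use):

    # Each of these packages is installed by tests/00-setup above.
    import amulet          # noqa
    import swiftclient     # noqa
    import glanceclient    # noqa
    import keystoneclient  # noqa
    import novaclient      # noqa
    print('amulet test dependencies are importable')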
9 tests/10-basic-precise-essex Executable file
@@ -0,0 +1,9 @@
#!/usr/bin/python

"""Amulet tests on a basic swift-proxy deployment on precise-essex."""

from basic_deployment import SwiftProxyBasicDeployment

if __name__ == '__main__':
    deployment = SwiftProxyBasicDeployment(series='precise')
    deployment.run_tests()
11 tests/11-basic-precise-folsom Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/python

"""Amulet tests on a basic swift-proxy deployment on precise-folsom."""

from basic_deployment import SwiftProxyBasicDeployment

if __name__ == '__main__':
    deployment = SwiftProxyBasicDeployment(series='precise',
                                           openstack='cloud:precise-folsom',
                                           source='cloud:precise-updates/folsom')
    deployment.run_tests()
11 tests/12-basic-precise-grizzly Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/python

"""Amulet tests on a basic swift-proxy deployment on precise-grizzly."""

from basic_deployment import SwiftProxyBasicDeployment

if __name__ == '__main__':
    deployment = SwiftProxyBasicDeployment(series='precise',
                                           openstack='cloud:precise-grizzly',
                                           source='cloud:precise-updates/grizzly')
    deployment.run_tests()
11 tests/13-basic-precise-havana Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/python

"""Amulet tests on a basic swift-proxy deployment on precise-havana."""

from basic_deployment import SwiftProxyBasicDeployment

if __name__ == '__main__':
    deployment = SwiftProxyBasicDeployment(series='precise',
                                           openstack='cloud:precise-havana',
                                           source='cloud:precise-updates/havana')
    deployment.run_tests()
11 tests/14-basic-precise-icehouse Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/python

"""Amulet tests on a basic swift-proxy deployment on precise-icehouse."""

from basic_deployment import SwiftProxyBasicDeployment

if __name__ == '__main__':
    deployment = SwiftProxyBasicDeployment(series='precise',
                                           openstack='cloud:precise-icehouse',
                                           source='cloud:precise-updates/icehouse')
    deployment.run_tests()
9 tests/15-basic-trusty-icehouse Executable file
@@ -0,0 +1,9 @@
#!/usr/bin/python

"""Amulet tests on a basic swift-proxy deployment on trusty-icehouse."""

from basic_deployment import SwiftProxyBasicDeployment

if __name__ == '__main__':
    deployment = SwiftProxyBasicDeployment(series='trusty')
    deployment.run_tests()
52 tests/README Normal file
@@ -0,0 +1,52 @@
This directory provides Amulet tests that focus on verification of swift-proxy
deployments.

If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.

The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.

  * To run all tests (starting with 00-setup):

      make test

  * To run a specific test module (or modules):

      juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse

  * To run a specific test module (or modules), and keep the environment
    deployed after a failure:

      juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse

  * To re-run a test module against an already deployed environment (one
    that was deployed by a previous call to 'juju test --set-e'):

      ./tests/15-basic-trusty-icehouse

For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without changing
the results beyond the initial run. This enables editing and re-running of a
test module against an already deployed environment, as described above.

Manual debugging tips:

  * Set the following env vars before using the OpenStack CLI as admin:
      export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
      export OS_TENANT_NAME=admin
      export OS_USERNAME=admin
      export OS_PASSWORD=openstack
      export OS_REGION_NAME=RegionOne

  * Set the following env vars before using the OpenStack CLI as demoUser:
      export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
      export OS_TENANT_NAME=demoTenant
      export OS_USERNAME=demoUser
      export OS_PASSWORD=password
      export OS_REGION_NAME=RegionOne

  * Sample swift command:
      swift -A $OS_AUTH_URL --os-tenant-name services --os-username swift \
        --os-password password list
      (where tenant/user names and password are in swift-proxy's nova.conf file)
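
The same admin credentials from the manual-debugging tips can also be used from
Python; a hedged sketch (KEYSTONE_IP stands in for the address printed by the
`juju-deployer -f keystone` command above):

    import keystoneclient.v2_0.client as keystone_client

    KEYSTONE_IP = '10.0.3.1'  # placeholder; substitute the real keystone address
    keystone = keystone_client.Client(
        username='admin', password='openstack', tenant_name='admin',
        auth_url='http://{}:5000/v2.0'.format(KEYSTONE_IP))
    print([t.name for t in keystone.tenants.list()])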
827 tests/basic_deployment.py Normal file
@@ -0,0 +1,827 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import amulet
|
||||
import swiftclient
|
||||
|
||||
from charmhelpers.contrib.openstack.amulet.deployment import (
|
||||
OpenStackAmuletDeployment
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.amulet.utils import (
|
||||
OpenStackAmuletUtils,
|
||||
DEBUG, # flake8: noqa
|
||||
ERROR
|
||||
)
|
||||
|
||||
# Use DEBUG to turn on debug logging
|
||||
u = OpenStackAmuletUtils(ERROR)
|
||||
|
||||
|
||||
class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
|
||||
"""Amulet tests on a basic swift-proxy deployment."""
|
||||
|
||||
def __init__(self, series, openstack=None, source=None):
|
||||
"""Deploy the entire test environment."""
|
||||
super(SwiftProxyBasicDeployment, self).__init__(series, openstack,
|
||||
source)
|
||||
self._add_services()
|
||||
self._add_relations()
|
||||
self._configure_services()
|
||||
self._deploy()
|
||||
self._initialize_tests()
|
||||
|
||||
def _add_services(self):
|
||||
"""Add the service that we're testing, including the number of units,
|
||||
where swift-proxy is local, and the other charms are from
|
||||
the charm store."""
|
||||
this_service = ('swift-proxy', 1)
|
||||
other_services = [('mysql', 1),
|
||||
('keystone', 1), ('glance', 1), ('swift-storage', 1)]
|
||||
super(SwiftProxyBasicDeployment, self)._add_services(this_service,
|
||||
other_services)
|
||||
|
||||
def _add_relations(self):
|
||||
"""Add all of the relations for the services."""
|
||||
relations = {
|
||||
'keystone:shared-db': 'mysql:shared-db',
|
||||
'swift-proxy:identity-service': 'keystone:identity-service',
|
||||
'swift-storage:swift-storage': 'swift-proxy:swift-storage',
|
||||
'glance:identity-service': 'keystone:identity-service',
|
||||
'glance:shared-db': 'mysql:shared-db',
|
||||
'glance:object-store': 'swift-proxy:object-store'
|
||||
}
|
||||
super(SwiftProxyBasicDeployment, self)._add_relations(relations)
|
||||
|
||||
def _configure_services(self):
|
||||
"""Configure all of the services."""
|
||||
keystone_config = {'admin-password': 'openstack',
|
||||
'admin-token': 'ubuntutesting'}
|
||||
swift_proxy_config = {'zone-assignment': 'manual',
|
||||
'replicas': '1',
|
||||
'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae',
|
||||
'use-https': 'no'}
|
||||
swift_storage_config = {'zone': '1',
|
||||
'block-device': 'vdb',
|
||||
'overwrite': 'true'}
|
||||
configs = {'keystone': keystone_config,
|
||||
'swift-proxy': swift_proxy_config,
|
||||
'swift-storage': swift_storage_config}
|
||||
super(SwiftProxyBasicDeployment, self)._configure_services(configs)
|
||||
|
||||
def _initialize_tests(self):
|
||||
"""Perform final initialization before tests get run."""
|
||||
# Access the sentries for inspecting service units
|
||||
self.mysql_sentry = self.d.sentry.unit['mysql/0']
|
||||
self.keystone_sentry = self.d.sentry.unit['keystone/0']
|
||||
self.glance_sentry = self.d.sentry.unit['glance/0']
|
||||
self.swift_proxy_sentry = self.d.sentry.unit['swift-proxy/0']
|
||||
self.swift_storage_sentry = self.d.sentry.unit['swift-storage/0']
|
||||
|
||||
# Authenticate admin with keystone
|
||||
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
|
||||
user='admin',
|
||||
password='openstack',
|
||||
tenant='admin')
|
||||
|
||||
# Authenticate admin with glance endpoint
|
||||
self.glance = u.authenticate_glance_admin(self.keystone)
|
||||
|
||||
# Authenticate swift user
|
||||
keystone_relation = self.keystone_sentry.relation('identity-service',
|
||||
'swift-proxy:identity-service')
|
||||
ep = self.keystone.service_catalog.url_for(service_type='identity',
|
||||
endpoint_type='publicURL')
|
||||
self.swift = swiftclient.Connection(authurl=ep,
|
||||
user=keystone_relation['service_username'],
|
||||
key=keystone_relation['service_password'],
|
||||
tenant_name=keystone_relation['service_tenant'],
|
||||
auth_version='2.0')
|
||||
|
||||
# Create a demo tenant/role/user
|
||||
self.demo_tenant = 'demoTenant'
|
||||
self.demo_role = 'demoRole'
|
||||
self.demo_user = 'demoUser'
|
||||
if not u.tenant_exists(self.keystone, self.demo_tenant):
|
||||
tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
|
||||
description='demo tenant',
|
||||
enabled=True)
|
||||
self.keystone.roles.create(name=self.demo_role)
|
||||
self.keystone.users.create(name=self.demo_user,
|
||||
password='password',
|
||||
tenant_id=tenant.id,
|
||||
email='demo@demo.com')
|
||||
|
||||
# Authenticate demo user with keystone
|
||||
self.keystone_demo = \
|
||||
u.authenticate_keystone_user(self.keystone, user=self.demo_user,
|
||||
password='password',
|
||||
tenant=self.demo_tenant)
|
||||
|
||||
def test_services(self):
|
||||
"""Verify the expected services are running on the corresponding
|
||||
service units."""
|
||||
swift_storage_services = ['status swift-account',
|
||||
'status swift-account-auditor',
|
||||
'status swift-account-reaper',
|
||||
'status swift-account-replicator',
|
||||
'status swift-container',
|
||||
'status swift-container-auditor',
|
||||
'status swift-container-replicator',
|
||||
'status swift-container-updater',
|
||||
'status swift-object',
|
||||
'status swift-object-auditor',
|
||||
'status swift-object-replicator',
|
||||
'status swift-object-updater']
|
||||
if self._get_openstack_release() >= self.precise_icehouse:
|
||||
swift_storage_services.append('status swift-container-sync')
|
||||
|
||||
commands = {
|
||||
self.mysql_sentry: ['status mysql'],
|
||||
self.keystone_sentry: ['status keystone'],
|
||||
self.glance_sentry: ['status glance-registry', 'status glance-api'],
|
||||
self.swift_proxy_sentry: ['status swift-proxy'],
|
||||
self.swift_storage_sentry: swift_storage_services
|
||||
}
|
||||
|
||||
ret = u.validate_services(commands)
|
||||
if ret:
|
||||
amulet.raise_status(amulet.FAIL, msg=ret)
|
||||
|
||||
def test_users(self):
|
||||
"""Verify all existing roles."""
|
||||
user1 = {'name': 'demoUser',
|
||||
'enabled': True,
|
||||
'tenantId': u.not_null,
|
||||
'id': u.not_null,
|
||||
'email': 'demo@demo.com'}
|
||||
user2 = {'name': 'admin',
|
||||
'enabled': True,
|
||||
'tenantId': u.not_null,
|
||||
'id': u.not_null,
|
||||
'email': 'juju@localhost'}
|
||||
user3 = {'name': 'glance',
|
||||
'enabled': True,
|
||||
'tenantId': u.not_null,
|
||||
'id': u.not_null,
|
||||
'email': u'juju@localhost'}
|
||||
user4 = {'name': 'swift',
|
||||
'enabled': True,
|
||||
'tenantId': u.not_null,
|
||||
'id': u.not_null,
|
||||
'email': u'juju@localhost'}
|
||||
expected = [user1, user2, user3, user4]
|
||||
actual = self.keystone.users.list()
|
||||
|
||||
ret = u.validate_user_data(expected, actual)
|
||||
if ret:
|
||||
amulet.raise_status(amulet.FAIL, msg=ret)
|
||||
|
||||
def test_service_catalog(self):
|
||||
"""Verify that the service catalog endpoint data is valid."""
|
||||
endpoint_vol = {'adminURL': u.valid_url,
|
||||
'region': 'RegionOne',
|
||||
'publicURL': u.valid_url,
|
||||
'internalURL': u.valid_url}
|
||||
endpoint_id = {'adminURL': u.valid_url,
|
||||
'region': 'RegionOne',
|
||||
'publicURL': u.valid_url,
|
||||
'internalURL': u.valid_url}
|
||||
if self._get_openstack_release() >= self.precise_folsom:
|
||||
endpoint_vol['id'] = u.not_null
|
||||
endpoint_id['id'] = u.not_null
|
||||
expected = {'image': [endpoint_id], 'object-store': [endpoint_id],
|
||||
'identity': [endpoint_id]}
|
||||
actual = self.keystone_demo.service_catalog.get_endpoints()
|
||||
|
||||
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
|
||||
if ret:
|
||||
amulet.raise_status(amulet.FAIL, msg=ret)
|
||||
|
||||
def test_openstack_object_store_endpoint(self):
|
||||
"""Verify the swift object-store endpoint data."""
|
||||
endpoints = self.keystone.endpoints.list()
|
||||
admin_port = internal_port = public_port = '8080'
|
||||
expected = {'id': u.not_null,
|
||||
'region': 'RegionOne',
|
||||
'adminurl': u.valid_url,
|
||||
'internalurl': u.valid_url,
|
||||
'publicurl': u.valid_url,
|
||||
'service_id': u.not_null}
|
||||
|
||||
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
|
||||
public_port, expected)
|
||||
if ret:
|
||||
message = 'object-store endpoint: {}'.format(ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_swift_proxy_identity_service_relation(self):
|
||||
"""Verify the swift-proxy to keystone identity-service relation data."""
|
||||
unit = self.swift_proxy_sentry
|
||||
relation = ['identity-service', 'keystone:identity-service']
|
||||
expected = {
|
||||
'service': 'swift',
|
||||
'region': 'RegionOne',
|
||||
'public_url': u.valid_url,
|
||||
'internal_url': u.valid_url,
|
||||
'private-address': u.valid_ip,
|
||||
'requested_roles': 'Member,Admin',
|
||||
'admin_url': u.valid_url
|
||||
}
|
||||
|
||||
ret = u.validate_relation_data(unit, relation, expected)
|
||||
if ret:
|
||||
message = u.relation_error('swift-proxy identity-service', ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_keystone_identity_service_relation(self):
|
||||
"""Verify the keystone to swift-proxy identity-service relation data."""
|
||||
unit = self.keystone_sentry
|
||||
relation = ['identity-service', 'swift-proxy:identity-service']
|
||||
expected = {
|
||||
'service_protocol': 'http',
|
||||
'service_tenant': 'services',
|
||||
'admin_token': 'ubuntutesting',
|
||||
'service_password': u.not_null,
|
||||
'service_port': '5000',
|
||||
'auth_port': '35357',
|
||||
'auth_protocol': 'http',
|
||||
'private-address': u.valid_ip,
|
||||
'https_keystone': 'False',
|
||||
'auth_host': u.valid_ip,
|
||||
'service_username': 'swift',
|
||||
'service_tenant_id': u.not_null,
|
||||
'service_host': u.valid_ip
|
||||
}
|
||||
|
||||
ret = u.validate_relation_data(unit, relation, expected)
|
||||
if ret:
|
||||
message = u.relation_error('keystone identity-service', ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_swift_storage_swift_storage_relation(self):
|
||||
"""Verify the swift-storage to swift-proxy swift-storage relation
|
||||
data."""
|
||||
unit = self.swift_storage_sentry
|
||||
relation = ['swift-storage', 'swift-proxy:swift-storage']
|
||||
expected = {
|
||||
'account_port': '6002',
|
||||
'zone': '1',
|
||||
'object_port': '6000',
|
||||
'container_port': '6001',
|
||||
'private-address': u.valid_ip,
|
||||
'device': 'vdb'
|
||||
}
|
||||
|
||||
ret = u.validate_relation_data(unit, relation, expected)
|
||||
if ret:
|
||||
message = u.relation_error('swift-storage swift-storage', ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_swift_proxy_swift_storage_relation(self):
|
||||
"""Verify the swift-proxy to swift-storage swift-storage relation
|
||||
data."""
|
||||
unit = self.swift_proxy_sentry
|
||||
relation = ['swift-storage', 'swift-storage:swift-storage']
|
||||
expected = {
|
||||
'private-address': u.valid_ip,
|
||||
'trigger': u.not_null,
|
||||
'rings_url': u.valid_url,
|
||||
'swift_hash': u.not_null
|
||||
}
|
||||
|
||||
ret = u.validate_relation_data(unit, relation, expected)
|
||||
if ret:
|
||||
message = u.relation_error('swift-proxy swift-storage', ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_glance_object_store_relation(self):
|
||||
"""Verify the glance to swift-proxy object-store relation data."""
|
||||
unit = self.glance_sentry
|
||||
relation = ['object-store', 'swift-proxy:object-store']
|
||||
expected = { 'private-address': u.valid_ip }
|
||||
|
||||
ret = u.validate_relation_data(unit, relation, expected)
|
||||
if ret:
|
||||
message = u.relation_error('glance object-store', ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_swift_proxy_object_store_relation(self):
|
||||
"""Verify the swift-proxy to glance object-store relation data."""
|
||||
unit = self.swift_proxy_sentry
|
||||
relation = ['object-store', 'glance:object-store']
|
||||
expected = {'private-address': u.valid_ip}
|
||||
ret = u.validate_relation_data(unit, relation, expected)
|
||||
if ret:
|
||||
message = u.relation_error('swift-proxy object-store', ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_restart_on_config_change(self):
|
||||
"""Verify that the specified services are restarted when the config
|
||||
is changed."""
|
||||
svc = 'swift-proxy'
|
||||
self.d.configure('swift-proxy', {'node-timeout': '90'})
|
||||
|
||||
if not u.service_restarted(self.swift_proxy_sentry, svc,
|
||||
'/etc/swift/proxy-server.conf'):
|
||||
msg = "service {} didn't restart after config change".format(svc)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
self.d.configure('swift-proxy', {'node-timeout': '60'})
|
||||
|
||||
def test_swift_config(self):
|
||||
"""Verify the data in the swift config file."""
|
||||
unit = self.swift_proxy_sentry
|
||||
conf = '/etc/swift/swift.conf'
|
||||
swift_proxy_relation = unit.relation('swift-storage',
|
||||
'swift-storage:swift-storage')
|
||||
expected = {
|
||||
'swift_hash_path_suffix': swift_proxy_relation['swift_hash']
|
||||
}
|
||||
|
||||
ret = u.validate_config_data(unit, conf, 'swift-hash', expected)
|
||||
if ret:
|
||||
message = "swift config error: {}".format(ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_proxy_server_icehouse_config(self):
|
||||
"""Verify the data in the proxy-server config file."""
|
||||
if self._get_openstack_release() < self.precise_icehouse:
|
||||
return
|
||||
|
||||
unit = self.swift_proxy_sentry
|
||||
conf = '/etc/swift/proxy-server.conf'
|
||||
keystone_relation = self.keystone_sentry.relation('identity-service',
|
||||
'swift-proxy:identity-service')
|
||||
swift_proxy_relation = unit.relation('identity-service',
|
||||
'keystone:identity-service')
|
||||
swift_proxy_ip = swift_proxy_relation['private-address']
|
||||
auth_host = keystone_relation['auth_host']
|
||||
auth_protocol = keystone_relation['auth_protocol']
|
||||
|
||||
expected = {
|
||||
'DEFAULT': {
|
||||
'bind_port': '8080',
|
||||
'workers': '0',
|
||||
'user': 'swift'
|
||||
},
|
||||
'pipeline:main': {
|
||||
'pipeline': 'gatekeeper healthcheck cache swift3 s3token '
|
||||
'container_sync bulk tempurl slo dlo formpost '
|
||||
'authtoken keystoneauth staticweb '
|
||||
'container-quotas account-quotas proxy-server'
|
||||
},
|
||||
'app:proxy-server': {
|
||||
'use': 'egg:swift#proxy',
|
||||
'allow_account_management': 'true',
|
||||
'account_autocreate': 'true',
|
||||
'node_timeout': '60',
|
||||
'recoverable_node_timeout': '30'
|
||||
},
|
||||
'filter:tempauth': {
|
||||
'use': 'egg:swift#tempauth',
|
||||
'user_system_root': 'testpass .admin https://{}:8080/v1/'
|
||||
'AUTH_system'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
|
||||
'filter:cache': {
|
||||
'use': 'egg:swift#memcache',
|
||||
'memcache_servers': '{}:11211'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
|
||||
'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
|
||||
'filter:staticweb': {'use': 'egg:swift#staticweb'},
|
||||
'filter:bulk': {'use': 'egg:swift#bulk'},
|
||||
'filter:slo': {'use': 'egg:swift#slo'},
|
||||
'filter:dlo': {'use': 'egg:swift#dlo'},
|
||||
'filter:formpost': {'use': 'egg:swift#formpost'},
|
||||
'filter:tempurl': {'use': 'egg:swift#tempurl'},
|
||||
'filter:container_sync': {'use': 'egg:swift#container_sync'},
|
||||
'filter:gatekeeper': {'use': 'egg:swift#gatekeeper'},
|
||||
'filter:keystoneauth': {
|
||||
'use': 'egg:swift#keystoneauth',
|
||||
'operator_roles': 'Member,Admin'
|
||||
},
|
||||
'filter:authtoken': {
|
||||
'paste.filter_factory': 'keystoneclient.middleware.'
|
||||
'auth_token:filter_factory',
|
||||
'auth_host': auth_host,
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_protocol': auth_protocol,
|
||||
'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
|
||||
keystone_relation['service_port']),
|
||||
'admin_tenant_name': keystone_relation['service_tenant'],
|
||||
'admin_user': keystone_relation['service_username'],
|
||||
'admin_password': keystone_relation['service_password'],
|
||||
'delay_auth_decision': 'true',
|
||||
'signing_dir': '/etc/swift',
|
||||
'cache': 'swift.cache'
|
||||
},
|
||||
'filter:s3token': {
|
||||
'paste.filter_factory': 'keystoneclient.middleware.'
|
||||
's3_token:filter_factory',
|
||||
'service_host': keystone_relation['service_host'],
|
||||
'service_port': keystone_relation['service_port'],
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_host': keystone_relation['auth_host'],
|
||||
'auth_protocol': keystone_relation['auth_protocol'],
|
||||
'auth_token': keystone_relation['admin_token'],
|
||||
'admin_token': keystone_relation['admin_token']
|
||||
},
|
||||
'filter:swift3': {'use': 'egg:swift3#swift3'}
|
||||
}
|
||||
|
||||
for section, pairs in expected.iteritems():
|
||||
ret = u.validate_config_data(unit, conf, section, pairs)
|
||||
if ret:
|
||||
message = "proxy-server config error: {}".format(ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_proxy_server_havana_config(self):
|
||||
"""Verify the data in the proxy-server config file."""
|
||||
if self._get_openstack_release() != self.precise_havana:
|
||||
return
|
||||
|
||||
unit = self.swift_proxy_sentry
|
||||
conf = '/etc/swift/proxy-server.conf'
|
||||
keystone_relation = self.keystone_sentry.relation('identity-service',
|
||||
'swift-proxy:identity-service')
|
||||
swift_proxy_relation = unit.relation('identity-service',
|
||||
'keystone:identity-service')
|
||||
swift_proxy_ip = swift_proxy_relation['private-address']
|
||||
auth_host = keystone_relation['auth_host']
|
||||
auth_protocol = keystone_relation['auth_protocol']
|
||||
|
||||
expected = {
|
||||
'DEFAULT': {
|
||||
'bind_port': '8080',
|
||||
'workers': '0',
|
||||
'user': 'swift'
|
||||
},
|
||||
'pipeline:main': {
|
||||
'pipeline': 'healthcheck cache swift3 authtoken '
|
||||
'keystoneauth container-quotas account-quotas '
|
||||
'proxy-server'
|
||||
},
|
||||
'app:proxy-server': {
|
||||
'use': 'egg:swift#proxy',
|
||||
'allow_account_management': 'true',
|
||||
'account_autocreate': 'true',
|
||||
'node_timeout': '60',
|
||||
'recoverable_node_timeout': '30'
|
||||
},
|
||||
'filter:tempauth': {
|
||||
'use': 'egg:swift#tempauth',
|
||||
'user_system_root': 'testpass .admin https://{}:8080/v1/'
|
||||
'AUTH_system'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
|
||||
'filter:cache': {
|
||||
'use': 'egg:swift#memcache',
|
||||
'memcache_servers': '{}:11211'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
|
||||
'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
|
||||
'filter:keystoneauth': {
|
||||
'use': 'egg:swift#keystoneauth',
|
||||
'operator_roles': 'Member,Admin'
|
||||
},
|
||||
'filter:authtoken': {
|
||||
'paste.filter_factory': 'keystoneclient.middleware.'
|
||||
'auth_token:filter_factory',
|
||||
'auth_host': auth_host,
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_protocol': auth_protocol,
|
||||
'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
|
||||
keystone_relation['service_port']),
|
||||
'admin_tenant_name': keystone_relation['service_tenant'],
|
||||
'admin_user': keystone_relation['service_username'],
|
||||
'admin_password': keystone_relation['service_password'],
|
||||
'delay_auth_decision': 'true',
|
||||
'signing_dir': '/etc/swift',
|
||||
'cache': 'swift.cache'
|
||||
},
|
||||
'filter:s3token': {
|
||||
'paste.filter_factory': 'keystone.middleware.s3_token:'
|
||||
'filter_factory',
|
||||
'service_host': keystone_relation['service_host'],
|
||||
'service_port': keystone_relation['service_port'],
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_host': keystone_relation['auth_host'],
|
||||
'auth_protocol': keystone_relation['auth_protocol'],
|
||||
'auth_token': keystone_relation['admin_token'],
|
||||
'admin_token': keystone_relation['admin_token'],
|
||||
'service_protocol': keystone_relation['service_protocol']
|
||||
},
|
||||
'filter:swift3': {'use': 'egg:swift3#swift3'}
|
||||
}
|
||||
|
||||
for section, pairs in expected.iteritems():
|
||||
ret = u.validate_config_data(unit, conf, section, pairs)
|
||||
if ret:
|
||||
message = "proxy-server config error: {}".format(ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_proxy_server_grizzly_config(self):
|
||||
"""Verify the data in the proxy-server config file."""
|
||||
if self._get_openstack_release() != self.precise_grizzly:
|
||||
return
|
||||
|
||||
unit = self.swift_proxy_sentry
|
||||
conf = '/etc/swift/proxy-server.conf'
|
||||
keystone_relation = self.keystone_sentry.relation('identity-service',
|
||||
'swift-proxy:identity-service')
|
||||
swift_proxy_relation = unit.relation('identity-service',
|
||||
'keystone:identity-service')
|
||||
swift_proxy_ip = swift_proxy_relation['private-address']
|
||||
auth_host = keystone_relation['auth_host']
|
||||
auth_protocol = keystone_relation['auth_protocol']
|
||||
|
||||
expected = {
|
||||
'DEFAULT': {
|
||||
'bind_port': '8080',
|
||||
'workers': '0',
|
||||
'user': 'swift'
|
||||
},
|
||||
'pipeline:main': {
|
||||
'pipeline': 'healthcheck cache swift3 s3token authtoken '
|
||||
'keystone container-quotas account-quotas '
|
||||
'proxy-server'
|
||||
},
|
||||
'app:proxy-server': {
|
||||
'use': 'egg:swift#proxy',
|
||||
'allow_account_management': 'true',
|
||||
'account_autocreate': 'true',
|
||||
'node_timeout': '60',
|
||||
'recoverable_node_timeout': '30'
|
||||
},
|
||||
'filter:tempauth': {
|
||||
'use': 'egg:swift#tempauth',
|
||||
'user_system_root': 'testpass .admin https://{}:8080/v1/'
|
||||
'AUTH_system'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
|
||||
'filter:cache': {
|
||||
'use': 'egg:swift#memcache',
|
||||
'memcache_servers': '{}:11211'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
|
||||
'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
|
||||
'filter:keystone': {
|
||||
'paste.filter_factory': 'swift.common.middleware.'
|
||||
'keystoneauth:filter_factory',
|
||||
'operator_roles': 'Member,Admin'
|
||||
},
|
||||
'filter:authtoken': {
|
||||
'paste.filter_factory': 'keystone.middleware.auth_token:'
|
||||
'filter_factory',
|
||||
'auth_host': auth_host,
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_protocol': auth_protocol,
|
||||
'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
|
||||
keystone_relation['service_port']),
|
||||
'admin_tenant_name': keystone_relation['service_tenant'],
|
||||
'admin_user': keystone_relation['service_username'],
|
||||
'admin_password': keystone_relation['service_password'],
|
||||
'delay_auth_decision': 'true',
|
||||
'signing_dir': '/etc/swift'
|
||||
},
|
||||
'filter:s3token': {
|
||||
'paste.filter_factory': 'keystone.middleware.s3_token:'
|
||||
'filter_factory',
|
||||
'service_host': keystone_relation['service_host'],
|
||||
'service_port': keystone_relation['service_port'],
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_host': keystone_relation['auth_host'],
|
||||
'auth_protocol': keystone_relation['auth_protocol'],
|
||||
'auth_token': keystone_relation['admin_token'],
|
||||
'admin_token': keystone_relation['admin_token'],
|
||||
'service_protocol': keystone_relation['service_protocol']
|
||||
},
|
||||
'filter:swift3': {'use': 'egg:swift3#swift3'}
|
||||
}
|
||||
|
||||
for section, pairs in expected.iteritems():
|
||||
ret = u.validate_config_data(unit, conf, section, pairs)
|
||||
if ret:
|
||||
message = "proxy-server config error: {}".format(ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_proxy_server_folsom_config(self):
|
||||
"""Verify the data in the proxy-server config file."""
|
||||
if self._get_openstack_release() != self.precise_folsom:
|
||||
return
|
||||
|
||||
unit = self.swift_proxy_sentry
|
||||
conf = '/etc/swift/proxy-server.conf'
|
||||
keystone_relation = self.keystone_sentry.relation('identity-service',
|
||||
'swift-proxy:identity-service')
|
||||
swift_proxy_relation = unit.relation('identity-service',
|
||||
'keystone:identity-service')
|
||||
swift_proxy_ip = swift_proxy_relation['private-address']
|
||||
auth_host = keystone_relation['auth_host']
|
||||
auth_protocol = keystone_relation['auth_protocol']
|
||||
|
||||
expected = {
|
||||
'DEFAULT': {
|
||||
'bind_port': '8080',
|
||||
'workers': '0',
|
||||
'user': 'swift'
|
||||
},
|
||||
'pipeline:main': {
|
||||
'pipeline': 'healthcheck cache swift3 s3token authtoken '
|
||||
'keystone proxy-server'
|
||||
},
|
||||
'app:proxy-server': {
|
||||
'use': 'egg:swift#proxy',
|
||||
'allow_account_management': 'true',
|
||||
'account_autocreate': 'true',
|
||||
'node_timeout': '60',
|
||||
'recoverable_node_timeout': '30'
|
||||
},
|
||||
'filter:tempauth': {
|
||||
'use': 'egg:swift#tempauth',
|
||||
'user_system_root': 'testpass .admin https://{}:8080/v1/'
|
||||
'AUTH_system'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
|
||||
'filter:cache': {
|
||||
'use': 'egg:swift#memcache',
|
||||
'memcache_servers': '{}:11211'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:keystone': {
|
||||
'paste.filter_factory': 'keystone.middleware.swift_auth:'
|
||||
'filter_factory',
|
||||
'operator_roles': 'Member,Admin'
|
||||
},
|
||||
'filter:authtoken': {
|
||||
'paste.filter_factory': 'keystone.middleware.auth_token:'
|
||||
'filter_factory',
|
||||
'auth_host': auth_host,
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_protocol': auth_protocol,
|
||||
'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
|
||||
keystone_relation['service_port']),
|
||||
'admin_tenant_name': keystone_relation['service_tenant'],
|
||||
'admin_user': keystone_relation['service_username'],
|
||||
'admin_password': keystone_relation['service_password'],
|
||||
'delay_auth_decision': '1'
|
||||
},
|
||||
'filter:s3token': {
|
||||
'paste.filter_factory': 'keystone.middleware.s3_token:'
|
||||
'filter_factory',
|
||||
'service_host': keystone_relation['service_host'],
|
||||
'service_port': keystone_relation['service_port'],
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_host': keystone_relation['auth_host'],
|
||||
'auth_protocol': keystone_relation['auth_protocol'],
|
||||
'auth_token': keystone_relation['admin_token'],
|
||||
'admin_token': keystone_relation['admin_token'],
|
||||
'service_protocol': keystone_relation['service_protocol']
|
||||
},
|
||||
'filter:swift3': {'use': 'egg:swift#swift3'}
|
||||
}
|
||||
|
||||
for section, pairs in expected.iteritems():
|
||||
ret = u.validate_config_data(unit, conf, section, pairs)
|
||||
if ret:
|
||||
message = "proxy-server config error: {}".format(ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_proxy_server_essex_config(self):
|
||||
"""Verify the data in the proxy-server config file."""
|
||||
if self._get_openstack_release() != self.precise_essex:
|
||||
return
|
||||
|
||||
unit = self.swift_proxy_sentry
|
||||
conf = '/etc/swift/proxy-server.conf'
|
||||
keystone_relation = self.keystone_sentry.relation('identity-service',
|
||||
'swift-proxy:identity-service')
|
||||
swift_proxy_relation = unit.relation('identity-service',
|
||||
'keystone:identity-service')
|
||||
swift_proxy_ip = swift_proxy_relation['private-address']
|
||||
auth_host = keystone_relation['auth_host']
|
||||
auth_protocol = keystone_relation['auth_protocol']
|
||||
|
||||
expected = {
|
||||
'DEFAULT': {
|
||||
'bind_port': '8080',
|
||||
'workers': '0',
|
||||
'user': 'swift'
|
||||
},
|
||||
'pipeline:main': {
|
||||
'pipeline': 'healthcheck cache swift3 s3token authtoken '
|
||||
'keystone proxy-server'
|
||||
},
|
||||
'app:proxy-server': {
|
||||
'use': 'egg:swift#proxy',
|
||||
'allow_account_management': 'true',
|
||||
'account_autocreate': 'true',
|
||||
'node_timeout': '60',
|
||||
'recoverable_node_timeout': '30'
|
||||
},
|
||||
'filter:tempauth': {
|
||||
'use': 'egg:swift#tempauth',
|
||||
'user_system_root': 'testpass .admin https://{}:8080/v1/'
|
||||
'AUTH_system'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
|
||||
'filter:cache': {
|
||||
'use': 'egg:swift#memcache',
|
||||
'memcache_servers': '{}:11211'.format(swift_proxy_ip)
|
||||
},
|
||||
'filter:keystone': {
|
||||
'paste.filter_factory': 'keystone.middleware.swift_auth:'
|
||||
'filter_factory',
|
||||
'operator_roles': 'Member,Admin'
|
||||
},
|
||||
'filter:authtoken': {
|
||||
'paste.filter_factory': 'keystone.middleware.auth_token:'
|
||||
'filter_factory',
|
||||
'auth_host': auth_host,
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_protocol': auth_protocol,
|
||||
'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
|
||||
keystone_relation['service_port']),
|
||||
'admin_tenant_name': keystone_relation['service_tenant'],
|
||||
'admin_user': keystone_relation['service_username'],
|
||||
'admin_password': keystone_relation['service_password'],
|
||||
'delay_auth_decision': '1'
|
||||
},
|
||||
'filter:s3token': {
|
||||
'paste.filter_factory': 'keystone.middleware.s3_token:'
|
||||
'filter_factory',
|
||||
'service_host': keystone_relation['service_host'],
|
||||
'service_port': keystone_relation['service_port'],
|
||||
'auth_port': keystone_relation['auth_port'],
|
||||
'auth_host': keystone_relation['auth_host'],
|
||||
'auth_protocol': keystone_relation['auth_protocol'],
|
||||
'auth_token': keystone_relation['admin_token'],
|
||||
'admin_token': keystone_relation['admin_token'],
|
||||
'service_protocol': keystone_relation['service_protocol']
|
||||
},
|
||||
'filter:swift3': {'use': 'egg:swift#swift3'}
|
||||
}
|
||||
|
||||
for section, pairs in expected.iteritems():
|
||||
ret = u.validate_config_data(unit, conf, section, pairs)
|
||||
if ret:
|
||||
message = "proxy-server config error: {}".format(ret)
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
def test_image_create(self):
|
||||
"""Create an instance in glance, which is backed by swift, and validate
|
||||
that some of the metadata for the image match in glance and swift."""
|
||||
# NOTE(coreycb): Skipping failing test on folsom until resolved. On
|
||||
# folsom only, uploading an image to glance gets 400 Bad
|
||||
# Request - Error uploading image: (error): [Errno 111]
|
||||
# ECONNREFUSED (HTTP 400)
|
||||
if self._get_openstack_release() == self.precise_folsom:
|
||||
u.log.error("Skipping failing test until resolved")
|
||||
return
|
||||
|
||||
# Create glance image
|
||||
image = u.create_cirros_image(self.glance, "cirros-image")
|
||||
if not image:
|
||||
amulet.raise_status(amulet.FAIL, msg="Image create failed")
|
||||
|
||||
# Validate that cirros image exists in glance and get its checksum/size
|
||||
images = list(self.glance.images.list())
|
||||
if len(images) != 1:
|
||||
msg = "Expected 1 glance image, found {}".format(len(images))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
if images[0].name != 'cirros-image':
|
||||
message = "cirros image does not exist"
|
||||
amulet.raise_status(amulet.FAIL, msg=message)
|
||||
|
||||
glance_image_md5 = image.checksum
|
||||
glance_image_size = image.size
|
||||
|
||||
# Validate that swift object's checksum/size match that from glance
|
||||
headers, containers = self.swift.get_account()
|
||||
if len(containers) != 1:
|
||||
msg = "Expected 1 swift container, found {}".format(len(containers))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
container_name = containers[0].get('name')
|
||||
|
||||
headers, objects = self.swift.get_container(container_name)
|
||||
if len(objects) != 1:
|
||||
msg = "Expected 1 swift object, found {}".format(len(objects))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
swift_object_size = objects[0].get('bytes')
|
||||
swift_object_md5 = objects[0].get('hash')
|
||||
|
||||
if glance_image_size != swift_object_size:
|
||||
msg = "Glance image size {} != swift object size {}".format( \
|
||||
glance_image_size, swift_object_size)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
if glance_image_md5 != swift_object_md5:
|
||||
msg = "Glance image hash {} != swift object hash {}".format( \
|
||||
glance_image_md5, swift_object_md5)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
# Cleanup
|
||||
u.delete_image(self.glance, image)
|
||||
0 tests/charmhelpers/__init__.py Normal file
0 tests/charmhelpers/contrib/__init__.py Normal file
0 tests/charmhelpers/contrib/amulet/__init__.py Normal file
71 tests/charmhelpers/contrib/amulet/deployment.py Normal file
@@ -0,0 +1,71 @@
|
||||
import amulet
|
||||
|
||||
import os
|
||||
|
||||
|
||||
class AmuletDeployment(object):
|
||||
"""Amulet deployment.
|
||||
|
||||
This class provides generic Amulet deployment and test runner
|
||||
methods.
|
||||
"""
|
||||
|
||||
def __init__(self, series=None):
|
||||
"""Initialize the deployment environment."""
|
||||
self.series = None
|
||||
|
||||
if series:
|
||||
self.series = series
|
||||
self.d = amulet.Deployment(series=self.series)
|
||||
else:
|
||||
self.d = amulet.Deployment()
|
||||
|
||||
def _add_services(self, this_service, other_services):
|
||||
"""Add services.
|
||||
|
||||
Add services to the deployment where this_service is the local charm
|
||||
that we're focused on testing and other_services are the other
|
||||
charms that come from the charm store.
|
||||
"""
|
||||
name, units = range(2)
|
||||
|
||||
if this_service[name] != os.path.basename(os.getcwd()):
|
||||
s = this_service[name]
|
||||
msg = "The charm's root directory name needs to be {}".format(s)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
self.d.add(this_service[name], units=this_service[units])
|
||||
|
||||
for svc in other_services:
|
||||
if self.series:
|
||||
self.d.add(svc[name],
|
||||
charm='cs:{}/{}'.format(self.series, svc[name]),
|
||||
units=svc[units])
|
||||
else:
|
||||
self.d.add(svc[name], units=svc[units])
|
||||
|
||||
def _add_relations(self, relations):
|
||||
"""Add all of the relations for the services."""
|
||||
for k, v in relations.iteritems():
|
||||
self.d.relate(k, v)
|
||||
|
||||
def _configure_services(self, configs):
|
||||
"""Configure all of the services."""
|
||||
for service, config in configs.iteritems():
|
||||
self.d.configure(service, config)
|
||||
|
||||
def _deploy(self):
|
||||
"""Deploy environment and wait for all hooks to finish executing."""
|
||||
try:
|
||||
self.d.setup()
|
||||
self.d.sentry.wait(timeout=900)
|
||||
except amulet.helpers.TimeoutError:
|
||||
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
def run_tests(self):
|
||||
"""Run all of the methods that are prefixed with 'test_'."""
|
||||
for test in dir(self):
|
||||
if test.startswith('test_'):
|
||||
getattr(self, test)()
|
||||
176 tests/charmhelpers/contrib/amulet/utils.py Normal file
@@ -0,0 +1,176 @@
|
||||
import ConfigParser
|
||||
import io
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
class AmuletUtils(object):
|
||||
"""Amulet utilities.
|
||||
|
||||
This class provides common utility functions that are used by Amulet
|
||||
tests.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level=logging.ERROR):
|
||||
self.log = self.get_logger(level=log_level)
|
||||
|
||||
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
|
||||
"""Get a logger object that will log to stdout."""
|
||||
log = logging
|
||||
logger = log.getLogger(name)
|
||||
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
||||
"%(levelname)s: %(message)s")
|
||||
|
||||
handler = log.StreamHandler(stream=sys.stdout)
|
||||
handler.setLevel(level)
|
||||
handler.setFormatter(fmt)
|
||||
|
||||
logger.addHandler(handler)
|
||||
logger.setLevel(level)
|
||||
|
||||
return logger
|
||||
|
||||
def valid_ip(self, ip):
|
||||
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def valid_url(self, url):
|
||||
p = re.compile(
|
||||
r'^(?:http|ftp)s?://'
|
||||
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
|
||||
r'localhost|'
|
||||
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
|
||||
r'(?::\d+)?'
|
||||
r'(?:/?|[/?]\S+)$',
|
||||
re.IGNORECASE)
|
||||
if p.match(url):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def validate_services(self, commands):
|
||||
"""Validate services.
|
||||
|
||||
Verify the specified services are running on the corresponding
|
||||
service units.
|
||||
"""
|
||||
for k, v in commands.iteritems():
|
||||
for cmd in v:
|
||||
output, code = k.run(cmd)
|
||||
if code != 0:
|
||||
return "command `{}` returned {}".format(cmd, str(code))
|
||||
return None
|
||||
|
||||
def _get_config(self, unit, filename):
|
||||
"""Get a ConfigParser object for parsing a unit's config file."""
|
||||
file_contents = unit.file_contents(filename)
|
||||
config = ConfigParser.ConfigParser()
|
||||
config.readfp(io.StringIO(file_contents))
|
||||
return config
|
||||
|
||||
def validate_config_data(self, sentry_unit, config_file, section,
|
||||
expected):
|
||||
"""Validate config file data.
|
||||
|
||||
Verify that the specified section of the config file contains
|
||||
the expected option key:value pairs.
|
||||
"""
|
||||
config = self._get_config(sentry_unit, config_file)
|
||||
|
||||
if section != 'DEFAULT' and not config.has_section(section):
|
||||
return "section [{}] does not exist".format(section)
|
||||
|
||||
for k in expected.keys():
|
||||
if not config.has_option(section, k):
|
||||
return "section [{}] is missing option {}".format(section, k)
|
||||
if config.get(section, k) != expected[k]:
|
||||
return "section [{}] {}:{} != expected {}:{}".format(
|
||||
section, k, config.get(section, k), k, expected[k])
|
||||
return None
|
||||
|
||||
def _validate_dict_data(self, expected, actual):
|
||||
"""Validate dictionary data.
|
||||
|
||||
Compare expected dictionary data vs actual dictionary data.
|
||||
The values in the 'expected' dictionary can be strings, bools, ints,
|
||||
longs, or can be a function that evaluate a variable and returns a
|
||||
bool.
|
||||
"""
|
||||
for k, v in expected.iteritems():
|
||||
if k in actual:
|
||||
if (isinstance(v, basestring) or
|
||||
isinstance(v, bool) or
|
||||
isinstance(v, (int, long))):
|
||||
if v != actual[k]:
|
||||
return "{}:{}".format(k, actual[k])
|
||||
elif not v(actual[k]):
|
||||
return "{}:{}".format(k, actual[k])
|
||||
else:
|
||||
return "key '{}' does not exist".format(k)
|
||||
return None
|
||||
|
||||
    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        if string is not None:
            return True
        else:
            return False

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Get process' start time.

        Determine start time of the process based on the last modification
        time of the /proc/pid directory. If pgrep_full is True, the process
        name is matched against the full command line.
        """
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Check if service was restarted.

        Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted.
        """
        time.sleep(sleep_time)
        if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                self._get_file_mtime(sentry_unit, filename)):
            return True
        else:
            return False

    def relation_error(self, name, data):
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        return 'unexpected endpoint data in {} - {}'.format(name, data)
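For orientation, a minimal sketch of how a charm's Amulet test might call these helpers. It assumes an already-deployed Amulet environment `d` with a 'swift-proxy/0' sentry; the service name, config path and expected values are illustrative only and are not part of this commit.

import amulet

from charmhelpers.contrib.amulet.utils import AmuletUtils

u = AmuletUtils()
unit = d.sentry.unit['swift-proxy/0']  # assumed sentry from a deployment `d`

# Map each sentry to the status commands that must exit 0.
error = u.validate_services({unit: ['status swift-proxy']})
if error:
    amulet.raise_status(amulet.FAIL, msg=error)

# Compare rendered config options against expected values.
error = u.validate_config_data(unit, '/etc/swift/proxy-server.conf',
                               'DEFAULT', {'bind_port': '8080'})
if error:
    amulet.raise_status(amulet.FAIL, msg=error)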
0
tests/charmhelpers/contrib/openstack/__init__.py
Normal file
61
tests/charmhelpers/contrib/openstack/amulet/deployment.py
Normal file
@@ -0,0 +1,61 @@
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)


class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                if svc[name] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)

        if self.source:
            for svc in services:
                if svc[name] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse) = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
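A rough sketch of how the swift-proxy tests in this branch can build on this class. The charm names, unit counts, relation and config values below are examples, and the [name, unit-count] service format is assumed from how _add_services() indexes svc[0] above.

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)


class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
    """Hypothetical swift-proxy deployment for an Amulet test."""

    def __init__(self, series=None, openstack=None, source=None):
        super(SwiftProxyBasicDeployment, self).__init__(series, openstack,
                                                        source)
        # Services are passed as [name, unit-count] pairs.
        this_service = ['swift-proxy', 1]
        other_services = [['keystone', 1], ['swift-storage', 1]]
        self._add_services(this_service, other_services)
        self._configure_services({'keystone': {'admin-password': 'openstack'}})
        self.d.relate('swift-proxy:identity-service',
                      'keystone:identity-service')
        # Deploy and wait for hooks to settle before running validations.
        self.d.setup()
        self.d.sentry.wait()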
275
tests/charmhelpers/contrib/openstack/amulet/utils.py
Normal file
@@ -0,0 +1,275 @@
import logging
import os
import time
import urllib

import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
)

DEBUG = logging.DEBUG
ERROR = logging.ERROR


class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.

    This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate endpoint data.

        Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if (admin_port in ep.adminurl and
                    internal_port in ep.internalurl and
                    public_port in ep.publicurl):
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate tenant data.

        Validate a list of actual tenant data vs list of expected tenant
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate role data.

        Validate a list of actual role data vs a list of expected role
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate user data.

        Validate a list of actual user data vs a list of expected user
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate flavor data.

        Validate a list of actual flavors vs a list of expected flavors.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        unit = keystone_sentry
        service_ip = unit.relation('shared-db',
                                   'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)
    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        # Keep the upstream file name separate from the local path under
        # tests/, so the download URL is not built with the local prefix.
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image
    def delete_image(self, glance, image):
        """Delete the specified image."""
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance."""
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
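Finally, a sketch of how a swift-proxy test could combine these utilities to check the 'object-store' entry keystone publishes for swift. The credentials, region and the deployment object `d` are illustrative assumptions, not part of this commit.

import amulet

from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

u = OpenStackAmuletUtils()
keystone_sentry = d.sentry.unit['keystone/0']  # assumed deployment `d`

keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                         password='openstack',
                                         tenant='admin')

# Expected keys mirror keystone's v2 service catalog; the callables
# (valid_url, not_null) come from AmuletUtils above.
expected = {'object-store': [{'adminURL': u.valid_url,
                              'region': 'RegionOne',
                              'publicURL': u.valid_url,
                              'internalURL': u.valid_url,
                              'id': u.not_null}]}
actual = keystone.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
    amulet.raise_status(amulet.FAIL, msg=ret)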