Move charm tests to zaza

Use the same assertions as the cinder charm. Also port the original 499
test, which is in the associated Depends-On in zaza-openstack-tests.

Change-Id: I86f2ab205d0f082dd6b185ccbfa314bccf4e5bfb
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/246
Closes-Bug: #1828424
Ryan Beisner 2020-03-27 10:51:49 -05:00 committed by Alex Kavanagh
parent cf0334519a
commit c0f870c4ff
24 changed files with 1326 additions and 1076 deletions
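
For orientation, a minimal sketch of the shape the ported zaza test class takes (illustrative only: the class path and the exact assertion below are assumptions on my part; the real tests live in the zaza-openstack-tests PR linked above):

# Illustrative sketch, not part of this commit; the actual port lives in
# zaza-openstack-tests (see func-test-pr above).
import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils


class CinderCephTest(test_utils.OpenStackBaseTest):
    """Check the cinder-ceph subordinate wires a Ceph backend into cinder."""

    def test_cinder_ceph_config(self):
        # Same style of assertion as the cinder charm tests: block until the
        # rendered cinder.conf carries the cinder-ceph backend section.
        zaza.model.block_until_oslo_config_entries_match(
            'cinder',
            '/etc/cinder/cinder.conf',
            {'cinder-ceph': {
                'volume_backend_name': ['cinder-ceph'],
                'volume_driver': ['cinder.volume.drivers.rbd.RBDDriver'],
            }})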


@@ -1,6 +1,12 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
pbr>=1.8.0,<1.9.0
simplejson>=2.2.0
netifaces>=0.10.4


@@ -1,29 +1,18 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of *requirements.txt files for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
#
charm-tools>=2.4.4
coverage>=3.6
requests>=2.18.4
mock>=1.2
flake8>=2.2.4,<=2.4.1
stestr>=2.2.0
requests>=2.18.4
# BEGIN: Amulet OpenStack Charm Helper Requirements
# Liberty client lower constraints
amulet>=1.14.3,<2.0;python_version=='2.7'
bundletester>=0.6.1,<1.0;python_version=='2.7'
python-ceilometerclient>=1.5.0
python-cinderclient>=1.4.0,<5.0.0
python-glanceclient>=1.1.0
python-heatclient>=0.8.0
python-keystoneclient>=1.7.1
python-neutronclient>=3.1.0
python-novaclient>=2.30.1
python-openstackclient>=1.7.0
python-swiftclient>=2.6.0
pika>=0.10.0,<1.0
distro-info
git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
# END: Amulet OpenStack Charm Helper Requirements
# NOTE: workaround for 14.04 pip/tox
pytz
pyudev # for ceph-* charm unit tests (not mocked?)
coverage>=4.5.2
pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack


@@ -1,8 +1,9 @@
# Overview
This directory provides Amulet tests to verify basic deployment functionality
from the perspective of this charm, its requirements and its features, as
exercised in a subset of the full OpenStack deployment test bundle topology.
This directory provides Zaza test definitions and bundles to verify basic
deployment functionality from the perspective of this charm, its requirements
and its features, as exercised in a subset of the full OpenStack deployment
test bundle topology.
For full details on functional testing of OpenStack charms please refer to
the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
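
For a concrete picture of the test definitions mentioned above, a sketch of the kind of tests.yaml zaza reads from this directory (the bundle and test names here are illustrative assumptions, not taken from this commit):

charm_name: cinder-ceph
gate_bundles:
- xenial-mitaka
- bionic-queens
- bionic-stein
smoke_bundles:
- bionic-stein
tests:
- zaza.openstack.charm_tests.cinder_ceph.tests.CinderCephTest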


@@ -1,775 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Basic cinder-ceph functional test.
"""
import amulet
import json
import time
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
class CinderCephBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic heat deployment."""
def __init__(self, series=None, openstack=None, source=None, git=False,
stable=False):
"""Deploy the entire test environment."""
super(CinderCephBasicDeployment, self).__init__(series, openstack,
source, stable)
self.git = git
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
u.log.info('Waiting on extended status checks...')
exclude_services = ['nrpe']
# Wait for deployment ready msgs, except exclusions
self._auto_wait_for_status(exclude_services=exclude_services)
self.d.sentry.wait()
self._initialize_tests()
def _add_services(self):
"""Add the services that we're testing, where cinder-ceph is
local, and the rest of the services are from lp branches that
are compatible with the local charm (e.g. stable or next).
"""
# Note: cinder-ceph becomes a cinder subordinate unit.
this_service = {'name': 'cinder-ceph'}
other_services = [
self.get_percona_service_entry(),
{'name': 'keystone'},
{'name': 'rabbitmq-server'},
{'name': 'ceph-mon', 'units': 3},
{'name': 'ceph-osd', 'units': 3,
'storage': {'osd-devices': 'cinder,10G'}},
{'name': 'cinder'}
]
super(CinderCephBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'ceph-mon:client': 'cinder-ceph:ceph',
'ceph-osd:mon': 'ceph-mon:osd',
'cinder:storage-backend': 'cinder-ceph:storage-backend',
'keystone:shared-db': 'percona-cluster:shared-db',
'cinder:shared-db': 'percona-cluster:shared-db',
'cinder:identity-service': 'keystone:identity-service',
'cinder:amqp': 'rabbitmq-server:amqp',
}
# If the release is less than ocata then add in the cinder <-> ceph
# relationship; it's not needed for ocata onwards as cinder gained the
# ability to have multiple backends, and in this test (cinder-ceph) we
# only want THIS backend.
if self._get_openstack_release() < self.xenial_ocata:
relations['cinder:ceph'] = 'ceph-mon:client'
super(CinderCephBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {
'admin-password': 'openstack',
}
pxc_config = {
'innodb-buffer-pool-size': '256M',
'max-connections': 1000,
}
cinder_config = {
'block-device': 'None',
'glance-api-version': '2'
}
ceph_config = {
'monitor-count': '3',
}
if self._get_openstack_release() < self.bionic_train:
ceph_config['auth-supported'] = 'none'
cinder_ceph_config = {
'ceph-osd-replication-count': '3',
}
configs = {
'keystone': keystone_config,
'percona-cluster': pxc_config,
'cinder': cinder_config,
'ceph-mon': ceph_config,
'cinder-ceph': cinder_ceph_config
}
super(CinderCephBasicDeployment, self)._configure_services(configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.pxc_sentry = self.d.sentry['percona-cluster'][0]
self.keystone_sentry = self.d.sentry['keystone'][0]
self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
self.cinder_sentry = self.d.sentry['cinder'][0]
self.ceph0_sentry = self.d.sentry['ceph-mon'][0]
self.ceph1_sentry = self.d.sentry['ceph-mon'][1]
self.ceph2_sentry = self.d.sentry['ceph-mon'][2]
self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0]
self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1]
self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2]
self.cinder_ceph_sentry = self.d.sentry['cinder-ceph'][0]
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Authenticate admin with keystone
self.keystone_session, self.keystone = u.get_default_keystone_session(
self.keystone_sentry,
openstack_release=self._get_openstack_release())
# Authenticate admin with cinder endpoint
if self._get_openstack_release() >= self.xenial_pike:
api_version = 2
else:
api_version = 1
self.cinder = u.authenticate_cinder_admin(self.keystone, api_version)
def test_102_services(self):
"""Verify the expected services are running on the service units."""
if self._get_openstack_release() >= self.xenial_ocata:
cinder_services = ['apache2',
'cinder-scheduler',
'cinder-volume']
else:
cinder_services = ['cinder-api',
'cinder-scheduler',
'cinder-volume']
services = {
self.cinder_sentry: cinder_services,
}
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_112_service_catalog(self):
"""Verify that the service catalog endpoint data"""
u.log.debug('Checking keystone service catalog...')
endpoint_vol = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
endpoint_id = {'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url}
if self._get_openstack_release() >= self.trusty_icehouse:
endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null
if self._get_openstack_release() >= self.xenial_pike:
# Pike and later
expected = {'identity': [endpoint_id],
'volumev2': [endpoint_id]}
else:
# Ocata and prior
expected = {'identity': [endpoint_id],
'volume': [endpoint_id]}
actual = self.keystone.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(
expected,
actual,
openstack_release=self._get_openstack_release())
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_114_cinder_endpoint(self):
"""Verify the cinder endpoint data."""
u.log.debug('Checking cinder endpoint...')
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '8776'
if self._get_openstack_release() >= self.xenial_queens:
expected = {
'id': u.not_null,
'region': 'RegionOne',
'region_id': 'RegionOne',
'url': u.valid_url,
'interface': u.not_null,
'service_id': u.not_null}
ret = u.validate_v3_endpoint_data(
endpoints,
admin_port,
internal_port,
public_port,
expected,
6)
else:
expected = {
'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
ret = u.validate_v2_endpoint_data(
endpoints,
admin_port,
internal_port,
public_port,
expected)
if ret:
amulet.raise_status(amulet.FAIL,
msg='cinder endpoint: {}'.format(ret))
def validate_broker_req(self, unit, relation, expected):
rel_data = json.loads(unit.relation(
relation[0],
relation[1])['broker_req'])
if rel_data['api-version'] != expected['api-version']:
return "Broker request api mismatch"
for index in range(0, len(rel_data['ops'])):
actual_op = rel_data['ops'][index]
expected_op = expected['ops'][index]
for key in ['op', 'name', 'replicas']:
if actual_op[key] == expected_op[key]:
u.log.debug("OK op {} key {}".format(index, key))
else:
return "Mismatch, op: {} key: {}".format(index, key)
return None
def get_broker_request(self):
client_unit = self.cinder_ceph_sentry
broker_req = json.loads(client_unit.relation(
'ceph',
'ceph-mon:client')['broker_req'])
return broker_req
def get_broker_response(self):
broker_request = self.get_broker_request()
u.log.debug('Broker request: {}'.format(broker_request))
response_key = "broker-rsp-{}-{}".format(
self.cinder_ceph_sentry.info['service'],
self.cinder_ceph_sentry.info['unit']
)
u.log.debug('Checking response_key: {}'.format(response_key))
ceph_sentries = [self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry]
for sentry in ceph_sentries:
relation_data = sentry.relation('client', 'cinder-ceph:ceph')
if relation_data.get(response_key):
broker_response = json.loads(relation_data[response_key])
if (broker_request['request-id'] ==
broker_response['request-id']):
u.log.debug('broker_response: {}'.format(broker_response))
return broker_response
def test_200_cinderceph_ceph_ceph_relation(self):
u.log.debug('Checking cinder-ceph:ceph to ceph:client '
'relation data...')
unit = self.cinder_ceph_sentry
relation = ['ceph', 'ceph-mon:client']
req = {
"api-version": 1,
"ops": [{"replicas": 3,
"name": "cinder-ceph",
"op": "create-pool"}]
}
expected = {
'private-address': u.valid_ip,
'broker_req': u.not_null,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder-ceph ceph-mon', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
ret = self.validate_broker_req(unit, relation, req)
if ret:
msg = u.relation_error('cinder-ceph ceph-mon', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_201_ceph_cinderceph_ceph_relation(self):
u.log.debug('Checking ceph:client to cinder-ceph:ceph '
'relation data...')
ceph_unit = self.ceph0_sentry
relation = ['client', 'cinder-ceph:ceph']
expected = {
'key': u.not_null,
'private-address': u.valid_ip,
'ceph-public-address': u.valid_ip,
'auth': 'none',
}
if self._get_openstack_release() >= self.bionic_train:
expected['auth'] = 'cephx'
ret = u.validate_relation_data(ceph_unit, relation, expected)
if ret:
msg = u.relation_error('cinder cinder-ceph storage-backend', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_202_cinderceph_cinder_backend_relation(self):
u.log.debug('Checking cinder-ceph:storage-backend to '
'cinder:storage-backend relation data...')
unit = self.cinder_ceph_sentry
relation = ['storage-backend', 'cinder:storage-backend']
backend_uuid, _ = unit.run('leader-get secret-uuid')
sub_dict = {
"cinder": {
"/etc/cinder/cinder.conf": {
"sections": {
"cinder-ceph": [
["volume_backend_name", "cinder-ceph"],
["volume_driver",
"cinder.volume.drivers.rbd.RBDDriver"],
["rbd_pool", "cinder-ceph"],
["rbd_user", "cinder-ceph"],
["rbd_secret_uuid", backend_uuid],
['rbd_ceph_conf',
'/var/lib/charm/cinder-ceph/ceph.conf'],
]
}
}
}
}
section = sub_dict['cinder']["/etc/cinder/cinder.conf"]["sections"]
if self._get_openstack_release() >= self.trusty_mitaka:
section["cinder-ceph"].append(('report_discard_supported', True))
if self._get_openstack_release() >= self.xenial_ocata:
section["cinder-ceph"].append(('rbd_exclusive_cinder_pool', True))
if self._get_openstack_release() >= self.xenial_queens:
section["cinder-ceph"].append(
('rbd_flatten_volume_from_snapshot', False))
expected = {
'subordinate_configuration': json.dumps(sub_dict),
'private-address': u.valid_ip,
'backend_name': 'cinder-ceph'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder cinder-ceph storage-backend', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_203_cinder_cinderceph_backend_relation(self):
u.log.debug('Checking cinder:storage-backend to '
'cinder-ceph:storage-backend relation data...')
unit = self.cinder_sentry
relation = ['storage-backend', 'cinder-ceph:storage-backend']
expected = {
'private-address': u.valid_ip,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder cinder-ceph storage-backend', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_204_mysql_cinder_db_relation(self):
"""Verify the mysql:glance shared-db relation data"""
u.log.debug('Checking mysql:cinder db relation data...')
unit = self.pxc_sentry
relation = ['shared-db', 'cinder:shared-db']
expected = {
'private-address': u.valid_ip,
'db_host': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('mysql shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_205_cinder_mysql_db_relation(self):
"""Verify the cinder:mysql shared-db relation data"""
u.log.debug('Checking cinder:mysql db relation data...')
unit = self.cinder_sentry
relation = ['shared-db', 'percona-cluster:shared-db']
expected = {
'private-address': u.valid_ip,
'hostname': u.valid_ip,
'username': 'cinder',
'database': 'cinder'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_206_keystone_cinder_id_relation(self):
"""Verify the keystone:cinder identity-service relation data"""
u.log.debug('Checking keystone:cinder id relation data...')
unit = self.keystone_sentry
relation = ['identity-service',
'cinder:identity-service']
expected = {
'service_protocol': 'http',
'service_tenant': 'services',
'service_password': u.not_null,
'service_port': '5000',
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'auth_host': u.valid_ip,
'service_tenant_id': u.not_null,
'service_host': u.valid_ip
}
if self._get_openstack_release() < self.xenial_pike:
# Ocata and earlier
expected['service_username'] = 'cinder_cinderv2'
else:
# Pike and later
expected['service_username'] = 'cinderv2_cinderv3'
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('identity-service cinder', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_207_cinder_keystone_id_relation(self):
"""Verify the cinder:keystone identity-service relation data"""
u.log.debug('Checking cinder:keystone id relation data...')
unit = self.cinder_sentry
relation = ['identity-service',
'keystone:identity-service']
expected = {
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_208_rabbitmq_cinder_amqp_relation(self):
"""Verify the rabbitmq-server:cinder amqp relation data"""
u.log.debug('Checking rmq:cinder amqp relation data...')
unit = self.rabbitmq_sentry
relation = ['amqp', 'cinder:amqp']
expected = {
'private-address': u.valid_ip,
'password': u.not_null,
'hostname': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('amqp cinder', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_209_cinder_rabbitmq_amqp_relation(self):
"""Verify the cinder:rabbitmq-server amqp relation data"""
u.log.debug('Checking cinder:rmq amqp relation data...')
unit = self.cinder_sentry
relation = ['amqp', 'rabbitmq-server:amqp']
expected = {
'private-address': u.valid_ip,
'vhost': 'openstack',
'username': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder amqp', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_300_cinder_config(self):
"""Verify the data in the cinder.conf file."""
u.log.debug('Checking cinder config file data...')
unit = self.cinder_sentry
conf = '/etc/cinder/cinder.conf'
unit_mq = self.rabbitmq_sentry
rel_mq_ci = unit_mq.relation('amqp', 'cinder:amqp')
expected = {
'DEFAULT': {
'use_syslog': 'False',
'debug': 'False',
'verbose': 'False',
'iscsi_helper': 'tgtadm',
'auth_strategy': 'keystone',
'enabled_backends': 'cinder-ceph'
},
'cinder-ceph': {
'volume_backend_name': 'cinder-ceph',
'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
'rbd_pool': 'cinder-ceph',
'rbd_user': 'cinder-ceph',
'rbd_ceph_conf': '/var/lib/charm/cinder-ceph/ceph.conf',
},
}
if self._get_openstack_release() < self.xenial_ocata:
expected['DEFAULT']['volume_group'] = 'cinder-volumes'
expected['DEFAULT']['volumes_dir'] = '/var/lib/cinder/volumes'
expected_rmq = {
'rabbit_userid': 'cinder',
'rabbit_virtual_host': 'openstack',
'rabbit_password': rel_mq_ci['password'],
'rabbit_host': rel_mq_ci['hostname'],
}
if self._get_openstack_release() < self.xenial_ocata:
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
expected['oslo_messaging_rabbit'] = expected_rmq
else:
# Juno or earlier
expected['DEFAULT'].update(expected_rmq)
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "cinder config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_301_cinder_ceph_config(self):
"""Verify the data in the ceph.conf file."""
u.log.debug('Checking cinder ceph config file data...')
unit = self.cinder_sentry
conf = '/etc/ceph/ceph.conf'
expected = {
'global': {
'auth_supported': 'none',
'keyring': '/etc/ceph/$cluster.$name.keyring',
'mon host': u.not_null,
# XXX: Temporarily disabled syslog check, pending
# resolution of https://bugs.launchpad.net/bugs/1604575
# 'log to syslog': 'false'
}
}
if self._get_openstack_release() >= self.bionic_train:
expected['global']['auth_supported'] = 'cephx'
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "cinder ceph config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_400_cinder_api_connection(self):
"""Simple api call to check service is up and responding"""
u.log.debug('Checking basic cinder api functionality...')
check = list(self.cinder.volumes.list())
u.log.debug('Cinder api check (volumes.list): {}'.format(check))
assert(check == [])
def test_401_check_broker_response(self):
u.log.debug('Checking broker response')
broker_response = self.get_broker_response()
if not broker_response or broker_response['exit-code'] != 0:
msg = ('Broker request invalid'
' or failed: {}'.format(broker_response))
amulet.raise_status(amulet.FAIL, msg=msg)
def test_402_create_delete_volume(self):
"""Create a cinder volume and delete it."""
u.log.debug('Creating, checking and deleting cinder volume...')
vol_new = u.create_cinder_volume(self.cinder)
vol_id = vol_new.id
u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume")
def test_409_ceph_check_osd_pools(self):
"""Check osd pools on all ceph units, expect them to be
identical, and expect specific pools to be present."""
u.log.debug('Checking pools on ceph units...')
expected_pools = self.get_ceph_expected_pools()
# Override expected pools
if 'glance' in expected_pools:
expected_pools.remove('glance')
if 'cinder-ceph' not in expected_pools:
expected_pools.append('cinder-ceph')
if (self._get_openstack_release() >= self.xenial_ocata and
'cinder' in expected_pools):
# No cinder after mitaka because we don't use the relation in this
# test
expected_pools.remove('cinder')
results = []
sentries = [
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
# Check for presence of expected pools on each unit
u.log.debug('Expected pools: {}'.format(expected_pools))
for sentry_unit in sentries:
pools = u.get_ceph_pools(sentry_unit)
results.append(pools)
for expected_pool in expected_pools:
if expected_pool not in pools:
msg = ('{} does not have pool: '
'{}'.format(sentry_unit.info['unit_name'],
expected_pool))
amulet.raise_status(amulet.FAIL, msg=msg)
u.log.debug('{} has (at least) the expected '
'pools.'.format(sentry_unit.info['unit_name']))
# Check that all units returned the same pool name:id data
ret = u.validate_list_of_identical_dicts(results)
if ret:
u.log.debug('Pool list results: {}'.format(results))
msg = ('{}; Pool list results are not identical on all '
'ceph units.'.format(ret))
amulet.raise_status(amulet.FAIL, msg=msg)
else:
u.log.debug('Pool list on all ceph units produced the '
'same results (OK).')
def test_410_ceph_cinder_vol_create_pool_inspect(self):
"""Create and confirm a ceph-backed cinder volume, and inspect
ceph cinder pool object count as the volume is created
and deleted."""
sentry_unit = self.ceph0_sentry
obj_count_samples = []
pool_size_samples = []
pools = u.get_ceph_pools(self.ceph0_sentry)
expected_pool = 'cinder-ceph'
cinder_ceph_pool = pools[expected_pool]
# Check ceph cinder pool object count, disk space usage and pool name
u.log.debug('Checking ceph cinder pool original samples...')
u.log.debug('cinder-ceph pool: {}'.format(cinder_ceph_pool))
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
sentry_unit, cinder_ceph_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
if pool_name != expected_pool:
msg = ('Ceph pool {} unexpected name (actual, expected): '
'{}. {}'.format(cinder_ceph_pool, pool_name, expected_pool))
amulet.raise_status(amulet.FAIL, msg=msg)
# Create ceph-backed cinder volume
cinder_vol = u.create_cinder_volume(self.cinder)
# Re-check ceph cinder pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph cinder pool samples after volume create...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
sentry_unit, cinder_ceph_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
# Delete ceph-backed cinder volume
u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")
# Final check, ceph cinder pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph cinder pool after volume delete...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
sentry_unit, cinder_ceph_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
# Validate ceph cinder pool object count samples over time
ret = u.validate_ceph_pool_samples(obj_count_samples,
"cinder pool object count")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# Luminous (pike) ceph seems more efficient at disk usage so we cannot
# guarantee the ordering of kb_used
if self._get_openstack_release() < self.xenial_mitaka:
# Validate ceph cinder pool disk space usage samples over time
ret = u.validate_ceph_pool_samples(pool_size_samples,
"cinder pool disk usage")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_499_ceph_cmds_exit_zero(self):
"""Check basic functionality of ceph cli commands against
all ceph units, and the cinder-ceph unit."""
u.log.debug('Checking exit values are 0 on ceph commands')
sentry_units = [
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
if self._get_openstack_release() < self.bionic_train:
sentry_units.append(self.cinder_ceph_sentry)
commands = [
'sudo ceph health',
'sudo ceph mds stat',
'sudo ceph pg stat',
'sudo ceph osd stat',
'sudo ceph mon stat',
]
ret = u.check_commands_on_units(commands, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_500_ceph_alternatives_cleanup(self):
"""Check ceph alternatives are removed when ceph-mon
relation is broken"""
# Skip this test if release is less than xenial_ocata as in that case
# cinder HAS a relation with ceph directly and this test would fail
if self._get_openstack_release() < self.xenial_ocata:
u.log.debug("No checking 500 ceph alternatives as "
"/etc/ceph/ceph.conf will exist.")
return
u.log.debug('Checking ceph alternatives are removed '
'upon broken ceph-mon relation')
ceph_dir = self.cinder_ceph_sentry.directory_listing('/etc/ceph')
u.log.debug(ceph_dir)
if 'ceph.conf' in ceph_dir['files']:
u.log.debug('/etc/ceph/ceph.conf exists BEFORE relation-broken')
else:
error_msg = '/etc/ceph/ceph.conf does not '
error_msg += 'exist BEFORE relation-broken\n'
error_msg += 'test_500_ceph_alternatives_cleanup FAILED'
amulet.raise_status(amulet.FAIL, msg=error_msg)
self.d.unrelate('ceph-mon:client', 'cinder-ceph:ceph')
self.d.sentry.wait()
ceph_dir_after = self.cinder_ceph_sentry.directory_listing('/etc/ceph')
u.log.debug(ceph_dir_after)
if 'ceph.conf' in ceph_dir_after['files']:
u.log.debug('/etc/ceph/ceph.conf exists AFTER relation-broken')
error_msg = 'Did not expect /etc/ceph/ceph.conf AFTER '
error_msg += 'relation-broken\n'
error_msg += 'test_500_ceph_alternatives_cleanup FAILED'
amulet.raise_status(amulet.FAIL, msg=error_msg)
else:
u.log.debug('/etc/ceph/ceph.conf removed AFTER relation-broken')
u.log.debug('test_500_ceph_alternatives_cleanup PASSED - (OK)')
# Restore cinder-ceph and ceph-mon relation to keep tests idempotent
self.d.relate('ceph-mon:client', 'cinder-ceph:ceph')
self.d.sentry.wait()
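
Functional checks like test_402 above (volume create/delete) move to zaza as well; a hedged sketch of the equivalent, assuming the session and status helpers published by zaza-openstack-tests:

# Hedged sketch of a zaza-side equivalent of test_402 above; the utility
# names are assumptions about zaza-openstack-tests, not part of this commit.
import unittest

import zaza.openstack.utilities.openstack as openstack_utils


class CinderCephVolumeTest(unittest.TestCase):
    """Create and delete a Ceph-backed cinder volume through the API."""

    def test_create_delete_volume(self):
        session = openstack_utils.get_overcloud_keystone_session()
        cinder = openstack_utils.get_cinder_session_client(session)
        volume = cinder.volumes.create(name='zaza-cinder-ceph-vol', size=1)
        try:
            # Block until cinder reports the volume available.
            openstack_utils.resource_reaches_status(
                cinder.volumes, volume.id, expected_status='available')
        finally:
            cinder.volumes.delete(volume)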


@@ -0,0 +1,127 @@
series: bionic
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
auth-supported: 'none'
monitor-count: '3'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,138 @@
series: bionic
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
openstack-origin: cloud:bionic-rocky
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:bionic-rocky
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
auth-supported: 'none'
monitor-count: '3'
source: cloud:bionic-rocky
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
openstack-origin: cloud:bionic-rocky
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,138 @@
series: bionic
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-stein
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
openstack-origin: cloud:bionic-stein
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-stein
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: cloud:bionic-stein
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-stein
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
openstack-origin: cloud:bionic-stein
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:bionic-stein
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
auth-supported: 'none'
monitor-count: '3'
source: cloud:bionic-stein
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
openstack-origin: cloud:bionic-stein
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,149 @@
series: bionic
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - placement:shared-db
- percona-cluster:shared-db
- - placement:identity-service
- keystone:identity-service
- - placement:placement
- nova-cloud-controller:placement
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-train
network-manager: Neutron
debug: true
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
constraints: mem=1G
options:
openstack-origin: cloud:bionic-train
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
openstack-origin: cloud:bionic-train
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-train
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: cloud:bionic-train
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-train
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
openstack-origin: cloud:bionic-train
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:bionic-train
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
source: cloud:bionic-train
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
openstack-origin: cloud:bionic-train
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,138 @@
series: trusty
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:trusty/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
openstack-origin: cloud:trusty-mitaka
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:trusty-mitaka
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
auth-supported: 'none'
monitor-count: '3'
source: cloud:trusty-mitaka
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
openstack-origin: cloud:trusty-mitaka
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,129 @@
series: xenial
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:ceph
- ceph-mon:client
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
auth-supported: 'none'
monitor-count: '3'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,138 @@
series: xenial
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
openstack-origin: cloud:xenial-ocata
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-ocata
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
auth-supported: 'none'
monitor-count: '3'
source: cloud:xenial-ocata
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
openstack-origin: cloud:xenial-ocata
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,138 @@
series: xenial
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - ceph-mon:client
- cinder-ceph:ceph
- - ceph-mon:osd
- ceph-osd:mon
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-pike
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
openstack-origin: cloud:xenial-pike
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-pike
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: cloud:xenial-pike
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-pike
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
openstack-origin: cloud:xenial-pike
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: '10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-pike
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
auth-supported: 'none'
monitor-count: '3'
source: cloud:xenial-pike
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
debug: True
verbose: True
openstack-origin: cloud:xenial-pike
block-device: None
glance-api-version: 2
cinder-ceph:
charm: ../../../cinder-ceph


@@ -0,0 +1,136 @@
series: xenial
relations:
- - nova-compute:image-service
  - glance:image-service
- - nova-compute:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
  - percona-cluster:shared-db
- - nova-cloud-controller:identity-service
  - keystone:identity-service
- - nova-cloud-controller:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
  - nova-compute:cloud-compute
- - nova-cloud-controller:image-service
  - glance:image-service
- - keystone:shared-db
  - percona-cluster:shared-db
- - glance:identity-service
  - keystone:identity-service
- - glance:shared-db
  - percona-cluster:shared-db
- - glance:amqp
  - rabbitmq-server:amqp
- - glance:ceph
  - ceph-mon:client
- - neutron-gateway:amqp
  - rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
  - neutron-gateway:quantum-network-service
- - neutron-api:shared-db
  - percona-cluster:shared-db
- - neutron-api:amqp
  - rabbitmq-server:amqp
- - neutron-api:neutron-api
  - nova-cloud-controller:neutron-api
- - neutron-api:identity-service
  - keystone:identity-service
- - nova-compute:neutron-plugin
  - neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
  - neutron-openvswitch:amqp
- - cinder:shared-db
  - percona-cluster:shared-db
- - cinder:identity-service
  - keystone:identity-service
- - cinder:amqp
  - rabbitmq-server:amqp
- - cinder:image-service
  - glance:image-service
- - cinder-ceph:storage-backend
  - cinder:storage-backend
- - nova-compute:ceph-access
  - cinder-ceph:ceph-access
- - ceph-mon:client
  - cinder-ceph:ceph
- - ceph-mon:osd
  - ceph-osd:mon
applications:
  rabbitmq-server:
    charm: cs:~openstack-charmers-next/rabbitmq-server
    num_units: 1
  percona-cluster:
    charm: cs:~openstack-charmers-next/percona-cluster
    num_units: 1
    options:
      max-connections: 1000
      innodb-buffer-pool-size: 256M
  nova-cloud-controller:
    charm: cs:~openstack-charmers-next/nova-cloud-controller
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
      network-manager: Neutron
      debug: true
  neutron-api:
    charm: cs:~openstack-charmers-next/neutron-api
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
      flat-network-providers: physnet1
      neutron-security-groups: true
  keystone:
    charm: cs:~openstack-charmers-next/keystone
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
  neutron-gateway:
    charm: cs:~openstack-charmers-next/neutron-gateway
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
      bridge-mappings: physnet1:br-ex
  glance:
    charm: cs:~openstack-charmers-next/glance
    num_units: 1
    options:
      openstack-origin: cloud:xenial-queens
  neutron-openvswitch:
    charm: cs:~openstack-charmers-next/neutron-openvswitch
  nova-compute:
    charm: cs:~openstack-charmers-next/nova-compute
    num_units: 1
    constraints: mem=4G cores=4
    options:
      openstack-origin: cloud:xenial-queens
      config-flags: auto_assign_floating_ip=False
      enable-live-migration: false
      aa-profile-mode: enforce
      ephemeral-device: /dev/vdb
      ephemeral-unmount: /mnt
      debug: true
  ceph-osd:
    charm: cs:~openstack-charmers-next/ceph-osd
    num_units: 3
    storage:
      osd-devices: '10G'
    options:
      osd-devices: '/dev/test-non-existent'
  ceph-mon:
    charm: cs:~openstack-charmers-next/ceph-mon
    num_units: 3
    options:
      auth-supported: 'none'
      monitor-count: '3'
  cinder:
    charm: cs:~openstack-charmers-next/cinder
    num_units: 1
    options:
      debug: True
      verbose: True
      openstack-origin: cloud:xenial-queens
      block-device: None
      glance-api-version: 2
  cinder-ceph:
    charm: ../../../cinder-ceph
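
These bundles define the topology that zaza deploys for each gate target. As a
minimal sketch of how a test can wait for such a deployment to settle before
asserting anything (the 'cinder-ceph' application name and the 'Unit is ready'
workload status below are illustrative assumptions, not part of this change):

import zaza.model

def wait_for_deployment():
    # Block until every unit agent in the deployed bundle goes idle.
    zaza.model.block_until_all_units_idle()
    # Then wait for the cinder-ceph subordinate to report readiness.
    zaza.model.block_until_wl_status_info_starts_with(
        'cinder-ceph', 'Unit is ready')

if __name__ == '__main__':
    wait_for_deployment()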


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on bionic-queens."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='bionic')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on bionic-rocky."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='bionic',
openstack='cloud:bionic-rocky',
source='cloud:bionic-updates/rocky')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on bionic-stein."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='bionic',
openstack='cloud:bionic-stein',
source='cloud:bionic-stein')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on bionic-train."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='bionic',
openstack='cloud:bionic-train',
source='cloud:bionic-train')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on trusty-mitaka."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='trusty',
openstack='cloud:trusty-mitaka',
source='cloud:trusty-updates/mitaka')
deployment.run_tests()


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on xenial-mitaka."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='xenial')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on xenial-ocata."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='xenial',
openstack='cloud:xenial-ocata',
source='cloud:xenial-updates/ocata')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on xenial-pike."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='xenial',
openstack='cloud:xenial-pike',
source='cloud:xenial-updates/pike')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic cinder-ceph deployment on xenial-queens."""
from basic_deployment import CinderCephBasicDeployment
if __name__ == '__main__':
deployment = CinderCephBasicDeployment(series='xenial',
openstack='cloud:xenial-queens',
source='cloud:xenial-updates/queens')
deployment.run_tests()


@@ -1,18 +1,38 @@
# Bootstrap the model if necessary.
bootstrap: True
# Re-use bootstrap node.
reset: True
# Use tox/requirements to drive the venv instead of bundletester's venv feature.
virtualenv: False
# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
makefile: []
# Do not specify juju PPA sources. Juju is presumed to be pre-installed
# and configured in all test runner environments.
#sources:
# Do not specify or rely on system packages.
#packages:
# Do not specify python packages here. Use test-requirements.txt
# and tox instead, i.e. the venv is constructed before bundletester
# is invoked.
#python-packages:
reset_timeout: 600
charm_name: cinder-ceph
comment: |
  The cinder-ceph charm assertions are intended to be the sum of the
  cinder and ceph-mon zaza charm tests.
smoke_bundles:
  - bionic-train
gate_bundles:
  - bionic-train
  - bionic-stein
  - bionic-rocky
  - bionic-queens
  - xenial-queens
  - xenial-ocata
  - xenial-pike
  - xenial-mitaka
  - trusty-mitaka
dev_bundles:
configure:
  - zaza.openstack.charm_tests.glance.setup.add_cirros_image
  - zaza.openstack.charm_tests.glance.setup.add_lts_image
  - zaza.openstack.charm_tests.keystone.setup.add_demo_user
  - zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
  - zaza.openstack.charm_tests.nova.setup.create_flavors
  - zaza.openstack.charm_tests.nova.setup.manage_ssh_key
tests:
  # do cinder tests early as they are sensitive to service restarts that occur
  # with the ceph tests (due to relation changes on ceph-mon and ceph-osd).
  - zaza.openstack.charm_tests.cinder.tests.CinderTests
  - zaza.openstack.charm_tests.ceph.mon.tests.CinderCephMonTest
  - zaza.openstack.charm_tests.cinder.tests.SecurityTests
  - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
  - zaza.openstack.charm_tests.ceph.tests.CephRelationTest
  - zaza.openstack.charm_tests.ceph.tests.CephTest
  - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
  - zaza.openstack.charm_tests.policyd.tests.CinderTests
tests_options:
  policyd:
    service: cinder
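
Each entry under configure: and tests: above is an importable Python callable
or test class that functest-run-suite resolves at run time. A minimal sketch of
the shape such a test class takes (the class name, application, and service
below are illustrative assumptions, not the actual CinderTests implementation):

import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils

class ExampleCinderTest(test_utils.OpenStackBaseTest):
    """Illustrative zaza-style check against the deployed cinder app."""

    def test_services_running(self):
        # Assert the expected service is running on every cinder unit;
        # 'cinder-scheduler' here is an assumed example service name.
        for unit in zaza.model.get_units('cinder'):
            zaza.model.block_until_service_status(
                unit.entity_id, ['cinder-scheduler'], 'running')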

tox.ini

@@ -1,8 +1,12 @@
# Classic charm (with amulet): ./tox.ini
# Classic charm (with zaza): ./tox.ini
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of tox.ini for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# TODO: Distill the func test requirements from the lint/unit test
# requirements. They are intertwined. Also, Zaza itself should specify
# all of its own requirements and if it doesn't, fix it there.
[tox]
envlist = pep8,py3
skipsdist = True
@@ -15,17 +19,12 @@ skip_missing_interpreters = False
setenv = VIRTUAL_ENV={envdir}
         PYTHONHASHSEED=0
         CHARM_DIR={envdir}
         AMULET_SETUP_TIMEOUT=5400
install_command =
  pip install {opts} {packages}
commands = stestr run --slowest {posargs}
whitelist_externals = juju
passenv = HOME TERM AMULET_* CS_* OS_* TEST_*

[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
passenv = HOME TERM CS_* OS_* TEST_*
deps = -r{toxinidir}/test-requirements.txt

[testenv:py35]
basepython = python3.5
@@ -42,6 +41,16 @@ basepython = python3.7
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:py38]
basepython = python3.8
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt

[testenv:pep8]
basepython = python3
deps = -r{toxinidir}/requirements.txt
@@ -82,39 +91,29 @@ basepython = python3
commands = {posargs}

[testenv:func-noop]
# DRY RUN - For Debug
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
basepython = python3
commands =
    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
    functest-run-suite --help

[testenv:func]
# Charm Functional Test
# Run all gate tests which are +x (expected to always pass)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
basepython = python3
commands =
    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
    functest-run-suite --keep-model

[testenv:func-smoke]
# Charm Functional Test
# Run a specific test as an Amulet smoke test (expected to always pass)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
basepython = python3
commands =
    bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-train --no-destroy
    functest-run-suite --keep-model --smoke

[testenv:func-dev]
# Charm Functional Test
# Run all development test targets which are +x (may not always pass!)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
basepython = python3
commands =
    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
    functest-run-suite --keep-model --dev

[testenv:func-target]
basepython = python3
commands =
    functest-run-suite --keep-model --bundle {posargs}

[flake8]
ignore = E402,E226
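
In day-to-day use the functional targets above are driven through tox, with
anything after -- forwarded as {posargs}. A small illustrative helper (the
target names come from this tox.ini; the helper itself is not part of the
change and assumes tox is installed and run from the charm root):

import subprocess

def run_func_target(target='func-smoke', bundle=None):
    """Invoke one of the tox functional environments defined above."""
    cmd = ['tox', '-e', target]
    if bundle:
        # func-target forwards {posargs} to functest-run-suite --bundle.
        cmd += ['--', bundle]
    subprocess.run(cmd, check=True)

if __name__ == '__main__':
    # Equivalent to: tox -e func-target -- xenial-queens
    run_func_target('func-target', 'xenial-queens')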