Migrate charm-ceph-mon testing to Zaza

Closes-Bug: #1828424
Change-Id: Ie46129f02566f17eabbf2eb0cae217fa0e886a4f
Chris MacNaughton 2019-05-01 15:26:21 +02:00 committed by Chris MacNaughton (icey)
parent 3d9ea3908e
commit 01fb37ebee
25 changed files with 1054 additions and 1141 deletions


@@ -27,3 +27,5 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
# NOTE: workaround for 14.04 pip/tox
pytz
pyudev # for ceph-* charm unit tests (not mocked?)
git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza;python_version>'3.4'
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack;python_version>'3.4'
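Note that both zaza requirements carry a PEP 508 environment marker, so they are only installed on Python 3 interpreters; this is why the Python 2.7 functional targets disappear from tox.ini further down. A minimal sketch of how such a marker evaluates, assuming the 'packaging' library is available (pip vendors it):
# Sketch only: evaluate the environment marker used on the zaza requirements above.
from packaging.markers import Marker

marker = Marker("python_version > '3.4'")
print(marker.evaluate())  # True on the Python 3 interpreters these test deps target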


@@ -1,9 +0,0 @@
# Overview
This directory provides Amulet tests to verify basic deployment functionality
from the perspective of this charm, its requirements and its features, as
exercised in a subset of the full OpenStack deployment test bundle topology.
For full details on functional testing of OpenStack charms please refer to
the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
section of the OpenStack Charm Guide.


@@ -1,835 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import time
import json
import keystoneclient
from keystoneclient.v3 import client as keystone_client_v3
from novaclient import client as nova_client
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import ( # noqa
OpenStackAmuletUtils,
DEBUG,
# ERROR
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
class CephBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic ceph deployment."""
def __init__(self, series=None, openstack=None, source=None, stable=False):
"""Deploy the entire test environment."""
super(CephBasicDeployment, self).__init__(series, openstack, source,
stable)
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
u.log.info('Waiting on extended status checks...')
exclude_services = []
# Wait for deployment ready msgs, except exclusions
self._auto_wait_for_status(exclude_services=exclude_services)
self.d.sentry.wait()
self._initialize_tests()
def _add_services(self):
"""Add services
Add the services that we're testing, where ceph is local,
and the rest of the services are from lp branches that are
compatible with the local charm (e.g. stable or next).
"""
this_service = {'name': 'ceph-mon', 'units': 3}
other_services = [
{'name': 'percona-cluster'},
{'name': 'keystone'},
{'name': 'ceph-osd',
'units': 3,
'storage': {'osd-devices': 'cinder,10G'}},
{'name': 'rabbitmq-server'},
{'name': 'nova-compute'},
{'name': 'glance'},
{'name': 'cinder'},
{'name': 'cinder-ceph'},
{'name': 'nova-cloud-controller'},
]
super(CephBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'nova-compute:amqp': 'rabbitmq-server:amqp',
'nova-compute:image-service': 'glance:image-service',
'nova-compute:ceph': 'ceph-mon:client',
'keystone:shared-db': 'percona-cluster:shared-db',
'glance:shared-db': 'percona-cluster:shared-db',
'glance:identity-service': 'keystone:identity-service',
'glance:amqp': 'rabbitmq-server:amqp',
'glance:ceph': 'ceph-mon:client',
'cinder:shared-db': 'percona-cluster:shared-db',
'cinder:identity-service': 'keystone:identity-service',
'cinder:amqp': 'rabbitmq-server:amqp',
'cinder:image-service': 'glance:image-service',
'cinder-ceph:storage-backend': 'cinder:storage-backend',
'cinder-ceph:ceph': 'ceph-mon:client',
'ceph-osd:mon': 'ceph-mon:osd',
'nova-cloud-controller:shared-db': 'percona-cluster:shared-db',
'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
'nova-cloud-controller:identity-service': 'keystone:'
'identity-service',
'nova-cloud-controller:cloud-compute': 'nova-compute:'
'cloud-compute',
'nova-cloud-controller:image-service': 'glance:image-service',
}
super(CephBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting'}
cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
pxc_config = {
'max-connections': 1000,
}
# Include a non-existent device as osd-devices is a whitelist,
# and this will catch cases where proposals attempt to change that.
ceph_config = {
'monitor-count': '3',
'auth-supported': 'none',
}
ceph_osd_config = {
'osd-devices': '/srv/ceph /dev/test-non-existent',
}
configs = {'keystone': keystone_config,
'percona-cluster': pxc_config,
'cinder': cinder_config,
'ceph-mon': ceph_config,
'ceph-osd': ceph_osd_config,
}
super(CephBasicDeployment, self)._configure_services(configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.pxc_sentry = self.d.sentry['percona-cluster'][0]
self.keystone_sentry = self.d.sentry['keystone'][0]
self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
self.nova_sentry = self.d.sentry['nova-compute'][0]
self.glance_sentry = self.d.sentry['glance'][0]
self.cinder_sentry = self.d.sentry['cinder'][0]
self.cinder_ceph_sentry = self.d.sentry['cinder-ceph'][0]
self.ceph_osd_sentry = self.d.sentry['ceph-osd'][0]
self.ceph0_sentry = self.d.sentry['ceph-mon'][0]
self.ceph1_sentry = self.d.sentry['ceph-mon'][1]
self.ceph2_sentry = self.d.sentry['ceph-mon'][2]
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Authenticate admin with keystone
self.keystone_session, self.keystone = u.get_default_keystone_session(
self.keystone_sentry,
openstack_release=self._get_openstack_release())
# Authenticate admin with cinder endpoint
self.cinder = u.authenticate_cinder_admin(self.keystone)
force_v1_client = False
if self._get_openstack_release() == self.trusty_icehouse:
# Updating image properties (such as arch or hypervisor) using the
# v2 api in icehouse results in:
# https://bugs.launchpad.net/python-glanceclient/+bug/1371559
u.log.debug('Forcing glance to use v1 api')
force_v1_client = True
# Authenticate admin with glance endpoint
self.glance = u.authenticate_glance_admin(
self.keystone,
force_v1_client=force_v1_client)
# Authenticate admin with nova endpoint
self.nova = nova_client.Client(2, session=self.keystone_session)
keystone_ip = self.keystone_sentry.info['public-address']
# Create a demo tenant/role/user
self.demo_tenant = 'demoTenant'
self.demo_role = 'demoRole'
self.demo_user = 'demoUser'
self.demo_project = 'demoProject'
self.demo_domain = 'demoDomain'
if self._get_openstack_release() >= self.xenial_queens:
self.create_users_v3()
self.demo_user_session, auth = u.get_keystone_session(
keystone_ip,
self.demo_user,
'password',
api_version=3,
user_domain_name=self.demo_domain,
project_domain_name=self.demo_domain,
project_name=self.demo_project
)
self.keystone_demo = keystone_client_v3.Client(
session=self.demo_user_session)
self.nova_demo = nova_client.Client(
2,
session=self.demo_user_session)
else:
self.create_users_v2()
# Authenticate demo user with keystone
self.keystone_demo = \
u.authenticate_keystone_user(
self.keystone, user=self.demo_user,
password='password',
tenant=self.demo_tenant)
# Authenticate demo user with nova-api
self.nova_demo = u.authenticate_nova_user(self.keystone,
user=self.demo_user,
password='password',
tenant=self.demo_tenant)
def create_users_v3(self):
try:
self.keystone.projects.find(name=self.demo_project)
except keystoneclient.exceptions.NotFound:
domain = self.keystone.domains.create(
self.demo_domain,
description='Demo Domain',
enabled=True
)
project = self.keystone.projects.create(
self.demo_project,
domain,
description='Demo Project',
enabled=True,
)
user = self.keystone.users.create(
self.demo_user,
domain=domain.id,
project=self.demo_project,
password='password',
email='demov3@demo.com',
description='Demo',
enabled=True)
role = self.keystone.roles.find(name='Admin')
self.keystone.roles.grant(
role.id,
user=user.id,
project=project.id)
def create_users_v2(self):
if not u.tenant_exists(self.keystone, self.demo_tenant):
tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
description='demo tenant',
enabled=True)
self.keystone.roles.create(name=self.demo_role)
self.keystone.users.create(name=self.demo_user,
password='password',
tenant_id=tenant.id,
email='demo@demo.com')
def test_100_ceph_processes(self):
"""Verify that the expected service processes are running
on each ceph unit."""
# Process name and quantity of processes to expect on each unit
ceph_processes = {
'ceph-mon': 1
}
# Units with process names and PID quantities expected
expected_processes = {
self.ceph0_sentry: ceph_processes,
self.ceph1_sentry: ceph_processes,
self.ceph2_sentry: ceph_processes
}
actual_pids = u.get_unit_process_ids(expected_processes)
ret = u.validate_unit_process_ids(expected_processes, actual_pids)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_102_services(self):
"""Verify the expected services are running on the service units."""
services = {
self.rabbitmq_sentry: ['rabbitmq-server'],
self.nova_sentry: ['nova-compute'],
self.keystone_sentry: ['keystone'],
self.glance_sentry: ['glance-api'],
self.cinder_sentry: ['cinder-scheduler',
'cinder-volume'],
}
if self._get_openstack_release() < self.xenial_ocata:
services[self.cinder_sentry].append('cinder-api')
if self._get_openstack_release() < self.xenial_mitaka:
# For upstart systems only. Ceph services under systemd
# are checked by process name instead.
ceph_services = [
'ceph-mon-all',
'ceph-mon id=`hostname`'
]
services[self.ceph0_sentry] = ceph_services
services[self.ceph1_sentry] = ceph_services
services[self.ceph2_sentry] = ceph_services
ceph_osd_services = [
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
]
services[self.ceph_osd_sentry] = ceph_osd_services
if self._get_openstack_release() >= self.trusty_liberty:
services[self.keystone_sentry] = ['apache2']
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_200_ceph_nova_client_relation(self):
"""Verify the ceph to nova ceph-client relation data."""
u.log.debug('Checking ceph:nova-compute ceph-mon relation data...')
unit = self.ceph0_sentry
relation = ['client', 'nova-compute:ceph']
expected = {
'private-address': u.valid_ip,
'auth': 'none',
'key': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceph-mon to nova ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_201_nova_ceph_client_relation(self):
"""Verify the nova to ceph client relation data."""
u.log.debug('Checking nova-compute:ceph ceph-client relation data...')
unit = self.nova_sentry
relation = ['ceph', 'ceph-mon:client']
expected = {
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('nova to ceph ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_202_ceph_glance_client_relation(self):
"""Verify the ceph to glance ceph-client relation data."""
u.log.debug('Checking ceph:glance client relation data...')
unit = self.ceph1_sentry
relation = ['client', 'glance:ceph']
expected = {
'private-address': u.valid_ip,
'auth': 'none',
'key': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceph to glance ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_203_glance_ceph_client_relation(self):
"""Verify the glance to ceph client relation data."""
u.log.debug('Checking glance:ceph client relation data...')
unit = self.glance_sentry
relation = ['ceph', 'ceph-mon:client']
expected = {
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('glance to ceph ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_204_ceph_cinder_client_relation(self):
"""Verify the ceph to cinder ceph-client relation data."""
u.log.debug('Checking ceph:cinder ceph relation data...')
unit = self.ceph2_sentry
relation = ['client', 'cinder-ceph:ceph']
expected = {
'private-address': u.valid_ip,
'auth': 'none',
'key': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('ceph to cinder ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_205_cinder_ceph_client_relation(self):
"""Verify the cinder to ceph ceph-client relation data."""
u.log.debug('Checking cinder:ceph ceph relation data...')
unit = self.cinder_ceph_sentry
relation = ['ceph', 'ceph-mon:client']
expected = {
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
message = u.relation_error('cinder to ceph ceph-client', ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_300_ceph_config(self):
"""Verify the data in the ceph config file."""
u.log.debug('Checking ceph config file data...')
unit = self.ceph0_sentry
conf = '/etc/ceph/ceph.conf'
expected = {
'global': {
'log to syslog': 'false',
'err to syslog': 'false',
'clog to syslog': 'false',
'mon cluster log to syslog': 'false',
'auth cluster required': 'none',
'auth service required': 'none',
'auth client required': 'none'
},
'mon': {
'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
},
'mds': {
'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
},
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "ceph config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_302_cinder_rbd_config(self):
"""Verify the cinder config file data regarding ceph."""
u.log.debug('Checking cinder (rbd) config file data...')
unit = self.cinder_sentry
conf = '/etc/cinder/cinder.conf'
section_key = 'cinder-ceph'
expected = {
section_key: {
'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "cinder (rbd) config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_304_glance_rbd_config(self):
"""Verify the glance config file data regarding ceph."""
u.log.debug('Checking glance (rbd) config file data...')
unit = self.glance_sentry
conf = '/etc/glance/glance-api.conf'
config = {
'default_store': 'rbd',
'rbd_store_ceph_conf': '/etc/ceph/ceph.conf',
'rbd_store_user': 'glance',
'rbd_store_pool': 'glance',
'rbd_store_chunk_size': '8'
}
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
config['stores'] = ('glance.store.filesystem.Store,'
'glance.store.http.Store,'
'glance.store.rbd.Store')
section = 'glance_store'
else:
# Juno or earlier
section = 'DEFAULT'
expected = {section: config}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "glance (rbd) config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_306_nova_rbd_config(self):
"""Verify the nova config file data regarding ceph."""
u.log.debug('Checking nova (rbd) config file data...')
unit = self.nova_sentry
conf = '/etc/nova/nova.conf'
expected = {
'libvirt': {
'rbd_user': 'nova-compute',
'rbd_secret_uuid': u.not_null
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "nova (rbd) config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_400_ceph_check_osd_pools(self):
"""Check osd pools on all ceph units, expect them to be
identical, and expect specific pools to be present."""
u.log.debug('Checking pools on ceph units...')
expected_pools = self.get_ceph_expected_pools()
results = []
sentries = [
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
# Check for presence of expected pools on each unit
u.log.debug('Expected pools: {}'.format(expected_pools))
for sentry_unit in sentries:
pools = u.get_ceph_pools(sentry_unit)
results.append(pools)
for expected_pool in expected_pools:
if expected_pool not in pools:
msg = ('{} does not have pool: '
'{}'.format(sentry_unit.info['unit_name'],
expected_pool))
amulet.raise_status(amulet.FAIL, msg=msg)
u.log.debug('{} has (at least) the expected '
'pools.'.format(sentry_unit.info['unit_name']))
# Check that all units returned the same pool name:id data
ret = u.validate_list_of_identical_dicts(results)
if ret:
u.log.debug('Pool list results: {}'.format(results))
msg = ('{}; Pool list results are not identical on all '
'ceph units.'.format(ret))
amulet.raise_status(amulet.FAIL, msg=msg)
else:
u.log.debug('Pool list on all ceph units produced the '
'same results (OK).')
def test_402_pause_resume_actions(self):
"""Veryfy that pause/resume works"""
u.log.debug("Testing pause")
cmd = "ceph -s"
sentry_unit = self.ceph0_sentry
action_id = u.run_action(sentry_unit, 'pause-health')
assert u.wait_on_action(action_id), "Pause health action failed."
output, code = sentry_unit.run(cmd)
if 'nodown' not in output or 'noout' not in output:
amulet.raise_status(amulet.FAIL, msg="Missing noout,nodown")
u.log.debug("Testing resume")
action_id = u.run_action(sentry_unit, 'resume-health')
assert u.wait_on_action(action_id), "Resume health action failed."
output, code = sentry_unit.run(cmd)
if 'nodown' in output or 'noout' in output:
amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown")
def test_501_security_checklist_action(self):
"""Verify expected result on a default install"""
u.log.debug("Testing security-checklist")
sentry_unit = self.ceph0_sentry
action_id = u.run_action(sentry_unit, "security-checklist")
u.wait_on_action(action_id)
data = amulet.actions.get_action_output(action_id, full_output=True)
assert data.get(u"status") == "completed", \
"Security check is expected to pass by default"
@staticmethod
def find_pool(sentry_unit, pool_name):
"""
This will do a ceph osd dump and search for the pool you specify
:param sentry_unit: The unit to run this command from.
:param pool_name: str. The name of the Ceph pool to query
:return: str or None. The ceph pool or None if not found
"""
output, dump_code = sentry_unit.run("ceph osd dump")
if dump_code is not 0:
amulet.raise_status(
amulet.FAIL,
msg="ceph osd dump failed with output: {}".format(
output))
for line in output.split('\n'):
match = re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'", line)
if match:
name = match.group('pool_name')
if name == pool_name:
return line
return None
def test_403_cache_tier_actions(self):
"""Verify that cache tier add/remove works"""
u.log.debug("Testing cache tiering")
sentry_unit = self.ceph0_sentry
# Create our backer pool
output, code = sentry_unit.run("ceph osd pool create cold 128 128 ")
if code is not 0:
amulet.raise_status(
amulet.FAIL,
msg="ceph osd pool create cold failed with output: {}".format(
output))
# Create our cache pool
output, code = sentry_unit.run("ceph osd pool create hot 128 128 ")
if code is not 0:
amulet.raise_status(
amulet.FAIL,
msg="ceph osd pool create hot failed with output: {}".format(
output))
action_id = u.run_action(sentry_unit,
'create-cache-tier',
params={
'backer-pool': 'cold',
'cache-pool': 'hot',
'cache-mode': 'writeback'})
assert u.wait_on_action(action_id), \
"Create cache tier action failed."
pool_line = self.find_pool(
sentry_unit=sentry_unit,
pool_name='hot')
assert "cache_mode writeback" in pool_line, \
"cache_mode writeback not found in cache pool"
remove_action_id = u.run_action(sentry_unit,
'remove-cache-tier',
params={
'backer-pool': 'cold',
'cache-pool': 'hot'})
assert u.wait_on_action(remove_action_id), \
"Remove cache tier action failed"
pool_line = self.find_pool(sentry_unit=sentry_unit, pool_name='hot')
assert "cache_mode" not in pool_line, \
"cache_mode is still enabled on cache pool"
def test_404_set_noout_actions(self):
"""Verify that set/unset noout works"""
u.log.debug("Testing set noout")
cmd = "ceph -s"
sentry_unit = self.ceph0_sentry
action_id = u.run_action(sentry_unit, 'set-noout')
assert u.wait_on_action(action_id), "Set noout action failed."
output, code = sentry_unit.run(cmd)
if 'noout' not in output:
amulet.raise_status(amulet.FAIL, msg="Missing noout")
u.log.debug("Testing unset noout")
action_id = u.run_action(sentry_unit, 'unset-noout')
assert u.wait_on_action(action_id), "Unset noout action failed."
output, code = sentry_unit.run(cmd)
if 'noout' in output:
amulet.raise_status(amulet.FAIL, msg="Still has noout")
def test_410_ceph_cinder_vol_create(self):
"""Create and confirm a ceph-backed cinder volume, and inspect
ceph cinder pool object count as the volume is created
and deleted."""
sentry_unit = self.ceph0_sentry
obj_count_samples = []
pool_size_samples = []
pools = u.get_ceph_pools(self.ceph0_sentry)
cinder_pool = pools['cinder-ceph']
# Check ceph cinder pool object count, disk space usage and pool name
u.log.debug('Checking ceph cinder pool original samples...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
cinder_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
expected = 'cinder-ceph'
if pool_name != expected:
msg = ('Ceph pool {} unexpected name (actual, expected): '
'{}. {}'.format(cinder_pool, pool_name, expected))
amulet.raise_status(amulet.FAIL, msg=msg)
# Create ceph-backed cinder volume
cinder_vol = u.create_cinder_volume(self.cinder)
# Re-check ceph cinder pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph cinder pool samples after volume create...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
cinder_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
# Delete ceph-backed cinder volume
u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")
# Final check, ceph cinder pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph cinder pool after volume delete...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
cinder_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
# Validate ceph cinder pool object count samples over time
ret = u.validate_ceph_pool_samples(obj_count_samples,
"cinder pool object count")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# Luminous (pike) ceph seems more efficient at disk usage so we cannot
# guarantee the ordering of kb_used
if self._get_openstack_release() < self.xenial_pike:
# Validate ceph cinder pool disk space usage samples over time
ret = u.validate_ceph_pool_samples(pool_size_samples,
"cinder pool disk usage")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_412_ceph_glance_image_create_delete(self):
"""Create and confirm a ceph-backed glance image, and inspect
ceph glance pool object count as the image is created
and deleted."""
sentry_unit = self.ceph0_sentry
obj_count_samples = []
pool_size_samples = []
pools = u.get_ceph_pools(self.ceph0_sentry)
glance_pool = pools['glance']
# Check ceph glance pool object count, disk space usage and pool name
u.log.debug('Checking ceph glance pool original samples...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
glance_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
expected = 'glance'
if pool_name != expected:
msg = ('Ceph glance pool {} unexpected name (actual, '
'expected): {}. {}'.format(glance_pool,
pool_name, expected))
amulet.raise_status(amulet.FAIL, msg=msg)
# Create ceph-backed glance image
glance_img = u.create_cirros_image(self.glance, "cirros-image-1")
# Re-check ceph glance pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph glance pool samples after image create...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
glance_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
# Delete ceph-backed glance image
u.delete_resource(self.glance.images,
glance_img.id, msg="glance image")
# Final check, ceph glance pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph glance pool samples after image delete...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(sentry_unit,
glance_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
# Validate ceph glance pool object count samples over time
ret = u.validate_ceph_pool_samples(obj_count_samples,
"glance pool object count")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# Validate ceph glance pool disk space usage samples over time
ret = u.validate_ceph_pool_samples(pool_size_samples,
"glance pool disk usage")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_414_get_health_action(self):
"""Verify that getting health works"""
u.log.debug("Testing get-health")
sentry_unit = self.ceph0_sentry
action_id = u.run_action(sentry_unit, 'get-health')
assert u.wait_on_action(action_id), "HEALTH_OK"
def test_420_show_disk_free_action(self):
"""Verify show-disk-free"""
u.log.debug("Testing show-disk-free")
if self._get_openstack_release() < self.trusty_kilo:
u.log.info(
"show-disk-free only supported in >=kilo, skipping")
return
sentry_unit = self.ceph0_sentry
action_id = u.run_action(sentry_unit,
'show-disk-free',
params={'format': 'json'})
assert u.wait_on_action(action_id), "Show-disk-free action failed."
data = amulet.actions.get_action_output(action_id, full_output=True)
assert data.get(u"status") == "completed", "Show-disk-free failed"
message = data.get(u"results").get(u"message")
assert message is not None
jsonout = json.loads(message.strip())
nodes = jsonout.get(u"nodes")
assert nodes is not None, "Show-disk-free: no 'nodes' elem"
assert len(nodes) > 0, "Show-disk-free action: 0 nodes"
def test_499_ceph_cmds_exit_zero(self):
"""Check basic functionality of ceph cli commands against
all ceph units."""
sentry_units = [
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
commands = [
'sudo ceph health',
'sudo ceph mds stat',
'sudo ceph pg stat',
'sudo ceph osd stat',
'sudo ceph mon stat',
]
ret = u.check_commands_on_units(commands, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# FYI: No restart check as ceph services do not restart
# when charm config changes, unless monitor count increases.


@@ -0,0 +1,90 @@
series: bionic
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
series: bionic
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
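This bundle and the ones that follow are plain Juju bundles; zaza picks them up by name from the gate_bundles, smoke_bundles and dev_bundles lists in tests.yaml (shown near the end of this change). As a rough local sanity check, assuming a path such as tests/bundles/bionic-queens.yaml (file names are not shown in this view) and PyYAML:
# Rough sanity check of a bundle file; the path is an assumption.
import yaml

with open('tests/bundles/bionic-queens.yaml') as f:
    bundle = yaml.safe_load(f)

print(bundle['series'])                          # e.g. bionic
print(sorted(bundle['applications']))            # ceph-mon, ceph-osd, cinder, ...
print(len(bundle.get('relations', [])), 'relations')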


@@ -0,0 +1,104 @@
series: bionic
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:bionic-rocky
ceph-mon:
charm: ceph-mon
series: bionic
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:bionic-rocky
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:bionic-rocky
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:bionic-rocky
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:bionic-rocky
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-rocky
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,104 @@
series: bionic
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:bionic-stein
ceph-mon:
charm: ceph-mon
series: bionic
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:bionic-stein
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:bionic-stein
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:bionic-stein
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-stein
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:bionic-stein
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-stein
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:bionic-stein
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-stein
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,90 @@
series: cosmic
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: ceph-mon
series: cosmic
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,90 @@
series: disco
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: ceph-mon
series: disco
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,140 @@
series: trusty
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
ceph-mon:
charm: ceph-mon
series: trusty
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
# workaround while awaiting release of next version of python-libjuju with
# model-constraints support
constraints:
virt-type=kvm
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,90 @@
series: xenial
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
ceph-mon:
charm: ceph-mon
series: xenial
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,104 @@
series: xenial
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:xenial-ocata
ceph-mon:
charm: ceph-mon
series: xenial
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-ocata
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:xenial-ocata
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:xenial-ocata
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-ocata
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,104 @@
series: xenial
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:xenial-pike
ceph-mon:
charm: ceph-mon
series: xenial
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-pike
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:xenial-pike
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:xenial-pike
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-pike
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-pike
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-pike
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-pike
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-pike
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -0,0 +1,104 @@
series: xenial
applications:
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/srv/ceph /dev/test-non-existent'
source: cloud:xenial-queens
ceph-mon:
charm: ceph-mon
series: xenial
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-queens
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
dataset-size: 25%
max-connections: 1000
source: cloud:xenial-queens
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: cloud:xenial-queens
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-queens
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: cloud:xenial-queens
glance:
expose: True
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:xenial-queens
cinder:
expose: True
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-queens
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
nova-cloud-controller:
expose: True
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:xenial-queens
relations:
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-compute:image-service
- glance:image-service
- - nova-compute:ceph
- ceph-mon:client
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:amqp
- rabbitmq-server:amqp
- - glance:ceph
- ceph-mon:client
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - cinder:image-service
- glance:image-service
- - cinder-ceph:storage-backend
- cinder:storage-backend
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on cosmic-rocky."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='cosmic')
deployment.run_tests()


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on disco-stein."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='disco')
deployment.run_tests()


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on bionic-queens."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='bionic')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on bionic-rocky."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='bionic',
openstack='cloud:bionic-rocky',
source='cloud:bionic-updates/rocky')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on bionic-stein."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='bionic',
openstack='cloud:bionic-stein',
source='cloud:bionic-stein')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on trusty-mitaka."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='trusty',
openstack='cloud:trusty-mitaka',
source='cloud:trusty-updates/mitaka')
deployment.run_tests()


@@ -1,23 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-mitaka."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='xenial')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-ocata."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='xenial',
openstack='cloud:xenial-ocata',
source='cloud:xenial-updates/ocata')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-pike."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='xenial',
openstack='cloud:xenial-pike',
source='cloud:xenial-updates/pike')
deployment.run_tests()


@@ -1,25 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic ceph deployment on xenial-queens."""
from basic_deployment import CephBasicDeployment
if __name__ == '__main__':
deployment = CephBasicDeployment(series='xenial',
openstack='cloud:xenial-queens',
source='cloud:xenial-updates/queens')
deployment.run_tests()


@@ -1,18 +1,22 @@
# Bootstrap the model if necessary.
bootstrap: True
# Re-use bootstrap node.
reset: True
# Use tox/requirements to drive the venv instead of bundletester's venv feature.
virtualenv: False
# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
makefile: []
# Do not specify juju PPA sources. Juju is presumed to be pre-installed
# and configured in all test runner environments.
#sources:
# Do not specify or rely on system packages.
#packages:
# Do not specify python packages here. Use test-requirements.txt
# and tox instead. ie. The venv is constructed before bundletester
# is invoked.
#python-packages:
reset_timeout: 600
charm_name: ceph-mon
gate_bundles:
- bionic-stein
- bionic-rocky
- bionic-queens
- xenial-queens
- xenial-pike
- xenial-ocata
- xenial-mitaka
- trusty-mitaka
smoke_bundles:
- bionic-queens
dev_bundles:
- cosmic-rocky
- disco-stein
configure:
- zaza.openstack.charm_tests.glance.setup.add_lts_image
tests:
- zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
- zaza.openstack.charm_tests.ceph.tests.CephRelationTest
- zaza.openstack.charm_tests.ceph.tests.CephTest
- zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
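Each entry under tests: is a dotted import path that zaza resolves against the zaza.openstack package and runs in order against each deployed bundle, after the configure: step (here, adding an Ubuntu LTS image to glance) has completed. A hypothetical sketch of what such a class looks like, assuming zaza.model.run_on_unit as provided by zaza at this time; the real classes live in zaza.openstack.charm_tests.ceph:
# Hypothetical example only -- not one of the classes referenced above.
import unittest

import zaza.model


class ExampleCephHealthTest(unittest.TestCase):
    """Check that the deployed ceph-mon cluster reports health."""

    def test_ceph_health(self):
        # run_on_unit is assumed to return the action result dict
        # ('Code', 'Stdout', 'Stderr') for a command run on the unit.
        result = zaza.model.run_on_unit('ceph-mon/0', 'sudo ceph health')
        self.assertEqual(result['Code'], '0')
        self.assertIn('HEALTH', result['Stdout'])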

tox.ini

@@ -15,6 +15,7 @@ install_command =
commands = stestr run {posargs}
whitelist_externals = juju
passenv = HOME TERM AMULET_* CS_API_*
deps = -r{toxinidir}/test-requirements.txt
[testenv:py27]
basepython = python2.7
@@ -78,49 +79,21 @@ omit =
basepython = python3
commands = {posargs}
[testenv:func27-noop]
# DRY RUN - For Debug
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
[testenv:func27]
# Charm Functional Test
# Run all gate tests which are +x (expected to always pass)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:func]
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
functest-run-suite --keep-model
[testenv:func27-smoke]
# Charm Functional Test
# Run a specific test as an Amulet smoke test (expected to always pass)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:func-smoke]
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
functest-run-suite --keep-model --smoke
[testenv:func27-dfs]
# Charm Functional Test
# Run all deploy-from-source tests which are +x (may not always pass!)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:func-dev]
basepython = python3
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
[testenv:func27-dev]
# Charm Functional Test
# Run all development test targets which are +x (may not always pass!)
basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
functest-run-suite --keep-model --dev
[flake8]
ignore = E402,E226
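The three replacement targets map onto the bundle lists declared in tests.yaml above: func exercises gate_bundles, func-smoke exercises smoke_bundles, and func-dev exercises dev_bundles (functest-run-suite reads tests.yaml from the charm's tests directory by default). A small sketch of that mapping, reusing the assumed tests/tests.yaml path:
# Sketch: which bundles each new tox target would exercise (path is an assumption).
import yaml

with open('tests/tests.yaml') as f:
    spec = yaml.safe_load(f)

print('tox -e func       ->', spec['gate_bundles'])
print('tox -e func-smoke ->', spec['smoke_bundles'])
print('tox -e func-dev   ->', spec['dev_bundles'])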