Add Victoria to the test gate

Also sync charm-helpers.
Also take trusty-mitaka out of the gate because of the linked bug.
Also fix the Victoria bundles, which were trying to deploy
percona-cluster.

Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/434
Change-Id: I575d00b993fbff33d80956278b01e87e434713e0
Related-Bug: #1877076
Aurelien Lourot 2020-08-28 14:05:05 +02:00
parent 8612868ce6
commit 74dc156660
9 changed files with 385 additions and 122 deletions


@@ -19,7 +19,3 @@ bin/charm_helpers_sync.py:
 sync: bin/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
-
-publish: lint unit_test
-	bzr push lp:charms/cinder-backup
-	bzr push lp:charms/trusty/cinder-backup


@@ -34,12 +34,14 @@ from charmhelpers.core.hookenv import (
     WARNING,
 )
 from charmhelpers.contrib.openstack.ip import (
-    ADMIN,
     resolve_address,
     get_vip_in_network,
-    INTERNAL,
-    PUBLIC,
-    ADDRESS_MAP)
+    ADDRESS_MAP,
+    get_default_api_bindings,
+)
+from charmhelpers.contrib.network.ip import (
+    get_relation_ip,
+)
 from charmhelpers.core.host import (
     mkdir,
@@ -113,44 +115,118 @@ class CertRequest(object):
         return req


-def get_certificate_request(json_encode=True):
-    """Generate a certificatee requests based on the network confioguration
+def get_certificate_request(json_encode=True, bindings=None):
+    """Generate a certificate requests based on the network configuration
+
+    :param json_encode: Encode request in JSON or not. Used for setting
+                        directly on a relation.
+    :type json_encode: boolean
+    :param bindings: List of bindings to check in addition to default api
+                     bindings.
+    :type bindings: list of strings
+    :returns: CertRequest request as dictionary or JSON string.
+    :rtype: Union[dict, json]
     """
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = set(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
     req = CertRequest(json_encode=json_encode)
     req.add_hostname_cn()
     # Add os-hostname entries
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        net_config = config(ADDRESS_MAP[net_type]['override'])
+    _sans = get_certificate_sans()
+
+    # Handle specific hostnames per binding
+    for binding in bindings:
+        hostname_override = config(ADDRESS_MAP[binding]['override'])
         try:
-            net_addr = resolve_address(endpoint_type=net_type)
+            net_addr = resolve_address(endpoint_type=binding)
             ip = network_get_primary_address(
-                ADDRESS_MAP[net_type]['binding'])
+                ADDRESS_MAP[binding]['binding'])
             addresses = [net_addr, ip]
             vip = get_vip_in_network(resolve_network_cidr(ip))
             if vip:
                 addresses.append(vip)
-            if net_config:
+            # Add hostname certificate request
+            if hostname_override:
                 req.add_entry(
-                    net_type,
-                    net_config,
+                    binding,
+                    hostname_override,
                     addresses)
-            else:
-                # There is network address with no corresponding hostname.
-                # Add the ip to the hostname cert to allow for this.
-                req.add_hostname_cn_ip(addresses)
+                # Remove hostname specific addresses from _sans
+                for addr in addresses:
+                    try:
+                        _sans.remove(addr)
+                    except (ValueError, KeyError):
+                        pass
         except NoNetworkBinding:
             log("Skipping request for certificate for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
+                "local address found".format(binding), WARNING)
+    # Gurantee all SANs are covered
+    # These are network addresses with no corresponding hostname.
+    # Add the ips to the hostname cert to allow for this.
+    req.add_hostname_cn_ip(_sans)
     return req.get_request()
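For clarity, the binding handling above merges any caller-supplied bindings with the default API bindings before iterating. A minimal standalone sketch of that merge, runnable outside a Juju hook; `merge_bindings` and the hard-coded default list are illustrative stand-ins for the real code path through `get_default_api_bindings()`:

```python
# Illustrative sketch only: mirrors the bindings handling in
# get_certificate_request() without needing a Juju hook environment.
DEFAULT_API_BINDINGS = ['internal', 'admin', 'public']  # cf. get_default_api_bindings()


def merge_bindings(extra_bindings=None):
    """Return the bindings a certificate request would cover."""
    if extra_bindings:
        # Extra bindings are combined with the defaults; set() drops duplicates.
        return set(extra_bindings + DEFAULT_API_BINDINGS)
    # Without extra bindings, only the default API bindings are used.
    return DEFAULT_API_BINDINGS


print(merge_bindings())          # ['internal', 'admin', 'public']
print(merge_bindings(['data']))  # set of 'data' plus the defaults (order varies)
```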
+
+
+def get_certificate_sans(bindings=None):
+    """Get all possible IP addresses for certificate SANs.
+    """
+    _sans = [unit_get('private-address')]
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = set(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
+
+    for binding in bindings:
+        # Check for config override
+        try:
+            net_config = config(ADDRESS_MAP[binding]['config'])
+        except KeyError:
+            # There is no configuration network for this binding name
+            net_config = None
+        # Using resolve_address is likely redundant. Keeping it here in
+        # case there is an edge case it handles.
+        net_addr = resolve_address(endpoint_type=binding)
+        ip = get_relation_ip(binding, cidr_network=net_config)
+        _sans = _sans + [net_addr, ip]
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            _sans.append(vip)
+    return set(_sans)
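The new helper gathers the unit's private address plus, per binding, the resolved address, the relation IP and any matching VIP, then de-duplicates the result. A rough standalone equivalent, with hard-coded addresses standing in for what `unit_get`, `resolve_address`, `get_relation_ip` and `get_vip_in_network` would return:

```python
def collect_sans(private_address, per_binding_addresses):
    """De-duplicate every candidate address into one SAN set."""
    sans = [private_address]
    for addresses in per_binding_addresses.values():
        # Each binding can contribute a resolved address, a relation IP
        # and optionally a VIP; empty entries are skipped.
        sans.extend(addr for addr in addresses if addr)
    return set(sans)


example = {
    'internal': ['10.0.0.10', '10.0.0.10'],                       # duplicates collapse
    'admin': ['10.10.0.10', '10.10.0.10'],
    'public': ['203.0.113.10', '203.0.113.10', '203.0.113.100'],  # address + VIP
}
print(collect_sans('10.0.0.10', example))
```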
 def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
     """Create symlinks for SAN records

     :param ssl_dir: str Directory to create symlinks in
     :param custom_hostname_link: str Additional link to be created
     """
+    # This includes the hostname cert and any specific bindng certs:
+    # admin, internal, public
+    req = get_certificate_request(json_encode=False)["cert_requests"]
+    # Specific certs
+    for cert_req in req.keys():
+        requested_cert = os.path.join(
+            ssl_dir,
+            'cert_{}'.format(cert_req))
+        requested_key = os.path.join(
+            ssl_dir,
+            'key_{}'.format(cert_req))
+        for addr in req[cert_req]['sans']:
+            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+            if os.path.isfile(requested_cert) and not os.path.isfile(cert):
+                os.symlink(requested_cert, cert)
+                os.symlink(requested_key, key)
+
+    # Handle custom hostnames
     hostname = get_hostname(unit_get('private-address'))
     hostname_cert = os.path.join(
         ssl_dir,
@@ -158,18 +234,6 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
     hostname_key = os.path.join(
         ssl_dir,
         'key_{}'.format(hostname))
-    # Add links to hostname cert, used if os-hostname vars not set
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        try:
-            addr = resolve_address(endpoint_type=net_type)
-            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
-            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
-            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
-                os.symlink(hostname_cert, cert)
-                os.symlink(hostname_key, key)
-        except NoNetworkBinding:
-            log("Skipping creating cert symlink for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
     if custom_hostname_link:
         custom_cert = os.path.join(
             ssl_dir,
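Judging from how the new loop consumes the request (`cert_requests[<cn>]['sans']`), the per-address symlinks it creates look roughly like the sketch below. The CNs, addresses and temporary `ssl_dir` are invented for illustration; only the naming scheme and the `isfile` guard mirror the charm code:

```python
import os
import tempfile

# Hypothetical request shape, inferred from the loop above: one entry per
# requested CN, each listing the SANs that CN's certificate covers.
cert_requests = {
    'cinder.internal.example.com': {'sans': ['10.0.0.10']},
    'cinder.public.example.com': {'sans': ['203.0.113.10', '203.0.113.100']},
}

ssl_dir = tempfile.mkdtemp()
for cn, entry in cert_requests.items():
    requested_cert = os.path.join(ssl_dir, 'cert_{}'.format(cn))
    requested_key = os.path.join(ssl_dir, 'key_{}'.format(cn))
    # Pretend the per-CN cert/key pair has already been written out.
    open(requested_cert, 'w').close()
    open(requested_key, 'w').close()
    for addr in entry['sans']:
        cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
        key = os.path.join(ssl_dir, 'key_{}'.format(addr))
        # Same guard as the charm code: only link when the CN files exist
        # and no per-address file is already in the way.
        if os.path.isfile(requested_cert) and not os.path.isfile(cert):
            os.symlink(requested_cert, cert)
            os.symlink(requested_key, key)

print(sorted(os.listdir(ssl_dir)))  # cert_/key_ per CN plus per-address links
```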


@@ -33,6 +33,7 @@ INTERNAL = 'int'
 ADMIN = 'admin'
 ACCESS = 'access'

+# TODO: reconcile 'int' vs 'internal' binding names
 ADDRESS_MAP = {
     PUBLIC: {
         'binding': 'public',
@@ -58,6 +59,14 @@ ADDRESS_MAP = {
         'fallback': 'private-address',
         'override': 'os-access-hostname',
     },
+    # Note (thedac) bridge to begin the reconciliation between 'int' vs
+    # 'internal' binding names
+    'internal': {
+        'binding': 'internal',
+        'config': 'os-internal-network',
+        'fallback': 'private-address',
+        'override': 'os-internal-hostname',
+    },
 }
@@ -195,3 +204,10 @@ def get_vip_in_network(network):
         if is_address_in_network(network, vip):
             matching_vip = vip
     return matching_vip
+
+
+def get_default_api_bindings():
+    _default_bindings = []
+    for binding in [INTERNAL, ADMIN, PUBLIC]:
+        _default_bindings.append(ADDRESS_MAP[binding]['binding'])
+    return _default_bindings
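A quick sketch of what `get_default_api_bindings()` evaluates to, with only the relevant slice of `ADDRESS_MAP` inlined. It assumes, as the TODO note above implies, that the existing `'int'` key already maps to the `'internal'` binding name:

```python
# Trimmed, illustrative copy of ADDRESS_MAP: only the keys the helper touches.
INTERNAL, ADMIN, PUBLIC = 'int', 'admin', 'public'
ADDRESS_MAP = {
    INTERNAL: {'binding': 'internal'},   # key 'int' but binding 'internal'
    ADMIN: {'binding': 'admin'},
    PUBLIC: {'binding': 'public'},
}


def get_default_api_bindings():
    # Returns the binding names, not the map keys.
    return [ADDRESS_MAP[binding]['binding'] for binding in (INTERNAL, ADMIN, PUBLIC)]


print(get_default_api_bindings())  # ['internal', 'admin', 'public']
```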


@@ -230,7 +230,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('ussuri',
         ['2.24.0', '2.25.0']),
     ('victoria',
-        ['2.25.0']),
+        ['2.25.0', '2.26.0']),
 ])

 # >= Liberty version->codename mapping


@@ -41,6 +41,7 @@ from subprocess import (
 )
 from charmhelpers import deprecate
 from charmhelpers.core.hookenv import (
+    application_name,
     config,
     service_name,
     local_unit,
@@ -162,6 +163,17 @@ def get_osd_settings(relation_name):
     return _order_dict_by_key(osd_settings)


+def send_application_name(relid=None):
+    """Send the application name down the relation.
+
+    :param relid: Relation id to set application name in.
+    :type relid: str
+    """
+    relation_set(
+        relation_id=relid,
+        relation_settings={'application-name': application_name()})
+
+
 def send_osd_settings():
     """Pass on requested OSD settings to osd units."""
     try:
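`send_application_name()` only works inside a Juju hook, so a sketch of exercising it with mocks instead. It assumes the synced charm-helpers tree is importable as `charmhelpers` (inside the charm the copy lives under `hooks/charmhelpers`), and the relation id and application name are made up:

```python
# Sketch: verify what send_application_name() publishes, without a Juju hook.
from unittest import mock

from charmhelpers.contrib.storage.linux import ceph

with mock.patch.object(ceph, 'relation_set') as relation_set, \
        mock.patch.object(ceph, 'application_name',
                          return_value='cinder-backup'):
    ceph.send_application_name(relid='ceph:42')
    # The helper sets a single 'application-name' key on the given relation.
    relation_set.assert_called_once_with(
        relation_id='ceph:42',
        relation_settings={'application-name': 'cinder-backup'})
```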
@@ -1074,7 +1086,10 @@ def create_erasure_profile(service, profile_name,
                            erasure_plugin_technique=None):
     """Create a new erasure code profile if one does not already exist for it.

-    Updates the profile if it exists. Please refer to [0] for more details.
+    Profiles are considered immutable so will not be updated if the named
+    profile already exists.
+
+    Please refer to [0] for more details.

     0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -1110,6 +1125,11 @@ def create_erasure_profile(service, profile_name,
     :type erasure_plugin_technique: str
     :return: None. Can raise CalledProcessError, ValueError or AssertionError
     """
+    if erasure_profile_exists(service, profile_name):
+        log('EC profile {} exists, skipping update'.format(profile_name),
+            level=WARNING)
+        return
+
     plugin_techniques = {
         'jerasure': [
             'reed_sol_van',
@@ -1209,9 +1229,6 @@ def create_erasure_profile(service, profile_name,
     if scalar_mds:
         cmd.append('scalar-mds={}'.format(scalar_mds))

-    if erasure_profile_exists(service, profile_name):
-        cmd.append('--force')
-
     check_call(cmd)
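The behavioural change here: erasure-code profiles are now treated as immutable, so a repeat call with an existing profile name logs a warning and returns instead of re-running the `ceph` command with `--force`. A small standalone sketch of that create-once guard; the in-memory "cluster" and names are illustrative, not the charm's actual CLI call:

```python
import logging

logging.basicConfig(level=logging.INFO)

_profiles = {}  # stand-in for the profiles the Ceph cluster already knows


def profile_exists(name):
    return name in _profiles


def create_profile(name, **attrs):
    """Create-once semantics: never update an existing profile."""
    if profile_exists(name):
        logging.warning('EC profile %s exists, skipping update', name)
        return
    _profiles[name] = attrs  # in the charm this is a `ceph` CLI invocation


create_profile('myprofile', k=3, m=2)   # created
create_profile('myprofile', k=5, m=3)   # warning, profile left untouched
print(_profiles['myprofile'])           # {'k': 3, 'm': 2}
```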
@@ -2198,6 +2215,7 @@ def send_request_if_needed(request, relation='ceph'):
     for rid in relation_ids(relation):
         log('Sending request {}'.format(request.request_id), level=DEBUG)
         relation_set(relation_id=rid, broker_req=request.request)
+        relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()})


 def has_broker_rsp(rid=None, unit=None):
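With this change each broker request also publishes the requesting unit's name on the relation, presumably so the ceph side can key its response to that unit. Illustrative relation data for one unit (keys taken from the code above, values made up):

```python
# Example of what a cinder-backup unit would now set on the ceph relation.
relation_data = {
    'broker_req': '<JSON-encoded broker request>',
    'unit-name': 'cinder-backup/0',   # new key added by this change
}
print(relation_data['unit-name'])
```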


@@ -15,5 +15,7 @@ flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev  # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
+juju!=2.8.3  # this version causes spurious JujuAPIError's
 git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
 git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack


@@ -1,55 +1,73 @@
+variables:
+  openstack-origin: &openstack-origin cloud:focal-victoria
+
 series: focal
-relations:
-  - - cinder-backup:ceph
-    - ceph-mon:client
-  - - cinder-ceph:ceph
-    - ceph-mon:client
-  - - ceph-osd:mon
-    - ceph-mon:osd
-  - - cinder:storage-backend
-    - cinder-ceph:storage-backend
-  - - cinder:backup-backend
-    - cinder-backup:backup-backend
-  - - keystone:shared-db
-    - percona-cluster:shared-db
-  - - cinder:shared-db
-    - percona-cluster:shared-db
-  - - cinder:identity-service
-    - keystone:identity-service
-  - - cinder:amqp
-    - rabbitmq-server:amqp
-  - - glance:image-service
-    - nova-compute:image-service
-  - - glance:identity-service
-    - keystone:identity-service
-  - - glance:shared-db
-    - percona-cluster:shared-db
-  - - nova-compute:ceph-access
-    - cinder-ceph:ceph-access
-  - - nova-compute:amqp
-    - rabbitmq-server:amqp
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+
 applications:
-  percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
-    num_units: 1
+
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
     options:
-      innodb-buffer-pool-size: 256M
-      max-connections: 1000
-      source: cloud:focal-victoria
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
   keystone:
     charm: cs:~openstack-charmers-next/keystone
     num_units: 1
     options:
-      openstack-origin: cloud:focal-victoria
+      openstack-origin: *openstack-origin
+    to:
+      - '3'
+
   rabbitmq-server:
     charm: cs:~openstack-charmers-next/rabbitmq-server
     num_units: 1
+    to:
+      - '4'
+
   ceph-mon:
     charm: cs:~openstack-charmers-next/ceph-mon
     num_units: 3
     options:
       monitor-count: '3'
-      source: cloud:focal-victoria
+      source: *openstack-origin
+    to:
+      - '5'
+      - '6'
+      - '7'
+
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
     num_units: 3
@@ -57,26 +75,90 @@ applications:
       osd-devices: 'cinder,10G'
     options:
       osd-devices: '/dev/test-non-existent'
-      source: cloud:focal-victoria
+      source: *openstack-origin
+    to:
+      - '8'
+      - '9'
+      - '10'
+
   cinder:
     charm: cs:~openstack-charmers-next/cinder
     num_units: 1
     options:
       block-device: 'None'
       glance-api-version: '2'
-      openstack-origin: cloud:focal-victoria
+      openstack-origin: *openstack-origin
+    to:
+      - '11'
+
   cinder-backup:
     charm: ../../../cinder-backup
-    series: focal
     options:
       ceph-osd-replication-count: 3
+
   cinder-ceph:
     charm: cs:~openstack-charmers-next/cinder-ceph
     options:
       ceph-osd-replication-count: 3
+
   glance:
     charm: cs:~openstack-charmers-next/glance
     num_units: 1
+    to:
+      - '12'
+
   nova-compute:
     charm: cs:~openstack-charmers-next/nova-compute
     num_units: 1
+    to:
+      - '13'
+
+relations:
+  - - 'cinder-backup:ceph'
+    - 'ceph-mon:client'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-mon:client'
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'cinder:storage-backend'
+    - 'cinder-ceph:storage-backend'
+  - - 'cinder:backup-backend'
+    - 'cinder-backup:backup-backend'
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'


@@ -1,55 +1,73 @@
+variables:
+  openstack-origin: &openstack-origin distro
+
 series: groovy
-relations:
-  - - cinder-backup:ceph
-    - ceph-mon:client
-  - - cinder-ceph:ceph
-    - ceph-mon:client
-  - - ceph-osd:mon
-    - ceph-mon:osd
-  - - cinder:storage-backend
-    - cinder-ceph:storage-backend
-  - - cinder:backup-backend
-    - cinder-backup:backup-backend
-  - - keystone:shared-db
-    - percona-cluster:shared-db
-  - - cinder:shared-db
-    - percona-cluster:shared-db
-  - - cinder:identity-service
-    - keystone:identity-service
-  - - cinder:amqp
-    - rabbitmq-server:amqp
-  - - glance:image-service
-    - nova-compute:image-service
-  - - glance:identity-service
-    - keystone:identity-service
-  - - glance:shared-db
-    - percona-cluster:shared-db
-  - - nova-compute:ceph-access
-    - cinder-ceph:ceph-access
-  - - nova-compute:amqp
-    - rabbitmq-server:amqp
+
+comment:
+- 'machines section to decide order of deployment. database sooner = faster'
+
+machines:
+  '0':
+    constraints: mem=3072M
+  '1':
+    constraints: mem=3072M
+  '2':
+    constraints: mem=3072M
+  '3':
+  '4':
+  '5':
+  '6':
+  '7':
+  '8':
+  '9':
+  '10':
+  '11':
+  '12':
+  '13':
+
 applications:
-  percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
-    num_units: 1
+
+  keystone-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  cinder-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+  glance-mysql-router:
+    charm: cs:~openstack-charmers-next/mysql-router
+
+  mysql-innodb-cluster:
+    charm: cs:~openstack-charmers-next/mysql-innodb-cluster
+    num_units: 3
     options:
-      innodb-buffer-pool-size: 256M
-      max-connections: 1000
-      source: distro
+      source: *openstack-origin
+    to:
+      - '0'
+      - '1'
+      - '2'
+
   keystone:
     charm: cs:~openstack-charmers-next/keystone
     num_units: 1
     options:
-      openstack-origin: distro
+      openstack-origin: *openstack-origin
+    to:
+      - '3'
+
   rabbitmq-server:
     charm: cs:~openstack-charmers-next/rabbitmq-server
     num_units: 1
+    to:
+      - '4'
+
   ceph-mon:
     charm: cs:~openstack-charmers-next/ceph-mon
     num_units: 3
     options:
       monitor-count: '3'
-      source: distro
+      source: *openstack-origin
+    to:
+      - '5'
+      - '6'
+      - '7'
+
   ceph-osd:
     charm: cs:~openstack-charmers-next/ceph-osd
     num_units: 3
@@ -57,26 +75,90 @@ applications:
      osd-devices: 'cinder,10G'
     options:
       osd-devices: '/dev/test-non-existent'
-      source: distro
+      source: *openstack-origin
+    to:
+      - '8'
+      - '9'
+      - '10'
+
   cinder:
     charm: cs:~openstack-charmers-next/cinder
     num_units: 1
     options:
       block-device: 'None'
       glance-api-version: '2'
-      openstack-origin: distro
+      openstack-origin: *openstack-origin
+    to:
+      - '11'
+
   cinder-backup:
     charm: ../../../cinder-backup
-    series: groovy
     options:
       ceph-osd-replication-count: 3
+
   cinder-ceph:
     charm: cs:~openstack-charmers-next/cinder-ceph
     options:
       ceph-osd-replication-count: 3
+
   glance:
     charm: cs:~openstack-charmers-next/glance
     num_units: 1
+    to:
+      - '12'
+
   nova-compute:
     charm: cs:~openstack-charmers-next/nova-compute
     num_units: 1
+    to:
+      - '13'
+
+relations:
+  - - 'cinder-backup:ceph'
+    - 'ceph-mon:client'
+  - - 'cinder-ceph:ceph'
+    - 'ceph-mon:client'
+  - - 'ceph-osd:mon'
+    - 'ceph-mon:osd'
+  - - 'cinder:storage-backend'
+    - 'cinder-ceph:storage-backend'
+  - - 'cinder:backup-backend'
+    - 'cinder-backup:backup-backend'
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+  - - 'cinder:shared-db'
+    - 'cinder-mysql-router:shared-db'
+  - - 'cinder-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+  - - 'cinder:identity-service'
+    - 'keystone:identity-service'
+  - - 'cinder:amqp'
+    - 'rabbitmq-server:amqp'
+  - - 'glance:image-service'
+    - 'nova-compute:image-service'
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+  - - 'nova-compute:ceph-access'
+    - 'cinder-ceph:ceph-access'
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'


@@ -2,7 +2,6 @@ charm_name: cinder-backup
 smoke_bundles:
   - bionic-train
 gate_bundles:
-  - trusty-mitaka
   - xenial-mitaka
   - xenial-ocata
   - xenial-pike
@@ -12,9 +11,13 @@ gate_bundles:
   - bionic-stein
   - bionic-train
   - bionic-ussuri
-dev_bundles:
-  - focal-ussuri
   - focal-victoria
   - groovy-victoria
+dev_bundles:
+  - trusty-mitaka # fails often because of lp:1877076
+  - focal-ussuri # disabled because of lp:1891626
 tests:
   - zaza.openstack.charm_tests.cinder_backup.tests.CinderBackupTest
+tests_options:
+  force_deploy:
+    - groovy-victoria