Add yoga bundles and release-tool syncs

* charm-helpers sync for classic charms
* sync from release-tools
* switch to release-specific zosci functional tests
* run focal-ussuri as smoke tests
* remove trusty, xenial, and groovy metadata/tests
* drop py35 and add py39

Depends-On: https://review.opendev.org/c/openstack/charm-cinder/+/814177
Change-Id: If8894bf0150c7fd0a73d70d20753d67efe92c0b8
Corey Bryant 2021-10-29 17:00:39 -04:00
parent 3f9e52dd49
commit 79a00cad81
16 changed files with 219 additions and 451 deletions

View File

@@ -1413,7 +1413,8 @@ def incomplete_relation_data(configs, required_interfaces):
             for i in incomplete_relations}


-def do_action_openstack_upgrade(package, upgrade_callback, configs):
+def do_action_openstack_upgrade(package, upgrade_callback, configs,
+                                force_upgrade=False):
     """Perform action-managed OpenStack upgrade.

     Upgrades packages to the configured openstack-origin version and sets
@@ -1427,12 +1428,13 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
     @param package: package name for determining if upgrade available
     @param upgrade_callback: function callback to charm's upgrade function
     @param configs: templating object derived from OSConfigRenderer class
+    @param force_upgrade: perform dist-upgrade regardless of new openstack
     @return: True if upgrade successful; False if upgrade failed or skipped
     """
     ret = False

-    if openstack_upgrade_available(package):
+    if openstack_upgrade_available(package) or force_upgrade:
         if config('action-managed-upgrade'):
             juju_log('Upgrading OpenStack release')
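
Note on the new force_upgrade flag above: a minimal sketch of how a charm's openstack-upgrade action handler might pass it. The package name, the callback, and the bare configs value are illustrative placeholders, not part of this change.

    # Sketch only: 'cinder-common' and upgrade_callback are hypothetical stand-ins.
    from charmhelpers.contrib.openstack.utils import do_action_openstack_upgrade

    def upgrade_callback(configs):
        # A real charm would run its own do_openstack_upgrade() routine here.
        pass

    def openstack_upgrade_action():
        # force_upgrade=True now performs the dist-upgrade even when no newer
        # openstack-origin release is detected for the watched package.
        do_action_openstack_upgrade('cinder-common', upgrade_callback,
                                    configs=None, force_upgrade=True)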
@@ -2599,6 +2601,23 @@ def get_subordinate_release_packages(os_release, package_type='deb'):
     return SubordinatePackages(install, purge)


+def get_subordinate_services():
+    """Iterate over subordinate relations and get service information.
+
+    In a similar fashion as with get_subordinate_release_packages(),
+    principle charms can retrieve a list of services advertised by their
+    subordinate charms. This is useful to know about subordinate services when
+    pausing, resuming or upgrading a principle unit.
+
+    :returns: Name of all services advertised by all subordinates
+    :rtype: Set[str]
+    """
+    services = set()
+    for rdata in container_scoped_relation_get('services'):
+        services |= set(json.loads(rdata or '[]'))
+    return services
+
+
 os_restart_on_change = partial(
     pausable_restart_on_change,
     can_restart_now_f=deferred_events.check_and_record_restart_request,
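
A rough sketch of how a principal charm might fold subordinate-advertised services into its pause/resume handling; the base service list below is an illustrative assumption, not taken from this change.

    from charmhelpers.contrib.openstack.utils import get_subordinate_services

    # Illustrative list of services the principal manages itself.
    PRINCIPAL_SERVICES = ['cinder-api', 'cinder-scheduler', 'cinder-volume']

    def services_to_manage():
        # get_subordinate_services() collects the 'services' key that
        # subordinate charms publish on container-scoped relations.
        return sorted(set(PRINCIPAL_SERVICES) | get_subordinate_services())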

View File

@@ -294,7 +294,6 @@ class BasePool(object):
         # NOTE: Do not perform initialization steps that require live data from
         # a running cluster here. The *Pool classes may be used for validation.
         self.service = service
-        self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         self.op = op or {}

         if op:
@@ -341,7 +340,8 @@ class BasePool(object):
         Do not add calls for a specific pool type here, those should go into
         one of the pool specific classes.
         """
-        if self.nautilus_or_later:
+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+        if nautilus_or_later:
             # Ensure we set the expected pool ratio
             update_pool(
                 client=self.service,
@@ -660,8 +660,9 @@ class ReplicatedPool(BasePool):
         else:
             self.pg_num = self.get_pgs(self.replicas, self.percent_data)

+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         # Create it
-        if self.nautilus_or_later:
+        if nautilus_or_later:
             cmd = [
                 'ceph', '--id', self.service, 'osd', 'pool', 'create',
                 '--pg-num-min={}'.format(
@@ -745,9 +746,9 @@ class ErasurePool(BasePool):
         k = int(erasure_profile['k'])
         m = int(erasure_profile['m'])
         pgs = self.get_pgs(k + m, self.percent_data)
-        self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         # Create it
-        if self.nautilus_or_later:
+        if nautilus_or_later:
             cmd = [
                 'ceph', '--id', self.service, 'osd', 'pool', 'create',
                 '--pg-num-min={}'.format(
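
The Nautilus check above now runs when a pool operation is issued rather than in BasePool.__init__, so the *Pool classes can still be instantiated purely for request validation. A minimal sketch of the same guard, using a hypothetical helper name:

    from charmhelpers.core.host import cmp_pkgrevno

    def pool_create_args(pool_name, pg_num):
        # Probe the installed ceph-common version only at call time.
        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
        if nautilus_or_later:
            # Nautilus+ ships the PG autoscaler, so a minimum PG count can
            # be supplied at pool-creation time.
            return ['--pg-num-min={}'.format(pg_num), pool_name]
        return [pool_name, str(pg_num)]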

View File

@@ -29,6 +29,7 @@ UBUNTU_RELEASES = (
     'groovy',
     'hirsute',
     'impish',
+    'jammy',
 )

View File

@@ -275,6 +275,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('groovy', 'victoria'),
     ('hirsute', 'wallaby'),
     ('impish', 'xena'),
+    ('jammy', 'yoga'),
 ])
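
With 'jammy' added to both tables, the series-to-release mapping resolves Ubuntu 22.04 to Yoga. A toy lookup against the same data; the dict literal below only mirrors the tail of UBUNTU_OPENSTACK_RELEASE for illustration.

    from collections import OrderedDict

    # Mirrors only the tail of UBUNTU_OPENSTACK_RELEASE shown above.
    UBUNTU_OPENSTACK_RELEASE = OrderedDict([
        ('groovy', 'victoria'),
        ('hirsute', 'wallaby'),
        ('impish', 'xena'),
        ('jammy', 'yoga'),
    ])

    def default_openstack_release(series):
        # Return the OpenStack release that ships in a given Ubuntu series.
        return UBUNTU_OPENSTACK_RELEASE.get(series)

    assert default_openstack_release('jammy') == 'yoga'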

View File

@@ -8,7 +8,6 @@ description: |
 tags:
 - miscellaneous
 series:
-- xenial
 - bionic
 - focal
 - groovy

View File

@ -1,22 +1,10 @@
- project:
templates:
- charm-unit-jobs
# Revert this to the project-template when trusty is actually
# removed from the charm-functional-jobs template
# - charm-functional-jobs
check:
jobs:
- xenial-mitaka
- bionic-queens
- bionic-stein
- bionic-train
- bionic-ussuri
- focal-victoria
- focal-wallaby
- focal-xena:
voting: false
- groovy-victoria
- hirsute-wallaby:
voting: false
- impish-xena:
voting: false
- charm-yoga-unit-jobs
- charm-yoga-functional-jobs
- charm-xena-functional-jobs
- charm-wallaby-functional-jobs
- charm-victoria-functional-jobs
- charm-ussuri-functional-jobs
- charm-stein-functional-jobs
- charm-queens-functional-jobs

View File

@@ -7,6 +7,8 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here.
+cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35.
 setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 requests>=2.18.4

View File

@@ -0,0 +1,164 @@
variables:
openstack-origin: &openstack-origin cloud:focal-yoga
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
'12':
'13':
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
cinder-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '3'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
to:
- '4'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
source: *openstack-origin
to:
- '5'
- '6'
- '7'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: *openstack-origin
to:
- '8'
- '9'
- '10'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: *openstack-origin
to:
- '11'
cinder-backup:
charm: ../../../cinder-backup
options:
ceph-osd-replication-count: 3
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
to:
- '12'
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
to:
- '13'
relations:
- - 'cinder-backup:ceph'
- 'ceph-mon:client'
- - 'cinder-ceph:ceph'
- 'ceph-mon:client'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'cinder:storage-backend'
- 'cinder-ceph:storage-backend'
- - 'cinder:backup-backend'
- 'cinder-backup:backup-backend'
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:shared-db'
- 'cinder-mysql-router:shared-db'
- - 'cinder-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'cinder:identity-service'
- 'keystone:identity-service'
- - 'cinder:amqp'
- 'rabbitmq-server:amqp'
- - 'glance:image-service'
- 'nova-compute:image-service'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-compute:ceph-access'
- 'cinder-ceph:ceph-access'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'

View File

@@ -1,7 +1,7 @@
 variables:
   openstack-origin: &openstack-origin distro

-series: groovy
+series: jammy

 comment:
 - 'machines section to decide order of deployment. database sooner = faster'
View File

@@ -1,83 +0,0 @@
series: trusty
relations:
- - cinder-backup:ceph
- ceph-mon:client
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - cinder:storage-backend
- cinder-ceph:storage-backend
- - cinder:backup-backend
- cinder-backup:backup-backend
- - keystone:shared-db
- percona-cluster:shared-db
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - glance:image-service
- nova-compute:image-service
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - nova-compute:amqp
- rabbitmq-server:amqp
applications:
percona-cluster:
charm: cs:trusty/percona-cluster
num_units: 1
options:
innodb-buffer-pool-size: 256M
max-connections: 1000
source: cloud:trusty-mitaka
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:trusty-mitaka
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:trusty-mitaka
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:trusty-mitaka
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:trusty-mitaka
cinder-backup:
charm: ../../../cinder-backup
series: trusty
options:
ceph-osd-replication-count: 3
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1

View File

@@ -1,77 +0,0 @@
series: xenial
relations:
- - cinder-backup:ceph
- ceph-mon:client
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - cinder:storage-backend
- cinder-ceph:storage-backend
- - cinder:backup-backend
- cinder-backup:backup-backend
- - keystone:shared-db
- percona-cluster:shared-db
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - glance:image-service
- nova-compute:image-service
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - nova-compute:amqp
- rabbitmq-server:amqp
applications:
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
innodb-buffer-pool-size: 256M
max-connections: 1000
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
cinder-backup:
charm: ../../../cinder-backup
series: xenial
options:
ceph-osd-replication-count: 3
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1

View File

@@ -1,83 +0,0 @@
series: xenial
relations:
- - cinder-backup:ceph
- ceph-mon:client
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - cinder:storage-backend
- cinder-ceph:storage-backend
- - cinder:backup-backend
- cinder-backup:backup-backend
- - keystone:shared-db
- percona-cluster:shared-db
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - glance:image-service
- nova-compute:image-service
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - nova-compute:amqp
- rabbitmq-server:amqp
applications:
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
innodb-buffer-pool-size: 256M
max-connections: 1000
source: cloud:xenial-ocata
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-ocata
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-ocata
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-ocata
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-ocata
cinder-backup:
charm: ../../../cinder-backup
series: xenial
options:
ceph-osd-replication-count: 3
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1

View File

@@ -1,83 +0,0 @@
series: xenial
relations:
- - cinder-backup:ceph
- ceph-mon:client
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - cinder:storage-backend
- cinder-ceph:storage-backend
- - cinder:backup-backend
- cinder-backup:backup-backend
- - keystone:shared-db
- percona-cluster:shared-db
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - glance:image-service
- nova-compute:image-service
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - nova-compute:amqp
- rabbitmq-server:amqp
applications:
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
innodb-buffer-pool-size: 256M
max-connections: 1000
source: cloud:xenial-pike
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-pike
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-pike
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-pike
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-pike
cinder-backup:
charm: ../../../cinder-backup
series: xenial
options:
ceph-osd-replication-count: 3
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1

View File

@@ -1,83 +0,0 @@
series: xenial
relations:
- - cinder-backup:ceph
- ceph-mon:client
- - cinder-ceph:ceph
- ceph-mon:client
- - ceph-osd:mon
- ceph-mon:osd
- - cinder:storage-backend
- cinder-ceph:storage-backend
- - cinder:backup-backend
- cinder-backup:backup-backend
- - keystone:shared-db
- percona-cluster:shared-db
- - cinder:shared-db
- percona-cluster:shared-db
- - cinder:identity-service
- keystone:identity-service
- - cinder:amqp
- rabbitmq-server:amqp
- - glance:image-service
- nova-compute:image-service
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - nova-compute:ceph-access
- cinder-ceph:ceph-access
- - nova-compute:amqp
- rabbitmq-server:amqp
applications:
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
innodb-buffer-pool-size: 256M
max-connections: 1000
source: cloud:xenial-queens
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:xenial-queens
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
monitor-count: '3'
auth-supported: 'none'
source: cloud:xenial-queens
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
storage:
osd-devices: 'cinder,10G'
options:
osd-devices: '/dev/test-non-existent'
source: cloud:xenial-queens
cinder:
charm: cs:~openstack-charmers-next/cinder
num_units: 1
options:
block-device: 'None'
glance-api-version: '2'
openstack-origin: cloud:xenial-queens
cinder-backup:
charm: ../../../cinder-backup
series: xenial
options:
ceph-osd-replication-count: 3
cinder-ceph:
charm: cs:~openstack-charmers-next/cinder-ceph
options:
ceph-osd-replication-count: 3
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1

View File

@@ -1,33 +1,30 @@
charm_name: cinder-backup
smoke_bundles:
- bionic-train
- focal-ussuri
gate_bundles:
- xenial-mitaka
- bionic-queens
- bionic-stein
- bionic-train
- bionic-ussuri
- focal-ussuri
- focal-victoria
- focal-wallaby
- groovy-victoria
dev_bundles:
- trusty-mitaka # fails often because of lp:1877076
- xenial-ocata
- xenial-pike
- xenial-queens
- bionic-rocky
- focal-ussuri # disabled because of lp:1891626
- focal-xena
- hirsute-wallaby
- impish-xena
dev_bundles:
- bionic-rocky
- bionic-train
- focal-yoga
- jammy-yoga
tests:
- zaza.openstack.charm_tests.cinder_backup.tests.CinderBackupTest
tests_options:
force_deploy:
- groovy-victoria
- hirsute-wallaby
- impish-xena
- jammy-yoga

View File

@@ -61,6 +61,11 @@ basepython = python3.8
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt

+[testenv:py39]
+basepython = python3.9
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:py3]
 basepython = python3
 deps = -r{toxinidir}/requirements.txt