Add yoga bundles and release-tool syncs
* charm-helpers sync for classic charms
* pin pyparsing for aodhclient
* pin cffi for py35
* add non-voting focal-yoga bundle
* add non-voting jammy-yoga bundle
* add series metadata for jammy
* switch xena bundles to voting
* run focal-ussuri as smoke tests
* remove groovy bundles

Change-Id: Icd1043fb0841908caeba593be2dbd07597fccb8b
This commit is contained in:
parent
bf23ef75b2
commit
b9bfe131d7
|
@ -1413,7 +1413,8 @@ def incomplete_relation_data(configs, required_interfaces):
|
|||
for i in incomplete_relations}
|
||||
|
||||
|
||||
def do_action_openstack_upgrade(package, upgrade_callback, configs):
|
||||
def do_action_openstack_upgrade(package, upgrade_callback, configs,
|
||||
force_upgrade=False):
|
||||
"""Perform action-managed OpenStack upgrade.
|
||||
|
||||
Upgrades packages to the configured openstack-origin version and sets
|
||||
|
@ -1427,12 +1428,13 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
|
|||
@param package: package name for determining if upgrade available
|
||||
@param upgrade_callback: function callback to charm's upgrade function
|
||||
@param configs: templating object derived from OSConfigRenderer class
|
||||
@param force_upgrade: perform dist-upgrade regardless of new openstack
|
||||
|
||||
@return: True if upgrade successful; False if upgrade failed or skipped
|
||||
"""
|
||||
ret = False
|
||||
|
||||
if openstack_upgrade_available(package):
|
||||
if openstack_upgrade_available(package) or force_upgrade:
|
||||
if config('action-managed-upgrade'):
|
||||
juju_log('Upgrading OpenStack release')
|
||||
|
||||
|
@ -2599,6 +2601,23 @@ def get_subordinate_release_packages(os_release, package_type='deb'):
|
|||
return SubordinatePackages(install, purge)
|
||||
|
||||
|
||||
def get_subordinate_services():
|
||||
"""Iterate over subordinate relations and get service information.
|
||||
|
||||
In a similar fashion as with get_subordinate_release_packages(),
|
||||
principle charms can retrieve a list of services advertised by their
|
||||
subordinate charms. This is useful to know about subordinate services when
|
||||
pausing, resuming or upgrading a principle unit.
|
||||
|
||||
:returns: Name of all services advertised by all subordinates
|
||||
:rtype: Set[str]
|
||||
"""
|
||||
services = set()
|
||||
for rdata in container_scoped_relation_get('services'):
|
||||
services |= set(json.loads(rdata or '[]'))
|
||||
return services
|
||||
|
||||
|
||||
os_restart_on_change = partial(
|
||||
pausable_restart_on_change,
|
||||
can_restart_now_f=deferred_events.check_and_record_restart_request,
|
||||
|
|
|
@ -294,7 +294,6 @@ class BasePool(object):
|
|||
# NOTE: Do not perform initialization steps that require live data from
|
||||
# a running cluster here. The *Pool classes may be used for validation.
|
||||
self.service = service
|
||||
self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
|
||||
self.op = op or {}
|
||||
|
||||
if op:
|
||||
|
@ -341,7 +340,8 @@ class BasePool(object):
|
|||
Do not add calls for a specific pool type here, those should go into
|
||||
one of the pool specific classes.
|
||||
"""
|
||||
if self.nautilus_or_later:
|
||||
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
|
||||
if nautilus_or_later:
|
||||
# Ensure we set the expected pool ratio
|
||||
update_pool(
|
||||
client=self.service,
|
||||
|
@ -660,8 +660,9 @@ class ReplicatedPool(BasePool):
|
|||
else:
|
||||
self.pg_num = self.get_pgs(self.replicas, self.percent_data)
|
||||
|
||||
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
|
||||
# Create it
|
||||
if self.nautilus_or_later:
|
||||
if nautilus_or_later:
|
||||
cmd = [
|
||||
'ceph', '--id', self.service, 'osd', 'pool', 'create',
|
||||
'--pg-num-min={}'.format(
|
||||
|
@ -745,9 +746,9 @@ class ErasurePool(BasePool):
|
|||
k = int(erasure_profile['k'])
|
||||
m = int(erasure_profile['m'])
|
||||
pgs = self.get_pgs(k + m, self.percent_data)
|
||||
self.nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
|
||||
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
|
||||
# Create it
|
||||
if self.nautilus_or_later:
|
||||
if nautilus_or_later:
|
||||
cmd = [
|
||||
'ceph', '--id', self.service, 'osd', 'pool', 'create',
|
||||
'--pg-num-min={}'.format(
|
||||
|
|
|
@ -29,6 +29,7 @@ UBUNTU_RELEASES = (
|
|||
'groovy',
|
||||
'hirsute',
|
||||
'impish',
|
||||
'jammy',
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -275,6 +275,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
|||
('groovy', 'victoria'),
|
||||
('hirsute', 'wallaby'),
|
||||
('impish', 'xena'),
|
||||
('jammy', 'yoga'),
|
||||
])
|
||||
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@ series:
|
|||
- groovy
|
||||
- hirsute
|
||||
- impish
|
||||
- jammy
|
||||
subordinate: false
|
||||
provides:
|
||||
simplestreams-image-service:
|
||||
|
|
|
@ -11,7 +11,8 @@
|
|||
- focal-ussuri
|
||||
- focal-victoria
|
||||
- focal-wallaby
|
||||
- focal-xena:
|
||||
- focal-xena
|
||||
- focal-yoga:
|
||||
voting: false
|
||||
# hirsute-wallaby disabled due to:
|
||||
# https://bugs.launchpad.net/charm-ceph-osd/+bug/1929732
|
||||
|
@ -19,3 +20,5 @@
|
|||
voting: false
|
||||
- impish-xena:
|
||||
voting: false
|
||||
- jammy-yoga:
|
||||
voting: false
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
# requirements. They are intertwined. Also, Zaza itself should specify
|
||||
# all of its own requirements and if it doesn't, fix it there.
|
||||
#
|
||||
pyparsing<3.0.0 # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here.
|
||||
cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35.
|
||||
setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
|
||||
|
||||
requests>=2.18.4
|
||||
|
|
|
@ -0,0 +1,122 @@
|
|||
variables:
|
||||
source: &source cloud:focal-yoga
|
||||
|
||||
series: focal
|
||||
|
||||
comment:
|
||||
- 'machines section to decide order of deployment. database sooner = faster'
|
||||
|
||||
machines:
|
||||
'0':
|
||||
'1':
|
||||
'2':
|
||||
'3':
|
||||
'4':
|
||||
'5':
|
||||
'6':
|
||||
'7':
|
||||
'8':
|
||||
constraints: "mem=2048"
|
||||
'9':
|
||||
constraints: "mem=2048"
|
||||
'10':
|
||||
constraints: "mem=2048"
|
||||
'11':
|
||||
'12':
|
||||
'13':
|
||||
|
||||
|
||||
applications:
|
||||
|
||||
keystone-mysql-router:
|
||||
charm: cs:~openstack-charmers-next/mysql-router
|
||||
glance-mysql-router:
|
||||
charm: cs:~openstack-charmers-next/mysql-router
|
||||
vault-mysql-router:
|
||||
charm: cs:~openstack-charmers-next/mysql-router
|
||||
|
||||
mysql-innodb-cluster:
|
||||
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
|
||||
num_units: 3
|
||||
options:
|
||||
source: *source
|
||||
to:
|
||||
- '0'
|
||||
- '1'
|
||||
- '2'
|
||||
|
||||
vault:
|
||||
charm: cs:~openstack-charmers-next/vault
|
||||
num_units: 1
|
||||
to:
|
||||
- '3'
|
||||
|
||||
keystone:
|
||||
charm: cs:~openstack-charmers-next/keystone
|
||||
num_units: 1
|
||||
options:
|
||||
openstack-origin: *source
|
||||
to:
|
||||
- '4'
|
||||
|
||||
glance:
|
||||
charm: cs:~openstack-charmers-next/glance
|
||||
num_units: 1
|
||||
options:
|
||||
openstack-origin: *source
|
||||
to:
|
||||
- '5'
|
||||
|
||||
glance-simplestreams-sync:
|
||||
charm: ../../glance-simplestreams-sync
|
||||
num_units: 1
|
||||
to:
|
||||
- '6'
|
||||
|
||||
ceph-radosgw:
|
||||
charm: cs:~openstack-charmers-next/ceph-radosgw
|
||||
num_units: 1
|
||||
options:
|
||||
source: *source
|
||||
to:
|
||||
- '7'
|
||||
|
||||
ceph-osd:
|
||||
charm: cs:~openstack-charmers-next/ceph-osd
|
||||
num_units: 3
|
||||
storage:
|
||||
osd-devices: 'cinder,10G'
|
||||
options:
|
||||
source: *source
|
||||
to:
|
||||
- '8'
|
||||
- '9'
|
||||
- '10'
|
||||
|
||||
ceph-mon:
|
||||
charm: cs:~openstack-charmers-next/ceph-mon
|
||||
num_units: 3
|
||||
options:
|
||||
source: *source
|
||||
to:
|
||||
- '11'
|
||||
- '12'
|
||||
- '13'
|
||||
|
||||
relations:
|
||||
- ['keystone:certificates', 'vault:certificates']
|
||||
- ['glance:certificates', 'vault:certificates']
|
||||
- ['glance-simplestreams-sync:certificates', 'vault:certificates']
|
||||
- ['glance:identity-service', 'keystone:identity-service']
|
||||
- ['glance-simplestreams-sync:identity-service', 'keystone:identity-service']
|
||||
- ['keystone:shared-db','keystone-mysql-router:shared-db']
|
||||
- ['glance:shared-db','glance-mysql-router:shared-db']
|
||||
- ['glance:ceph', 'ceph-mon:client']
|
||||
- ['vault:shared-db','vault-mysql-router:shared-db']
|
||||
- ['keystone-mysql-router:db-router','mysql-innodb-cluster:db-router']
|
||||
- ['glance-mysql-router:db-router','mysql-innodb-cluster:db-router']
|
||||
- ['vault-mysql-router:db-router','mysql-innodb-cluster:db-router']
|
||||
- ['ceph-osd:mon', 'ceph-mon:osd']
|
||||
- ['ceph-radosgw:mon', 'ceph-mon:radosgw']
|
||||
- ['ceph-radosgw:identity-service', 'keystone:identity-service']
|
||||
- ['ceph-radosgw:certificates', 'vault:certificates']
|
|
@ -1,7 +1,7 @@
|
|||
variables:
|
||||
source: &source distro
|
||||
|
||||
series: groovy
|
||||
series: jammy
|
||||
|
||||
comment:
|
||||
- 'machines section to decide order of deployment. database sooner = faster'
|
|
@ -6,27 +6,29 @@ comment:
|
|||
# functest-run-suite ...
|
||||
# functest-deploy --bundle /path/to/gate/bundle
|
||||
gate_bundles:
|
||||
- xenial-mitaka
|
||||
- bionic-queens
|
||||
- bionic-stein
|
||||
- bionic-train
|
||||
- bionic-ussuri
|
||||
- focal-ussuri
|
||||
- focal-victoria
|
||||
- focal-wallaby
|
||||
- focal-xena
|
||||
- hirsute-wallaby
|
||||
- impish-xena
|
||||
|
||||
dev_bundles:
|
||||
- xenial-mitaka
|
||||
- xenial-queens
|
||||
- bionic-rocky
|
||||
- focal-victoria
|
||||
- groovy-victoria
|
||||
- impish-xena
|
||||
- bionic-train
|
||||
- focal-yoga
|
||||
- jammy-yoga
|
||||
|
||||
tests_options:
|
||||
force_deploy:
|
||||
- groovy-victoria
|
||||
- hirsute-wallaby
|
||||
- impish-xena
|
||||
- jammy-yoga
|
||||
|
||||
# functest-run-suite --smoke ...
|
||||
# functest-deploy --bundle /path/to/smoke/bundle
|
||||
|
|
Loading…
Reference in New Issue