Add xena bundles

- Add non-voting focal-xena bundle
- Add non-voting impish-xena bundle
- Rebuild to pick up charm-helpers changes
- Update tox/pip.sh to ensure setuptools<50.0.0
- Remove redundant (and failing) IdentityContext tests
- Remove EOL groovy-* gate tests

Change-Id: I32c8195ff76164de565e6af7c329645be40769f1
Co-authored-by: Aurelien Lourot <aurelien.lourot@canonical.com>
Alex Kavanagh 2021-09-21 14:43:15 +01:00
parent cebcc73380
commit d15ac894a9
44 changed files with 1066 additions and 2499 deletions

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2012-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,7 +13,6 @@
# limitations under the License.
"""Compatibility with the nrpe-external-master charm"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
@ -511,7 +510,7 @@ def add_haproxy_checks(nrpe, unit_name):
def remove_deprecated_check(nrpe, deprecated_services):
"""
Remove checks fro deprecated services in list
Remove checks for deprecated services in list
:param nrpe: NRPE object to remove check from
:type nrpe: NRPE

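For illustration, a hedged sketch of how a charm typically drives these NRPE helpers, assuming the usual charmhelpers.contrib.charmsupport.nrpe layout and hypothetical service names:

    from charmhelpers.contrib.charmsupport.nrpe import (
        NRPE,
        add_haproxy_checks,
        remove_deprecated_check,
    )

    nrpe = NRPE()
    add_haproxy_checks(nrpe, 'keystone-0')           # hypothetical unit name
    remove_deprecated_check(nrpe, ['old-service'])   # hypothetical deprecated service
    nrpe.write()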
View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -22,7 +22,7 @@ Configuration stanzas::
type: boolean
default: true
description: >
If false, a volume is mounted as sepecified in "volume-map"
If false, a volume is mounted as specified in "volume-map"
If true, ephemeral storage will be used, meaning that log data
will only exist as long as the machine. YOU HAVE BEEN WARNED.
volume-map:

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -86,7 +86,7 @@ def is_elected_leader(resource):
2. If the charm is part of a corosync cluster, call corosync to
determine leadership.
3. If the charm is not part of a corosync cluster, the leader is
determined as being "the alive unit with the lowest unit numer". In
determined as being "the alive unit with the lowest unit number". In
other words, the oldest surviving unit.
"""
try:
@ -418,7 +418,7 @@ def get_managed_services_and_ports(services, external_ports,
Return only the services and corresponding ports that are managed by this
charm. This excludes haproxy when there is a relation with hacluster. This
is because this charm passes responsability for stopping and starting
is because this charm passes responsibility for stopping and starting
haproxy to hacluster.
Similarly, if a relation with hacluster exists then the ports returned by

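A hedged usage sketch of the two helpers documented above, assuming they are exposed by charmhelpers.contrib.hahelpers.cluster as in current charm-helpers trees:

    from charmhelpers.contrib.hahelpers.cluster import (
        get_managed_services_and_ports,
        is_elected_leader,
    )

    if is_elected_leader('res_ks_vip'):   # 'res_ks_vip' is an example resource name
        pass  # run leader-only work here, e.g. a one-off migration

    # With an hacluster relation present, haproxy is filtered out because
    # hacluster takes over stopping and starting it; the helper returns the
    # remaining services and their ports as a pair.
    services, ports = get_managed_services_and_ports(['haproxy', 'apache2'],
                                                     [8776, 8786])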
View File

@ -187,7 +187,7 @@ SYS_GID_MAX {{ sys_gid_max }}
#
# Max number of login retries if password is bad. This will most likely be
# overriden by PAM, since the default pam_unix module has it's own built
# overridden by PAM, since the default pam_unix module has it's own built
# in of 3 retries. However, this is a safe fallback in case you are using
# an authentication module that does not enforce PAM_MAXTRIES.
#
@ -235,7 +235,7 @@ USERGROUPS_ENAB yes
#
# Instead of the real user shell, the program specified by this parameter
# will be launched, although its visible name (argv[0]) will be the shell's.
# The program may do whatever it wants (logging, additional authentification,
# The program may do whatever it wants (logging, additional authentication,
# banner, ...) before running the actual shell.
#
# FAKE_SHELL /bin/fakeshell

View File

@ -1,4 +1,4 @@
# Copyright 2016 Canonical Limited.
# Copyright 2016-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -85,7 +85,7 @@ def _get_user_provided_overrides(modules):
def _apply_overrides(settings, overrides, schema):
"""Get overrides config overlayed onto modules defaults.
"""Get overrides config overlaid onto modules defaults.
:param modules: require stack modules config.
:returns: dictionary of modules config with user overrides applied.

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -578,7 +578,7 @@ def get_relation_ip(interface, cidr_network=None):
@returns IPv6 or IPv4 address
"""
# Select the interface address first
# For possible use as a fallback bellow with get_address_in_network
# For possible use as a fallback below with get_address_in_network
try:
# Get the interface specific IP
address = network_get_primary_address(interface)

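A brief, hedged example of the helper documented above, assuming the charmhelpers.contrib.network.ip module path:

    from charmhelpers.contrib.network.ip import get_relation_ip

    # Prefer the address bound to the 'internal' binding/interface; fall back
    # to an address within the given CIDR if network-get is unavailable.
    addr = get_relation_ip('internal', cidr_network='10.20.0.0/24')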
View File

@ -1,13 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,387 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OPENSTACK_RELEASES_PAIRS
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresonding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the ~openstack-charmers
base_charms = {
'mysql': ['trusty'],
'mongodb': ['trusty'],
'nrpe': ['trusty', 'xenial'],
}
for svc in other_services:
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if svc['name'] in base_charms:
# NOTE: not all charms have support for all series we
# want/need to test against, so fix to most recent
# that each base charm supports
target_series = self.series
if self.series not in base_charms[svc['name']]:
target_series = base_charms[svc['name']][-1]
svc['location'] = 'cs:{}/{}'.format(target_series,
svc['name'])
elif self.stable:
svc['location'] = 'cs:{}/{}'.format(self.series,
svc['name'])
else:
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
self.series,
svc['name']
)
return other_services
def _add_services(self, this_service, other_services, use_source=None,
no_origin=None):
"""Add services to the deployment and optionally set
openstack-origin/source.
:param this_service dict: Service dictionary describing the service
whose amulet tests are being run
:param other_services dict: List of service dictionaries describing
the services needed to support the target
service
:param use_source list: List of services which use the 'source' config
option rather than 'openstack-origin'
:param no_origin list: List of services which do not support setting
the Cloud Archive.
Service Dict:
{
'name': str charm-name,
'units': int number of units,
'constraints': dict of juju constraints,
'location': str location of charm,
}
eg
this_service = {
'name': 'openvswitch-odl',
'constraints': {'mem': '8G'},
}
other_services = [
{
'name': 'nova-compute',
'units': 2,
'constraints': {'mem': '4G'},
'location': cs:~bob/xenial/nova-compute
},
{
'name': 'mysql',
'constraints': {'mem': '2G'},
},
{'neutron-api-odl'}]
use_source = ['mysql']
no_origin = ['neutron-api-odl']
"""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
use_source = use_source or []
no_origin = no_origin or []
# Charms which should use the source config option
use_source = list(set(
use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw', 'ceph-mon',
'ceph-proxy', 'percona-cluster', 'lxd']))
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = list(set(
no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
'nrpe', 'openvswitch-odl', 'neutron-api-odl',
'odl-controller', 'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt',
'ceilometer-agent']))
if self.openstack:
for svc in services:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjuction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjuction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
if not timeout:
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
self.log.info('Waiting for extended status on units for {}s...'
''.format(timeout))
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
# Check for idleness
self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
setattr(self, os_pair, i)
releases = {
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('xenial', None): self.xenial_mitaka,
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
('xenial', 'cloud:xenial-queens'): self.xenial_queens,
('yakkety', None): self.yakkety_newton,
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
('bionic', None): self.bionic_queens,
('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
('bionic', 'cloud:bionic-stein'): self.bionic_stein,
('bionic', 'cloud:bionic-train'): self.bionic_train,
('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri,
('cosmic', None): self.cosmic_rocky,
('disco', None): self.disco_stein,
('eoan', None): self.eoan_train,
('focal', None): self.focal_ussuri,
('focal', 'cloud:focal-victoria'): self.focal_victoria,
('groovy', None): self.groovy_victoria,
}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('trusty', 'icehouse'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
def get_percona_service_entry(self, memory_constraint=None):
"""Return a amulet service entry for percona cluster.
:param memory_constraint: Override the default memory constraint
in the service entry.
:type memory_constraint: str
:returns: Amulet service entry.
:rtype: dict
"""
memory_constraint = memory_constraint or '3072M'
svc_entry = {
'name': 'percona-cluster',
'constraints': {'mem': memory_constraint}}
if self._get_openstack_release() <= self.trusty_mitaka:
svc_entry['location'] = 'cs:trusty/percona-cluster'
return svc_entry
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() == self.trusty_icehouse:
# Icehouse
pools = [
'data',
'metadata',
'rbd',
'cinder-ceph',
'glance'
]
elif (self.trusty_kilo <= self._get_openstack_release() <=
self.zesty_ocata):
# Kilo through Ocata
pools = [
'rbd',
'cinder-ceph',
'glance'
]
else:
# Pike and later
pools = [
'cinder-ceph',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools

File diff suppressed because it is too large.

View File

@ -1,4 +1,4 @@
# Copyright 2014-2018 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Common python helper functions used for OpenStack charm certificats.
# Common python helper functions used for OpenStack charm certificates.
import os
import json
@ -71,7 +71,7 @@ class CertRequest(object):
def add_entry(self, net_type, cn, addresses):
"""Add a request to the batch
:param net_type: str netwrok space name request is for
:param net_type: str network space name request is for
:param cn: str Canonical Name for certificate
:param addresses: [] List of addresses to be used as SANs
"""
@ -85,7 +85,7 @@ class CertRequest(object):
addresses = [ip]
# If a vip is being used without os-hostname config or
# network spaces then we need to ensure the local units
# cert has the approriate vip in the SAN list
# cert has the appropriate vip in the SAN list
vip = get_vip_in_network(resolve_network_cidr(ip))
if vip:
addresses.append(vip)
@ -178,7 +178,7 @@ def get_certificate_request(json_encode=True, bindings=None):
except NoNetworkBinding:
log("Skipping request for certificate for ip in {} space, no "
"local address found".format(binding), WARNING)
# Gurantee all SANs are covered
# Guarantee all SANs are covered
# These are network addresses with no corresponding hostname.
# Add the ips to the hostname cert to allow for this.
req.add_hostname_cn_ip(_sans)
@ -357,7 +357,7 @@ def process_certificates(service_name, relation_id, unit,
bindings=None):
"""Process the certificates supplied down the relation
:param service_name: str Name of service the certifcates are for.
:param service_name: str Name of service the certificates are for.
:param relation_id: str Relation id providing the certs
:param unit: str Unit providing the certs
:param custom_hostname_link: str Name of custom link to create

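As a hedged illustration of the request/processing pair discussed above, assuming the helpers live in charmhelpers.contrib.openstack.cert_utils:

    from charmhelpers.contrib.openstack.cert_utils import (
        get_certificate_request,
        process_certificates,
    )

    # Build a JSON-encoded request with a CN and SAN list per network binding;
    # VIPs are appended to the SANs where applicable, as described above.
    request = get_certificate_request(json_encode=True)

    # Later, when the certificates relation supplies certs for this unit:
    # process_certificates('keystone', relation_id, unit)   # hypothetical args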
View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -25,7 +25,10 @@ import socket
import time
from base64 import b64decode
from subprocess import check_call, CalledProcessError
from subprocess import (
check_call,
check_output,
CalledProcessError)
import six
@ -453,18 +456,24 @@ class IdentityServiceContext(OSContextGenerator):
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
int_host = rdata.get('internal_host')
int_host = format_ipv6_addr(int_host) or int_host
svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
int_protocol = rdata.get('internal_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'),
'service_host': serv_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'internal_host': int_host,
'internal_port': rdata.get('internal_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'internal_protocol': int_protocol,
'api_version': api_version})
if float(api_version) > 2:
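For illustration, after this change the identity-service context carries internal endpoint keys alongside the existing service and auth ones; an invented example of the resulting dictionary shape:

    ctxt = {
        'service_host': '10.0.0.10', 'service_port': '5000',
        'service_protocol': 'http',
        'auth_host': '10.0.0.10', 'auth_port': '35357', 'auth_protocol': 'http',
        'internal_host': '10.0.0.11', 'internal_port': '5000',
        'internal_protocol': 'http',
        'admin_tenant_name': 'services', 'admin_user': 'nova',
        'admin_password': 'example', 'api_version': '3',
    }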
@ -1358,7 +1367,7 @@ class NeutronPortContext(OSContextGenerator):
mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
for entry in ports:
if re.match(mac_regex, entry):
# NIC is in known NICs and does NOT hace an IP address
# NIC is in known NICs and does NOT have an IP address
if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
# If the nic is part of a bridge then don't use it
if is_bridge_member(hwaddr_to_nic[entry]):
@ -1781,6 +1790,10 @@ class NeutronAPIContext(OSContextGenerator):
'rel_key': 'enable-port-forwarding',
'default': False,
},
'enable_fwaas': {
'rel_key': 'enable-fwaas',
'default': False,
},
'global_physnet_mtu': {
'rel_key': 'global-physnet-mtu',
'default': 1500,
@ -1815,6 +1828,11 @@ class NeutronAPIContext(OSContextGenerator):
if ctxt['enable_port_forwarding']:
l3_extension_plugins.append('port_forwarding')
if ctxt['enable_fwaas']:
l3_extension_plugins.append('fwaas_v2')
if ctxt['enable_nfg_logging']:
l3_extension_plugins.append('fwaas_v2_log')
ctxt['l3_extension_plugins'] = l3_extension_plugins
return ctxt
@ -2379,6 +2397,12 @@ class DHCPAgentContext(OSContextGenerator):
ctxt['enable_metadata_network'] = True
ctxt['enable_isolated_metadata'] = True
ctxt['append_ovs_config'] = False
cmp_release = CompareOpenStackReleases(
os_release('neutron-common', base='icehouse'))
if cmp_release >= 'queens' and config('enable-dpdk'):
ctxt['append_ovs_config'] = True
return ctxt
@staticmethod
@ -2570,22 +2594,48 @@ class OVSDPDKDeviceContext(OSContextGenerator):
:returns: hex formatted CPU mask
:rtype: str
"""
num_cores = config('dpdk-socket-cores')
mask = 0
return self.cpu_masks()['dpdk_lcore_mask']
def cpu_masks(self):
"""Get hex formatted CPU masks
The mask is based on using the first config:dpdk-socket-cores
cores of each NUMA node in the unit, followed by the
next config:pmd-socket-cores
:returns: Dict of hex formatted CPU masks
:rtype: Dict[str, str]
"""
num_lcores = config('dpdk-socket-cores')
pmd_cores = config('pmd-socket-cores')
lcore_mask = 0
pmd_mask = 0
for cores in self._numa_node_cores().values():
for core in cores[:num_cores]:
mask = mask | 1 << core
return format(mask, '#04x')
for core in cores[:num_lcores]:
lcore_mask = lcore_mask | 1 << core
for core in cores[num_lcores:][:pmd_cores]:
pmd_mask = pmd_mask | 1 << core
return {
'pmd_cpu_mask': format(pmd_mask, '#04x'),
'dpdk_lcore_mask': format(lcore_mask, '#04x')}
def socket_memory(self):
"""Formatted list of socket memory configuration per NUMA node
"""Formatted list of socket memory configuration per socket.
:returns: socket memory configuration per NUMA node
:returns: socket memory configuration per socket.
:rtype: str
"""
lscpu_out = check_output(
['lscpu', '-p=socket']).decode('UTF-8').strip()
sockets = set()
for line in lscpu_out.split('\n'):
try:
sockets.add(int(line))
except ValueError:
# lscpu output is headed by comments so ignore them.
pass
sm_size = config('dpdk-socket-memory')
node_regex = '/sys/devices/system/node/node*'
mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
mem_list = [str(sm_size) for _ in sockets]
if mem_list:
return ','.join(mem_list)
else:
@ -2650,7 +2700,7 @@ class OVSDPDKDeviceContext(OSContextGenerator):
class BridgePortInterfaceMap(object):
"""Build a map of bridge ports and interaces from charm configuration.
"""Build a map of bridge ports and interfaces from charm configuration.
NOTE: the handling of this detail in the charm is pre-deprecated.
@ -3099,7 +3149,7 @@ class SRIOVContext(OSContextGenerator):
actual = min(int(requested), int(device.sriov_totalvfs))
if actual < int(requested):
log('Requested VFs ({}) too high for device {}. Falling back '
'to value supprted by device: {}'
'to value supported by device: {}'
.format(requested, device.interface_name,
device.sriov_totalvfs),
level=WARNING)

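The mask arithmetic above is easier to see with a small standalone example (plain Python, not charm code; the NUMA layout is invented):

    # First 'dpdk-socket-cores' cores of each NUMA node go into the lcore
    # mask, the next 'pmd-socket-cores' cores into the PMD mask.
    numa_cores = {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}   # example NUMA layout
    num_lcores, pmd_cores = 1, 2

    lcore_mask, pmd_mask = 0, 0
    for cores in numa_cores.values():
        for core in cores[:num_lcores]:
            lcore_mask |= 1 << core
        for core in cores[num_lcores:][:pmd_cores]:
            pmd_mask |= 1 << core

    print(format(lcore_mask, '#04x'))   # 0x11 (cores 0 and 4)
    print(format(pmd_mask, '#04x'))     # 0x66 (cores 1, 2, 5 and 6)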
View File

@ -244,7 +244,7 @@ def get_deferred_restarts():
def clear_deferred_restarts(services):
"""Clear deferred restart events targetted at `services`.
"""Clear deferred restart events targeted at `services`.
:param services: Services with deferred actions to clear.
:type services: List[str]
@ -253,7 +253,7 @@ def clear_deferred_restarts(services):
def process_svc_restart(service):
"""Respond to a service restart having occured.
"""Respond to a service restart having occurred.
:param service: Services that the action was performed against.
:type service: str

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""This script is an implemenation of policy-rc.d
"""This script is an implementation of policy-rc.d
For further information on policy-rc.d see *1

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Various utilies for dealing with Neutron and the renaming from Quantum.
# Various utilities for dealing with Neutron and the renaming from Quantum.
import six
from subprocess import check_output
@ -251,7 +251,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
that require compatability (eg, deploying H with network-manager=quantum,
that require compatibility (eg, deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')

View File

@ -1,4 +1,4 @@
# Copyright 2019 Canonical Ltd
# Copyright 2019-2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -59,7 +59,7 @@ provided:
The functions should be called from the install and upgrade hooks in the charm.
The `maybe_do_policyd_overrides_on_config_changed` function is designed to be
called on the config-changed hook, in that it does an additional check to
ensure that an already overriden policy.d in an upgrade or install hooks isn't
ensure that an already overridden policy.d in an upgrade or install hooks isn't
repeated.
In order the *enable* this functionality, the charm's install, config_changed,
@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release,
restart_handler()
@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead")
@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
"""This function is designed to be called from the config changed hook.

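A hedged sketch of wiring these helpers into charm hooks, assuming charmhelpers.contrib.openstack.policyd and hypothetical hook functions and arguments:

    from charmhelpers.contrib.openstack.policyd import maybe_do_policyd_overrides

    def install():
        maybe_do_policyd_overrides('xena', 'keystone')

    def config_changed():
        # assumption: the config_changed kwarg performs the extra check
        # described above so an already-applied override is not repeated
        maybe_do_policyd_overrides('xena', 'keystone', config_changed=True)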
View File

@ -1,10 +1,22 @@
global
log /var/lib/haproxy/dev/log local0
log /var/lib/haproxy/dev/log local1 notice
# NOTE: on startup haproxy chroot's to /var/lib/haproxy.
#
# Unfortunately the program will open some files prior to the call to
# chroot never to reopen them, and some after. So looking at the on-disk
# layout of haproxy resources you will find some resources relative to /
# such as the admin socket, and some relative to /var/lib/haproxy such as
# the log socket.
#
# The logging socket is (re-)opened after the chroot and must be relative
# to /var/lib/haproxy.
log /dev/log local0
log /dev/log local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0
# The admin socket is opened prior to the chroot never to be reopened, so
# it lives outside the chroot directory in the filesystem.
stats socket /var/run/haproxy/admin.sock mode 600 level admin
stats timeout 2m

View File

@ -15,7 +15,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL}
@ -41,7 +41,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL}
@ -67,7 +67,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL}

View File

@ -15,7 +15,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL}
@ -41,7 +41,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL}
@ -67,7 +67,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL}

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -106,6 +106,8 @@ from charmhelpers.fetch import (
filter_installed_packages,
filter_missing_packages,
ubuntu_apt_pkg as apt,
OPENSTACK_RELEASES,
UBUNTU_OPENSTACK_RELEASE,
)
from charmhelpers.fetch.snap import (
@ -132,54 +134,9 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
])
OPENSTACK_CODENAMES = OrderedDict([
# NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version
# number. This just means the i-th version of the year yyyy.
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
@ -200,6 +157,8 @@ OPENSTACK_CODENAMES = OrderedDict([
('2020.1', 'ussuri'),
('2020.2', 'victoria'),
('2021.1', 'wallaby'),
('2021.2', 'xena'),
('2022.1', 'yoga'),
])
# The ugly duckling - must list releases oldest to newest
@ -701,7 +660,7 @@ def import_key(keyid):
def get_source_and_pgp_key(source_and_key):
"""Look for a pgp key ID or ascii-armor key in the given input.
:param source_and_key: Sting, "source_spec|keyid" where '|keyid' is
:param source_and_key: String, "source_spec|keyid" where '|keyid' is
optional.
:returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
if there was no '|' in the source_and_key string.
@ -721,7 +680,7 @@ def configure_installation_source(source_plus_key):
The functionality is provided by charmhelpers.fetch.add_source()
The difference between the two functions is that add_source() signature
requires the key to be passed directly, whereas this function passes an
optional key by appending '|<key>' to the end of the source specificiation
optional key by appending '|<key>' to the end of the source specification
'source'.
Another difference from add_source() is that the function calls sys.exit(1)
@ -808,7 +767,7 @@ def get_endpoint_notifications(service_names, rel_name='identity-service'):
def endpoint_changed(service_name, rel_name='identity-service'):
"""Whether a new notification has been recieved for an endpoint.
"""Whether a new notification has been received for an endpoint.
:param service_name: Service name eg nova, neutron, placement etc
:type service_name: str
@ -834,7 +793,7 @@ def endpoint_changed(service_name, rel_name='identity-service'):
def save_endpoint_changed_triggers(service_names, rel_name='identity-service'):
"""Save the enpoint triggers in db so it can be tracked if they changed.
"""Save the endpoint triggers in db so it can be tracked if they changed.
:param service_names: List of service name.
:type service_name: List
@ -1502,9 +1461,9 @@ def remote_restart(rel_name, remote_service=None):
if remote_service:
trigger['remote-service'] = remote_service
for rid in relation_ids(rel_name):
# This subordinate can be related to two seperate services using
# This subordinate can be related to two separate services using
# different subordinate relations so only issue the restart if
# the principle is conencted down the relation we think it is
# the principle is connected down the relation we think it is
if related_units(relid=rid):
relation_set(relation_id=rid,
relation_settings=trigger,
@ -1621,7 +1580,7 @@ def manage_payload_services(action, services=None, charm_func=None):
"""Run an action against all services.
An optional charm_func() can be called. It should raise an Exception to
indicate that the function failed. If it was succesfull it should return
indicate that the function failed. If it was successful it should return
None or an optional message.
The signature for charm_func is:
@ -1880,7 +1839,7 @@ def pausable_restart_on_change(restart_map, stopstart=False,
:param post_svc_restart_f: A function run after a service has
restarted.
:type post_svc_restart_f: Callable[[str], None]
:param pre_restarts_wait_f: A function callled before any restarts.
:param pre_restarts_wait_f: A function called before any restarts.
:type pre_restarts_wait_f: Callable[None, None]
:returns: decorator to use a restart_on_change with pausability
:rtype: decorator

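With the hunks above, the release tables move out of openstack.utils and are instead imported from charmhelpers.fetch (a later hunk in this commit adds the re-export there), so consumers can do, for example:

    # Only set on the Ubuntu platform branch of charmhelpers.fetch.
    from charmhelpers.fetch import OPENSTACK_RELEASES, UBUNTU_OPENSTACK_RELEASE

    assert 'xena' in OPENSTACK_RELEASES
    assert UBUNTU_OPENSTACK_RELEASE['impish'] == 'xena'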
View File

@ -1,4 +1,4 @@
# Copyright 2018 Canonical Limited.
# Copyright 2018-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -48,7 +48,7 @@ class VaultKVContext(context.OSContextGenerator):
"but it's not available. Is secrets-stroage relation "
"made, but encrypt option not set?",
level=hookenv.WARNING)
# return an emptry context on hvac import error
# return an empty context on hvac import error
return {}
ctxt = {}
# NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -12,9 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
@ -605,7 +602,7 @@ class BasePool(object):
class Pool(BasePool):
"""Compability shim for any descendents external to this library."""
"""Compatibility shim for any descendents external to this library."""
@deprecate(
'The ``Pool`` baseclass has been replaced by ``BasePool`` class.')
@ -1535,7 +1532,7 @@ def map_block_storage(service, pool, image):
def filesystem_mounted(fs):
"""Determine whether a filesytems is already mounted."""
"""Determine whether a filesystem is already mounted."""
return fs in [f for f, m in mounts()]
@ -1904,7 +1901,7 @@ class CephBrokerRq(object):
set the ceph-mon unit handling the broker
request will set its default value.
:type erasure_profile: str
:param allow_ec_overwrites: allow EC pools to be overriden
:param allow_ec_overwrites: allow EC pools to be overridden
:type allow_ec_overwrites: bool
:raises: AssertionError if provided data is of invalid type/range
"""
@ -1949,7 +1946,7 @@ class CephBrokerRq(object):
:param lrc_locality: Group the coding and data chunks into sets of size locality
(lrc plugin)
:type lrc_locality: int
:param durability_estimator: The number of parity chuncks each of which includes
:param durability_estimator: The number of parity chunks each of which includes
a data chunk in its calculation range (shec plugin)
:type durability_estimator: int
:param helper_chunks: The number of helper chunks to use for recovery operations
@ -2327,7 +2324,7 @@ class CephOSDConfContext(CephConfContext):
settings are in conf['osd_from_client'] and finally settings which do
clash are in conf['osd_from_client_conflict']. Rather than silently drop
the conflicting settings they are provided in the context so they can be
rendered commented out to give some visability to the admin.
rendered commented out to give some visibility to the admin.
"""
def __init__(self, permitted_sections=None):

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -27,7 +27,7 @@ from subprocess import (
##################################################
def deactivate_lvm_volume_group(block_device):
'''
Deactivate any volume gruop associated with an LVM physical volume.
Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume
'''

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2013-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,7 +13,6 @@
# limitations under the License.
"Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
@ -610,7 +609,7 @@ def expected_related_units(reltype=None):
relation_type()))
:param reltype: Relation type to list data for, default is to list data for
the realtion type we are currently executing a hook for.
the relation type we are currently executing a hook for.
:type reltype: str
:returns: iterator
:rtype: types.GeneratorType
@ -627,7 +626,7 @@ def expected_related_units(reltype=None):
@cached
def relation_for_unit(unit=None, rid=None):
"""Get the json represenation of a unit's relation"""
"""Get the json representation of a unit's relation"""
unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid)
for key in relation:
@ -1614,11 +1613,11 @@ def env_proxy_settings(selected_settings=None):
def _contains_range(addresses):
"""Check for cidr or wildcard domain in a string.
Given a string comprising a comma seperated list of ip addresses
Given a string comprising a comma separated list of ip addresses
and domain names, determine whether the string contains IP ranges
or wildcard domains.
:param addresses: comma seperated list of domains and ip addresses.
:param addresses: comma separated list of domains and ip addresses.
:type addresses: str
"""
return (

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -217,7 +217,7 @@ def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d", **kwargs):
"""Resume a system service.
Reenable starting again at boot. Start the service.
Re-enable starting again at boot. Start the service.
:param service_name: the name of the service to resume
:param init_dir: the path to the init dir
@ -727,7 +727,7 @@ class restart_on_change(object):
:param post_svc_restart_f: A function run after a service has
restarted.
:type post_svc_restart_f: Callable[[str], None]
:param pre_restarts_wait_f: A function callled before any restarts.
:param pre_restarts_wait_f: A function called before any restarts.
:type pre_restarts_wait_f: Callable[None, None]
"""
self.restart_map = restart_map
@ -828,7 +828,7 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
:param post_svc_restart_f: A function run after a service has
restarted.
:type post_svc_restart_f: Callable[[str], None]
:param pre_restarts_wait_f: A function callled before any restarts.
:param pre_restarts_wait_f: A function called before any restarts.
:type pre_restarts_wait_f: Callable[None, None]
:returns: result of lambda_f()
:rtype: ANY
@ -880,7 +880,7 @@ def _post_restart_on_change_helper(checksums,
:param post_svc_restart_f: A function run after a service has
restarted.
:type post_svc_restart_f: Callable[[str], None]
:param pre_restarts_wait_f: A function callled before any restarts.
:param pre_restarts_wait_f: A function called before any restarts.
:type pre_restarts_wait_f: Callable[None, None]
"""
if restart_functions is None:
@ -914,7 +914,7 @@ def _post_restart_on_change_helper(checksums,
def pwgen(length=None):
"""Generate a random pasword."""
"""Generate a random password."""
if length is None:
# A random length is ok to use a weak PRNG
length = random.choice(range(35, 45))

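For context, a hedged usage sketch of the decorator whose docstring is touched above (charmhelpers.core.host.restart_on_change; the path and service name are hypothetical):

    from charmhelpers.core.host import restart_on_change

    @restart_on_change({'/etc/my-service/my.conf': ['my-service']})
    def config_changed():
        # (re)render the config file here; if its checksum changes, the
        # mapped service is restarted after the function returns
        pass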
View File

@ -28,6 +28,7 @@ UBUNTU_RELEASES = (
'focal',
'groovy',
'hirsute',
'impish',
)

View File

@ -18,8 +18,11 @@
import six
import re
TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'}
FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'}
def bool_from_string(value):
def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY_STRINGS, assume_false=False):
"""Interpret string value as boolean.
Returns True if value translates to True otherwise False.
@ -32,9 +35,9 @@ def bool_from_string(value):
value = value.strip().lower()
if value in ['y', 'yes', 'true', 't', 'on']:
if value in truthy_strings:
return True
elif value in ['n', 'no', 'false', 'f', 'off']:
elif value in falsey_strings or assume_false:
return False
msg = "Unable to interpret string value '%s' as boolean" % (value)

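A hedged usage sketch of the extended signature above, assuming it is exposed as charmhelpers.core.strutils.bool_from_string:

    from charmhelpers.core.strutils import bool_from_string

    bool_from_string('Yes')                                  # True
    bool_from_string('enabled', truthy_strings={'enabled'})  # True
    bool_from_string('whatever', assume_false=True)          # False, no ValueError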
View File

@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -61,7 +61,7 @@ Here's a fully worked integration example using hookenv.Hooks::
'previous value', prev,
'current value', cur)
# Get some unit specific bookeeping
# Get some unit specific bookkeeping
if not db.get('pkg_key'):
key = urllib.urlopen('https://example.com/pkg_key').read()
db.set('pkg_key', key)
@ -449,7 +449,7 @@ class HookData(object):
'previous value', prev,
'current value', cur)
# Get some unit specific bookeeping
# Get some unit specific bookkeeping
if not db.get('pkg_key'):
key = urllib.urlopen('https://example.com/pkg_key').read()
db.set('pkg_key', key)

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -106,6 +106,8 @@ if __platform__ == "ubuntu":
apt_pkg = fetch.ubuntu_apt_pkg
get_apt_dpkg_env = fetch.get_apt_dpkg_env
get_installed_version = fetch.get_installed_version
OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES
UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE
elif __platform__ == "centos":
yum_search = fetch.yum_search
@ -203,7 +205,7 @@ def plugins(fetch_handlers=None):
classname)
plugin_list.append(handler_class())
except NotImplementedError:
# Skip missing plugins so that they can be ommitted from
# Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(
handler_name))

View File

@ -1,7 +1,7 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -27,7 +27,7 @@ __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def pip_execute(*args, **kwargs):
"""Overriden pip_execute() to stop sys.path being changed.
"""Overridden pip_execute() to stop sys.path being changed.
The act of importing main from the pip module seems to cause add wheels
from the /usr/share/python-wheels which are installed by various tools.
@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None):
"""Create an isolated Python environment."""
if six.PY2:
apt_install('python-virtualenv')
extra_flags = []
else:
apt_install('python3-virtualenv')
apt_install(['python3-virtualenv', 'virtualenv'])
extra_flags = ['--python=python3']
if path:
venv_path = path
@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None):
venv_path = os.path.join(charm_dir(), 'venv')
if not os.path.exists(venv_path):
subprocess.check_call(['virtualenv', venv_path])
subprocess.check_call(['virtualenv', venv_path] + extra_flags)

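A hedged sketch of the virtualenv helper after this change (module path assumed; in recent charm-helpers trees it lives under charmhelpers.fetch.python.packages):

    from charmhelpers.fetch.python.packages import pip_create_virtualenv

    # On Python 3 this now installs the 'virtualenv' package as well and
    # creates the environment with --python=python3.
    pip_create_virtualenv('/srv/charm-venv')   # hypothetical path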
View File

@ -1,4 +1,4 @@
# Copyright 2014-2017 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -65,7 +65,7 @@ def _snap_exec(commands):
retry_count += + 1
if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
raise CouldNotAcquireLockException(
'Could not aquire lock after {} attempts'
'Could not acquire lock after {} attempts'
.format(SNAP_NO_LOCK_RETRY_COUNT))
return_code = e.returncode
log('Snap failed to acquire lock, trying again in {} seconds.'

View File

@ -1,4 +1,4 @@
# Copyright 2014-2015 Canonical Limited.
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -208,12 +208,79 @@ CLOUD_ARCHIVE_POCKETS = {
'wallaby/proposed': 'focal-proposed/wallaby',
'focal-wallaby/proposed': 'focal-proposed/wallaby',
'focal-proposed/wallaby': 'focal-proposed/wallaby',
# Xena
'xena': 'focal-updates/xena',
'focal-xena': 'focal-updates/xena',
'focal-xena/updates': 'focal-updates/xena',
'focal-updates/xena': 'focal-updates/xena',
'xena/proposed': 'focal-proposed/xena',
'focal-xena/proposed': 'focal-proposed/xena',
'focal-proposed/xena': 'focal-proposed/xena',
# Yoga
'yoga': 'focal-updates/yoga',
'focal-yoga': 'focal-updates/yoga',
'focal-yoga/updates': 'focal-updates/yoga',
'focal-updates/yoga': 'focal-updates/yoga',
'yoga/proposed': 'focal-proposed/yoga',
'focal-yoga/proposed': 'focal-proposed/yoga',
'focal-proposed/yoga': 'focal-proposed/yoga',
}
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
'xena',
'yoga',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
('impish', 'xena'),
])
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times.
CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times.
def filter_installed_packages(packages):
@ -246,9 +313,9 @@ def filter_missing_packages(packages):
def apt_cache(*_, **__):
"""Shim returning an object simulating the apt_pkg Cache.
:param _: Accept arguments for compability, not used.
:param _: Accept arguments for compatibility, not used.
:type _: any
:param __: Accept keyword arguments for compability, not used.
:param __: Accept keyword arguments for compatibility, not used.
:type __: any
:returns:Object used to interrogate the system apt and dpkg databases.
:rtype:ubuntu_apt_pkg.Cache
@ -283,7 +350,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
:param fatal: Whether the command's output should be checked and
retried.
:type fatal: bool
:param quiet: if True (default), supress log message to stdout/stderr
:param quiet: if True (default), suppress log message to stdout/stderr
:type quiet: bool
:raises: subprocess.CalledProcessError
"""
@ -397,7 +464,7 @@ def import_key(key):
A Radix64 format keyid is also supported for backwards
compatibility. In this case Ubuntu keyserver will be
queried for a key via HTTPS by its keyid. This method
is less preferrable because https proxy servers may
is less preferable because https proxy servers may
require traffic decryption which is equivalent to a
man-in-the-middle attack (a proxy server impersonates
keyserver TLS certificates and has to be explicitly
@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False):
with be used. If staging is NOT used then the cloud archive [3] will be
added, and the 'ubuntu-cloud-keyring' package will be added for the
current distro.
'<openstack-version>': translate to cloud:<release> based on the current
distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri' or
'distro'.
'<openstack-version>/proposed': as above, but for proposed.
Otherwise the source is not recognised and this is logged to the juju log.
However, no error is raised, unless sys_error_on_exit is True.
@ -592,7 +663,7 @@ def add_source(source, key=None, fail_invalid=False):
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk. ppa and cloud archive keys
are securely added automtically, so sould not be provided.
are securely added automatically, so should not be provided.
@param fail_invalid: (boolean) if True, then the function raises a
SourceConfigError is there is no matching installation source.
@ -600,6 +671,12 @@ def add_source(source, key=None, fail_invalid=False):
@raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
valid pocket in CLOUD_ARCHIVE_POCKETS
"""
# extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
# the list in contrib.openstack.utils as it might not be included in
# classic charms and would break everything. Having OpenStack specific
# code in this file is a bit of an antipattern, anyway.
os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES))
_mapping = OrderedDict([
(r"^distro$", lambda: None), # This is a NOP
(r"^(?:proposed|distro-proposed)$", _add_proposed),
@ -609,6 +686,9 @@ def add_source(source, key=None, fail_invalid=False):
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
(r"^{}\/proposed$".format(os_versions_regex),
_add_bare_openstack_proposed),
(r"^{}$".format(os_versions_regex), _add_bare_openstack),
])
if source is None:
source = ''
@ -640,7 +720,7 @@ def _add_proposed():
Uses get_distrib_codename to determine the correct stanza for
the deb line.
For intel architecutres PROPOSED_POCKET is used for the release, but for
For Intel architectures PROPOSED_POCKET is used for the release, but for
other architectures PROPOSED_PORTS_POCKET is used for the release.
"""
release = get_distrib_codename()
@ -662,7 +742,8 @@ def _add_apt_repository(spec):
series = get_distrib_codename()
spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec],
cmd_env=env_proxy_settings(['https', 'http']))
cmd_env=env_proxy_settings(['https', 'http', 'no_proxy'])
)
def _add_cloud_pocket(pocket):
@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release):
'version ({})'.format(release, os_release, ubuntu_rel))
def _add_bare_openstack(openstack_release):
"""Add cloud or distro based on the release given.
The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
or 'distro' depending on whether the ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
# TODO(ajkavanagh) - surely this means we should be removing cloud archives
# if they exist?
__add_bare_helper(openstack_release, "{}-{}", lambda: None)
def _add_bare_openstack_proposed(openstack_release):
"""Add cloud of distro but with proposed.
The spec given is, say, 'ussuri' but this could apply
cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
__add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed)
def __add_bare_helper(openstack_release, pocket_format, final_function):
"""Helper for _add_bare_openstack[_proposed]
The bulk of the work between the two functions is exactly the same except
for the pocket format and the function that is run if it's the distro
version.
:param openstack_release: the OpenStack codename. e.g. ussuri
:type openstack_release: str
:param pocket_format: the pocket formatter string to construct a pocket str
from the openstack_release and the current ubuntu version.
:type pocket_format: str
:param final_function: the function to call if it is the distro version.
:type final_function: Callable
:raises SourceConfigError on error
"""
ubuntu_version = get_distrib_codename()
possible_pocket = pocket_format.format(ubuntu_version, openstack_release)
if possible_pocket in CLOUD_ARCHIVE_POCKETS:
_add_cloud_pocket(possible_pocket)
return
# Otherwise it's almost certainly the distro version; verify that it
# exists.
try:
assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release
except KeyError:
raise SourceConfigError(
"Invalid ubuntu version {} isn't known to this library"
.format(ubuntu_version))
except AssertionError:
raise SourceConfigError(
'Invalid OpenStack release specified: {} for Ubuntu version {}'
.format(openstack_release, ubuntu_version))
final_function()
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None, quiet=False):
"""Run a command and retry until success or max_retries is reached.

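With the new mapping entries above, a bare OpenStack codename (or '<codename>/proposed') becomes a valid source spec; a hedged example:

    from charmhelpers.fetch import add_source

    # On a focal unit 'xena' maps to the focal-xena cloud archive pocket; on
    # impish (where xena is the distro release) it is effectively 'distro'.
    add_source('xena')
    add_source('xena/proposed')   # same resolution, but using the proposed pockets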
View File

@ -1,4 +1,4 @@
# Copyright 2019 Canonical Ltd
# Copyright 2019-2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -209,7 +209,7 @@ sys.modules[__name__].config = Config()
def init():
"""Compability shim that does nothing."""
"""Compatibility shim that does nothing."""
pass
@ -264,7 +264,7 @@ def version_compare(a, b):
else:
raise RuntimeError('Unable to compare "{}" and "{}", according to '
'our logic they are neither greater, equal nor '
'less than each other.')
'less than each other.'.format(a, b))
class PkgVersion():

View File

@ -28,6 +28,9 @@ def get_platform():
elif "elementary" in current_platform:
# ElementaryOS fails to run tests locally without this.
return "ubuntu"
elif "Pop!_OS" in current_platform:
# Pop!_OS also fails to run tests locally without this.
return "ubuntu"
else:
raise RuntimeError("This module is not supported on {}."
.format(current_platform))

View File

@ -79,9 +79,9 @@ class Crushmap(object):
stdin=crush.stdout)
.decode('UTF-8'))
except CalledProcessError as e:
log("Error occured while loading and decompiling CRUSH map:"
log("Error occurred while loading and decompiling CRUSH map:"
"{}".format(e), ERROR)
raise "Failed to read CRUSH map"
raise
def ensure_bucket_is_present(self, bucket_name):
if bucket_name not in [bucket.name for bucket in self.buckets()]:
@ -111,7 +111,7 @@ class Crushmap(object):
return ceph_output
except CalledProcessError as e:
log("save error: {}".format(e))
raise "Failed to save CRUSH map."
raise
def build_crushmap(self):
"""Modifies the current CRUSH map to include the new buckets"""

View File

@ -14,6 +14,7 @@
import collections
import glob
import itertools
import json
import os
import pyudev
@ -24,6 +25,7 @@ import subprocess
import sys
import time
import uuid
import functools
from contextlib import contextmanager
from datetime import datetime
@ -501,30 +503,33 @@ def ceph_user():
class CrushLocation(object):
def __init__(self,
name,
identifier,
host,
rack,
row,
datacenter,
chassis,
root):
self.name = name
def __init__(self, identifier, name, osd="", host="", chassis="",
rack="", row="", pdu="", pod="", room="",
datacenter="", zone="", region="", root=""):
self.identifier = identifier
self.name = name
self.osd = osd
self.host = host
self.chassis = chassis
self.rack = rack
self.row = row
self.pdu = pdu
self.pod = pod
self.room = room
self.datacenter = datacenter
self.chassis = chassis
self.zone = zone
self.region = region
self.root = root
def __str__(self):
return "name: {} id: {} host: {} rack: {} row: {} datacenter: {} " \
"chassis :{} root: {}".format(self.name, self.identifier,
self.host, self.rack, self.row,
self.datacenter, self.chassis,
self.root)
return "name: {} id: {} osd: {} host: {} chassis: {} rack: {} " \
"row: {} pdu: {} pod: {} room: {} datacenter: {} zone: {} " \
"region: {} root: {}".format(self.name, self.identifier,
self.osd, self.host, self.chassis,
self.rack, self.row, self.pdu,
self.pod, self.room,
self.datacenter, self.zone,
self.region, self.root)
def __eq__(self, other):
return not self.name < other.name and not other.name < self.name
@ -571,10 +576,53 @@ def get_osd_weight(osd_id):
raise
def _filter_nodes_and_set_attributes(node, node_lookup_map, lookup_type):
"""Get all nodes of the desired type, with all their attributes.
These attributes can be direct or inherited from ancestors.
"""
attribute_dict = {node['type']: node['name']}
if node['type'] == lookup_type:
attribute_dict['name'] = node['name']
attribute_dict['identifier'] = node['id']
return [attribute_dict]
elif not node.get('children'):
return [attribute_dict]
else:
descendant_attribute_dicts = [
_filter_nodes_and_set_attributes(node_lookup_map[node_id],
node_lookup_map, lookup_type)
for node_id in node.get('children', [])
]
return [dict(attribute_dict, **descendant_attribute_dict)
for descendant_attribute_dict
in itertools.chain.from_iterable(descendant_attribute_dicts)]
def _flatten_roots(nodes, lookup_type='host'):
"""Get a flattened list of nodes of the desired type.
:param nodes: list of nodes defined as a dictionary of attributes and
children
    :type nodes: List[Dict[str, Any]]
:param lookup_type: type of searched node
:type lookup_type: str
:returns: flattened list of nodes
:rtype: List[Dict[str, Any]]
"""
lookup_map = {node['id']: node for node in nodes}
root_attributes_dicts = [_filter_nodes_and_set_attributes(node, lookup_map,
lookup_type)
for node in nodes if node['type'] == 'root']
# get a flattened list of roots.
return list(itertools.chain.from_iterable(root_attributes_dicts))
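# Illustrative sketch (added for clarity): given nodes such as
#   {'id': -1, 'type': 'root', 'name': 'default', 'children': [-2]}
#   {'id': -2, 'type': 'rack', 'name': 'r1', 'children': [-3]}
#   {'id': -3, 'type': 'host', 'name': 'node-1'}
# _flatten_roots(nodes) walks down from the 'root' node, merging each
# ancestor's {type: name} entry into the host's attributes, and returns
#   [{'root': 'default', 'rack': 'r1', 'host': 'node-1',
#     'name': 'node-1', 'identifier': -3}]
# which get_osd_tree() below feeds straight into CrushLocation(**host).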
def get_osd_tree(service):
"""Returns the current osd map in JSON.
:returns: List.
:rtype: List[CrushLocation]
:raises: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
"""
@ -585,35 +633,14 @@ def get_osd_tree(service):
.decode('UTF-8'))
try:
json_tree = json.loads(tree)
crush_list = []
# Make sure children are present in the json
if not json_tree['nodes']:
return None
host_nodes = [
node for node in json_tree['nodes']
if node['type'] == 'host'
]
for host in host_nodes:
crush_list.append(
CrushLocation(
name=host.get('name'),
identifier=host['id'],
host=host.get('host'),
rack=host.get('rack'),
row=host.get('row'),
datacenter=host.get('datacenter'),
chassis=host.get('chassis'),
root=host.get('root')
)
)
return crush_list
roots = _flatten_roots(json_tree["nodes"])
return [CrushLocation(**host) for host in roots]
except ValueError as v:
log("Unable to parse ceph tree json: {}. Error: {}".format(
tree, v))
raise
except subprocess.CalledProcessError as e:
log("ceph osd tree command failed with message: {}".format(
e))
log("ceph osd tree command failed with message: {}".format(e))
raise
@ -669,7 +696,9 @@ def get_local_osd_ids():
dirs = os.listdir(osd_path)
for osd_dir in dirs:
osd_id = osd_dir.split('-')[1]
if _is_int(osd_id):
if (_is_int(osd_id) and
filesystem_mounted(os.path.join(
os.sep, osd_path, osd_dir))):
osd_ids.append(osd_id)
except OSError:
raise
@ -3271,13 +3300,14 @@ def determine_packages():
def determine_packages_to_remove():
"""Determines packages for removal
Note: if in a container, then the CHRONY_PACKAGE is removed.
:returns: list of packages to be removed
:rtype: List[str]
"""
rm_packages = REMOVE_PACKAGES.copy()
if is_container():
install_list = filter_missing_packages(CHRONY_PACKAGE)
if not install_list:
rm_packages.append(CHRONY_PACKAGE)
rm_packages.extend(filter_missing_packages([CHRONY_PACKAGE]))
return rm_packages
@ -3376,3 +3406,132 @@ def apply_osd_settings(settings):
level=ERROR)
raise OSDConfigSetError
return True
def enabled_manager_modules():
"""Return a list of enabled manager modules.
:rtype: List[str]
"""
cmd = ['ceph', 'mgr', 'module', 'ls']
try:
modules = subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError as e:
log("Failed to list ceph modules: {}".format(e), WARNING)
return []
modules = json.loads(modules)
return modules['enabled_modules']
def is_mgr_module_enabled(module):
"""Is a given manager module enabled.
    :param module: The module name to check
:type module: str
:returns: Whether the named module is enabled
:rtype: bool
"""
return module in enabled_manager_modules()
is_dashboard_enabled = functools.partial(is_mgr_module_enabled, 'dashboard')
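# Usage sketch (added for clarity): 'ceph mgr module ls' prints JSON with an
# 'enabled_modules' key, so is_dashboard_enabled() is simply
# is_mgr_module_enabled('dashboard') with the module name pre-bound, e.g.
#   if not is_dashboard_enabled():
#       mgr_enable_module('dashboard')   # helper defined just below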
def mgr_enable_module(module):
"""Enable a Ceph Manager Module.
:param module: The module name to enable
:type module: str
:raises: subprocess.CalledProcessError
"""
if not is_mgr_module_enabled(module):
subprocess.check_call(['ceph', 'mgr', 'module', 'enable', module])
return True
return False
mgr_enable_dashboard = functools.partial(mgr_enable_module, 'dashboard')
def mgr_disable_module(module):
"""Enable a Ceph Manager Module.
:param module: The module name to enable
:type module: str
:raises: subprocess.CalledProcessError
"""
if is_mgr_module_enabled(module):
subprocess.check_call(['ceph', 'mgr', 'module', 'disable', module])
return True
return False
mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
def ceph_config_set(name, value, who):
"""Set a ceph config option
:param name: key to set
:type name: str
:param value: value corresponding to key
:type value: str
:param who: Config area the key is associated with (e.g. 'dashboard')
:type who: str
:raises: subprocess.CalledProcessError
"""
subprocess.check_call(['ceph', 'config', 'set', who, name, value])
mgr_config_set = functools.partial(ceph_config_set, who='mgr')
def ceph_config_get(name, who):
"""Retrieve the value of a ceph config option
:param name: key to lookup
:type name: str
:param who: Config area the key is associated with (e.g. 'dashboard')
:type who: str
:returns: Value associated with key
:rtype: str
:raises: subprocess.CalledProcessError
"""
return subprocess.check_output(
['ceph', 'config', 'get', who, name]).decode('UTF-8')
mgr_config_get = functools.partial(ceph_config_get, who='mgr')
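# Usage sketch (added for clarity): the partials above bind who='mgr', so
#   mgr_config_set('mgr/dashboard/ssl', 'true')
#   mgr_config_get('mgr/dashboard/ssl')
# run 'ceph config set mgr mgr/dashboard/ssl true' and
# 'ceph config get mgr mgr/dashboard/ssl'; the key shown is only an example.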
def _dashboard_set_ssl_artifact(path, artifact_name, hostname=None):
"""Set SSL dashboard config option.
:param path: Path to file
:type path: str
:param artifact_name: Option name for setting the artifact
:type artifact_name: str
    :param hostname: If hostname is set, the artifact will only be associated
                     with the dashboard on that host.
:type hostname: str
:raises: subprocess.CalledProcessError
"""
cmd = ['ceph', 'dashboard', artifact_name]
if hostname:
cmd.append(hostname)
cmd.extend(['-i', path])
log(cmd, level=DEBUG)
subprocess.check_call(cmd)
dashboard_set_ssl_certificate = functools.partial(
_dashboard_set_ssl_artifact,
artifact_name='set-ssl-certificate')
dashboard_set_ssl_certificate_key = functools.partial(
_dashboard_set_ssl_artifact,
artifact_name='set-ssl-certificate-key')
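# Usage sketch (added for clarity; paths and hostname are examples only):
#   dashboard_set_ssl_certificate('/etc/ceph/cert.pem')
# runs 'ceph dashboard set-ssl-certificate -i /etc/ceph/cert.pem', while
#   dashboard_set_ssl_certificate('/etc/ceph/cert.pem', hostname='ceph-0')
# inserts the hostname before '-i' to scope the certificate to that host.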

View File

@ -3,10 +3,16 @@
- charm-unit-jobs
check:
jobs:
- vault-impish-xena_rgw:
voting: false
- vault-impish-xena-namespaced:
voting: false
- vault-hirsute-wallaby_rgw
- vault-hirsute-wallaby-namespaced
- vault-groovy-victoria_rgw
- vault-groovy-victoria-namespaced
- vault-focal-xena_rgw:
voting: false
- vault-focal-xena-namespaced:
voting: false
- vault-focal-wallaby_rgw
- vault-focal-wallaby-namespaced
- vault-focal-victoria_rgw
@ -37,10 +43,22 @@
vars:
tox_extra_args: vault:bionic-ussuri
- job:
name: vault-hirsute-wallaby_rgw
name: vault-impish-xena_rgw
parent: func-target
dependencies: &smoke-jobs
- vault-bionic-ussuri
vars:
tox_extra_args: vault:impish-xena
- job:
name: vault-impish-xena-namespaced
parent: func-target
dependencies: *smoke-jobs
vars:
tox_extra_args: vault:impish-xena-namespaced
- job:
name: vault-hirsute-wallaby_rgw
parent: func-target
dependencies: *smoke-jobs
vars:
tox_extra_args: vault:hirsute-wallaby
- job:
@ -50,17 +68,17 @@
vars:
tox_extra_args: vault:hirsute-wallaby-namespaced
- job:
name: vault-groovy-victoria_rgw
name: vault-focal-xena_rgw
parent: func-target
dependencies: *smoke-jobs
vars:
tox_extra_args: vault:groovy-victoria
tox_extra_args: vault:focal-xena
- job:
name: vault-groovy-victoria-namespaced
name: vault-focal-xena-namespaced
parent: func-target
dependencies: *smoke-jobs
vars:
tox_extra_args: vault:groovy-victoria-namespaced
tox_extra_args: vault:focal-xena-namespaced
- job:
name: vault-focal-wallaby_rgw
parent: func-target

pip.sh Executable file
View File

@ -0,0 +1,18 @@
#!/usr/bin/env bash
#
# This file is managed centrally by release-tools and should not be modified
# within individual charm repos. See the 'global' dir contents for available
# choices of tox.ini for OpenStack Charms:
# https://github.com/openstack-charmers/release-tools
#
# setuptools 58.0 dropped support for use_2to3=true, which is needed to
# install blessings (an indirect dependency of charm-tools).
#
# More details on the behavior of tox and virtualenv creation can be found at
# https://github.com/tox-dev/tox/issues/448
#
# This script is a wrapper to force the use of the pinned versions early in
# the process, when the virtualenv is created and upgraded, before installing
# the dependencies declared in the target.
pip install 'pip<20.3' 'setuptools<50.0.0'
pip "$@"

View File

@ -0,0 +1,117 @@
options:
source: &source cloud:focal-xena
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *source
to:
- '0'
- '1'
- '2'
ceph-radosgw:
charm: ceph-radosgw
num_units: 1
options:
source: *source
namespace-tenants: True
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
constraints: "mem=2048"
storage:
osd-devices: 'cinder,10G'
options:
source: *source
osd-devices: '/srv/ceph /dev/test-non-existent'
to:
- '4'
- '5'
- '6'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *source
to:
- '7'
- '8'
- '9'
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *source
to:
- '10'
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
vault:
charm: cs:~openstack-charmers-next/vault
num_units: 1
to:
- '11'
relations:
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-radosgw:mon'
- 'ceph-mon:radosgw'
- - 'ceph-radosgw:identity-service'
- 'keystone:identity-service'
- - 'vault-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'vault:shared-db'
- 'vault-mysql-router:shared-db'
- - 'keystone:certificates'
- 'vault:certificates'
- - 'ceph-radosgw:certificates'
- 'vault:certificates'

View File

@ -0,0 +1,116 @@
options:
source: &source cloud:focal-xena
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *source
to:
- '0'
- '1'
- '2'
ceph-radosgw:
charm: ceph-radosgw
num_units: 1
options:
source: *source
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
constraints: "mem=2048"
storage:
osd-devices: 'cinder,10G'
options:
source: *source
osd-devices: '/srv/ceph /dev/test-non-existent'
to:
- '4'
- '5'
- '6'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *source
to:
- '7'
- '8'
- '9'
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *source
to:
- '10'
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
vault:
charm: cs:~openstack-charmers-next/vault
num_units: 1
to:
- '11'
relations:
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-radosgw:mon'
- 'ceph-mon:radosgw'
- - 'ceph-radosgw:identity-service'
- 'keystone:identity-service'
- - 'vault-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'vault:shared-db'
- 'vault-mysql-router:shared-db'
- - 'keystone:certificates'
- 'vault:certificates'
- - 'ceph-radosgw:certificates'
- 'vault:certificates'

View File

@ -0,0 +1,117 @@
options:
source: &source distro
series: impish
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *source
to:
- '0'
- '1'
- '2'
ceph-radosgw:
charm: ceph-radosgw
num_units: 1
options:
source: *source
namespace-tenants: True
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
constraints: "mem=2048"
storage:
osd-devices: 'cinder,10G'
options:
source: *source
osd-devices: '/srv/ceph /dev/test-non-existent'
to:
- '4'
- '5'
- '6'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *source
to:
- '7'
- '8'
- '9'
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *source
to:
- '10'
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
vault:
charm: cs:~openstack-charmers-next/vault
num_units: 1
to:
- '11'
relations:
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-radosgw:mon'
- 'ceph-mon:radosgw'
- - 'ceph-radosgw:identity-service'
- 'keystone:identity-service'
- - 'vault-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'vault:shared-db'
- 'vault-mysql-router:shared-db'
- - 'keystone:certificates'
- 'vault:certificates'
- - 'ceph-radosgw:certificates'
- 'vault:certificates'

View File

@ -0,0 +1,116 @@
options:
source: &source distro
series: impish
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
applications:
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *source
to:
- '0'
- '1'
- '2'
ceph-radosgw:
charm: ceph-radosgw
num_units: 1
options:
source: *source
to:
- '3'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 3
constraints: "mem=2048"
storage:
osd-devices: 'cinder,10G'
options:
source: *source
osd-devices: '/srv/ceph /dev/test-non-existent'
to:
- '4'
- '5'
- '6'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *source
to:
- '7'
- '8'
- '9'
keystone:
expose: True
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *source
to:
- '10'
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
vault:
charm: cs:~openstack-charmers-next/vault
num_units: 1
to:
- '11'
relations:
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-radosgw:mon'
- 'ceph-mon:radosgw'
- - 'ceph-radosgw:identity-service'
- 'keystone:identity-service'
- - 'vault-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'vault:shared-db'
- 'vault-mysql-router:shared-db'
- - 'keystone:certificates'
- 'vault:certificates'
- - 'ceph-radosgw:certificates'
- 'vault:certificates'

View File

@ -1,8 +1,8 @@
charm_name: ceph-radosgw
gate_bundles:
- vault: groovy-victoria
- vault: groovy-victoria-namespaced
- vault: focal-xena
- vault: focal-xena-namespaced
- vault: focal-wallaby
- vault: focal-wallaby-namespaced
- vault: focal-victoria
@ -33,8 +33,12 @@ dev_bundles:
- bionic-rocky-multisite
- vault: bionic-rocky
- vault: bionic-rocky-namespaced
- vault: groovy-victoria
- vault: groovy-victoria-namespaced
- vault: hirsute-wallaby
- vault: hirsute-wallaby-namespaced
- vault: impish-xena
- vault: impish-xena-namespaced
target_deploy_status:
vault:
@ -57,3 +61,7 @@ tests_options:
force_deploy:
- hirsute-wallaby
- hirsute-wallaby-namespaced
- groovy-victoria
- groovy-victoria-namespaced
- impish-xena
- impish-xena-namespaced

tox.ini
View File

@ -22,19 +22,22 @@ skip_missing_interpreters = False
# * It is also necessary to pin virtualenv as a newer virtualenv would still
# lead to fetching the latest pip in the func* tox targets, see
# https://stackoverflow.com/a/38133283
requires = pip < 20.3
virtualenv < 20.0
requires =
pip < 20.3
virtualenv < 20.0
setuptools < 50.0.0
# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
minversion = 3.2.0
minversion = 3.18.0
[testenv]
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
CHARM_DIR={envdir}
install_command =
pip install {opts} {packages}
{toxinidir}/pip.sh install {opts} {packages}
commands = stestr run --slowest {posargs}
whitelist_externals = juju
allowlist_externals = juju
passenv = HOME TERM CS_* OS_* TEST_*
deps = -r{toxinidir}/test-requirements.txt

View File

@ -15,7 +15,6 @@
from mock import patch
import ceph_radosgw_context as context
import charmhelpers
import charmhelpers.contrib.storage.linux.ceph as ceph
import charmhelpers.fetch as fetch
@ -69,290 +68,6 @@ class HAProxyContextTests(CharmTestCase):
self.assertEqual(expect, haproxy_context())
class IdentityServiceContextTest(CharmTestCase):
def setUp(self):
super(IdentityServiceContextTest, self).setUp(context, TO_PATCH)
self.relation_get.side_effect = self.test_relation.get
self.config.side_effect = self.test_config.get
self.maxDiff = None
self.cmp_pkgrevno.return_value = 1
self.leader_get.return_value = 'False'
@patch.object(charmhelpers.contrib.openstack.context,
'filter_installed_packages', return_value=['absent-pkg'])
@patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr')
@patch.object(charmhelpers.contrib.openstack.context, 'context_complete')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_get')
@patch.object(charmhelpers.contrib.openstack.context, 'related_units')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
@patch.object(charmhelpers.contrib.openstack.context, 'log')
def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp,
_format_ipv6_addr, _filter_installed_packages,
jewel_installed=False, cmp_pkgrevno_side_effects=None):
self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects
if cmp_pkgrevno_side_effects
else [-1, 1, -1])
self.test_config.set('operator-roles', 'Babel')
self.test_config.set('admin-roles', 'Dart')
self.test_config.set('cache-size', '42')
self.relation_ids.return_value = ['identity-service:5']
self.related_units.return_value = ['keystone/0']
_format_ipv6_addr.return_value = False
_rids.return_value = 'rid1'
_runits.return_value = 'runit'
_ctxt_comp.return_value = True
id_data = {
'service_port': 9876,
'service_host': '127.0.0.4',
'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'service_domain_id': '8e50f28a556911e8aaeed33789425d23',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'service_tenant': 'ten',
'service_username': 'admin',
'service_password': 'adminpass',
}
_rget.return_value = id_data
ids_ctxt = context.IdentityServiceContext()
expect = {
'admin_domain_id': '8e50f28a556911e8aaeed33789425d23',
'admin_password': 'adminpass',
'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'admin_tenant_name': 'ten',
'admin_user': 'admin',
'api_version': '2.0',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'auth_protocol': 'http',
'auth_type': 'keystone',
'namespace_tenants': False,
'cache_size': '42',
'service_host': '127.0.0.4',
'service_port': 9876,
'service_protocol': 'http',
}
if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0:
expect['user_roles'] = 'Babel'
expect['admin_roles'] = 'Dart'
else:
expect['user_roles'] = 'Babel,Dart'
if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] < 0:
expect['keystone_revocation_parameter_supported'] = True
if jewel_installed:
expect['auth_keystone_v3_supported'] = True
self.assertEqual(expect, ids_ctxt())
@patch.object(charmhelpers.contrib.openstack.context,
'filter_installed_packages', return_value=['absent-pkg'])
@patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr')
@patch.object(charmhelpers.contrib.openstack.context, 'context_complete')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_get')
@patch.object(charmhelpers.contrib.openstack.context, 'related_units')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
@patch.object(charmhelpers.contrib.openstack.context, 'log')
def test_ids_ctxt_with_namespace(self, _log, _rids, _runits, _rget,
_ctxt_comp, _format_ipv6_addr,
_filter_installed_packages,
jewel_installed=False,
cmp_pkgrevno_side_effects=None):
self.cmp_pkgrevno.side_effect = (cmp_pkgrevno_side_effects
if cmp_pkgrevno_side_effects
else [-1, 1, -1])
self.test_config.set('operator-roles', 'Babel')
self.test_config.set('admin-roles', 'Dart')
self.test_config.set('cache-size', '42')
self.relation_ids.return_value = ['identity-service:5']
self.related_units.return_value = ['keystone/0']
_format_ipv6_addr.return_value = False
_rids.return_value = 'rid1'
_runits.return_value = 'runit'
_ctxt_comp.return_value = True
self.leader_get.return_value = 'True'
id_data = {
'service_port': 9876,
'service_host': '127.0.0.4',
'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'service_domain_id': '8e50f28a556911e8aaeed33789425d23',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'service_tenant': 'ten',
'service_username': 'admin',
'service_password': 'adminpass',
}
_rget.return_value = id_data
ids_ctxt = context.IdentityServiceContext()
expect = {
'admin_domain_id': '8e50f28a556911e8aaeed33789425d23',
'admin_password': 'adminpass',
'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'admin_tenant_name': 'ten',
'admin_user': 'admin',
'api_version': '2.0',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'auth_protocol': 'http',
'auth_type': 'keystone',
'namespace_tenants': True,
'cache_size': '42',
'service_host': '127.0.0.4',
'service_port': 9876,
'service_protocol': 'http',
}
if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[2] >= 0:
expect['user_roles'] = 'Babel'
expect['admin_roles'] = 'Dart'
else:
expect['user_roles'] = 'Babel,Dart'
if cmp_pkgrevno_side_effects and cmp_pkgrevno_side_effects[1] < 0:
expect['keystone_revocation_parameter_supported'] = True
if jewel_installed:
expect['auth_keystone_v3_supported'] = True
self.assertEqual(expect, ids_ctxt())
@patch.object(charmhelpers.contrib.openstack.context,
'filter_installed_packages', return_value=['absent-pkg'])
@patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr')
@patch.object(charmhelpers.contrib.openstack.context, 'context_complete')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_get')
@patch.object(charmhelpers.contrib.openstack.context, 'related_units')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
@patch.object(charmhelpers.contrib.openstack.context, 'log')
def test_ids_ctxt_missing_admin_domain_id(
self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr,
_filter_installed_packages, jewel_installed=False):
self.test_config.set('operator-roles', 'Babel')
self.test_config.set('admin-roles', 'Dart')
self.test_config.set('cache-size', '42')
self.relation_ids.return_value = ['identity-service:5']
self.related_units.return_value = ['keystone/0']
_format_ipv6_addr.return_value = False
_rids.return_value = ['rid1']
_runits.return_value = ['runit']
_ctxt_comp.return_value = True
self.cmp_pkgrevno.return_value = -1
if jewel_installed:
self.cmp_pkgrevno.return_value = 0
id_data = {
'service_port': 9876,
'service_host': '127.0.0.4',
'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'service_tenant': 'ten',
'service_username': 'admin',
'service_password': 'adminpass',
}
_rget.return_value = id_data
ids_ctxt = context.IdentityServiceContext()
expect = {
'admin_password': 'adminpass',
'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'admin_tenant_name': 'ten',
'admin_user': 'admin',
'api_version': '2.0',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'auth_protocol': 'http',
'auth_type': 'keystone',
'namespace_tenants': False,
'cache_size': '42',
'keystone_revocation_parameter_supported': True,
'service_host': '127.0.0.4',
'service_port': 9876,
'service_protocol': 'http',
'user_roles': 'Babel,Dart',
}
if jewel_installed:
expect['auth_keystone_v3_supported'] = True
self.assertEqual(expect, ids_ctxt())
@patch.object(charmhelpers.contrib.openstack.context,
'filter_installed_packages', return_value=['absent-pkg'])
@patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr')
@patch.object(charmhelpers.contrib.openstack.context, 'context_complete')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_get')
@patch.object(charmhelpers.contrib.openstack.context, 'related_units')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
@patch.object(charmhelpers.contrib.openstack.context, 'log')
def test_ids_ctxt_v3(
self, _log, _rids, _runits, _rget, _ctxt_comp, _format_ipv6_addr,
_filter_installed_packages, jewel_installed=False):
self.test_config.set('operator-roles', 'Babel')
self.test_config.set('admin-roles', 'Dart')
self.test_config.set('cache-size', '42')
self.relation_ids.return_value = ['identity-service:5']
self.related_units.return_value = ['keystone/0']
_format_ipv6_addr.return_value = False
_rids.return_value = ['rid1']
_runits.return_value = ['runit']
_ctxt_comp.return_value = True
self.cmp_pkgrevno.return_value = -1
if jewel_installed:
self.cmp_pkgrevno.return_value = 0
id_data = {
'service_port': 9876,
'service_host': '127.0.0.4',
'service_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'service_domain_id': '8e50f28a556911e8aaeed33789425d23',
'service_domain': 'service_domain',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'service_tenant': 'ten',
'service_username': 'admin',
'service_password': 'adminpass',
'api_version': '3',
}
_rget.return_value = id_data
ids_ctxt = context.IdentityServiceContext()
expect = {
'admin_domain_id': '8e50f28a556911e8aaeed33789425d23',
'admin_domain_name': 'service_domain',
'admin_password': 'adminpass',
'admin_tenant_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'admin_tenant_name': 'ten',
'admin_user': 'admin',
'api_version': '3',
'auth_host': '127.0.0.5',
'auth_port': 5432,
'auth_protocol': 'http',
'auth_type': 'keystone',
'namespace_tenants': False,
'cache_size': '42',
'keystone_revocation_parameter_supported': True,
'service_domain_id': '8e50f28a556911e8aaeed33789425d23',
'service_host': '127.0.0.4',
'service_port': 9876,
'service_project_id': '2852107b8f8f473aaf0d769c7bbcf86b',
'service_protocol': 'http',
'user_roles': 'Babel,Dart',
}
if jewel_installed:
expect['auth_keystone_v3_supported'] = True
self.assertEqual(expect, ids_ctxt())
def test_ids_ctxt_jewel(self):
self.test_ids_ctxt(jewel_installed=True,
cmp_pkgrevno_side_effects=[0, 1, -1])
def test_ids_ctxt_luminous(self):
self.test_ids_ctxt(jewel_installed=True,
cmp_pkgrevno_side_effects=[1, 1, 0])
def test_ids_ctxt_octopus(self):
self.test_ids_ctxt(jewel_installed=True,
cmp_pkgrevno_side_effects=[1, -1, 0])
@patch.object(charmhelpers.contrib.openstack.context,
'filter_installed_packages', return_value=['absent-pkg'])
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
@patch.object(charmhelpers.contrib.openstack.context, 'log')
def test_ids_ctxt_no_rels(self, _log, _rids, _filter_installed_packages):
_rids.return_value = []
ids_ctxt = context.IdentityServiceContext()
self.assertEqual(ids_ctxt(), None)
class MonContextTest(CharmTestCase):
def setUp(self):