Add Antelope support

* sync charm-helpers to classic charms
* change openstack-origin/source default to antelope
* align testing with antelope
* add new antelope bundles
* add antelope bundles to tests.yaml
* add antelope tests to osci.yaml and .zuul.yaml
* update build-on and run-on bases

Change-Id: I6ddab6f3b39a8f0fa4db07450022d85ba60d72de
commit 6ad93e8710
parent edde9a09a6
Author: Corey Bryant (committed by Felipe Reyes)
Date: 2023-02-14 21:26:01 +00:00
31 changed files with 619 additions and 78 deletions


@@ -1,4 +1,4 @@
 - project:
     templates:
-      - openstack-python3-charm-zed-jobs
+      - openstack-python3-charm-jobs
       - openstack-cover-jobs


@@ -33,3 +33,6 @@ bases:
   - name: ubuntu
     channel: "22.10"
     architectures: [amd64, s390x, ppc64el, arm64]
+  - name: ubuntu
+    channel: "23.04"
+    architectures: [amd64, s390x, ppc64el, arm64]


@@ -19,6 +19,7 @@
 import glob
 import grp
+import json
 import os
 import pwd
 import re
@@ -28,7 +29,9 @@ import subprocess
 import yaml

 from charmhelpers.core.hookenv import (
+    application_name,
     config,
+    ERROR,
     hook_name,
     local_unit,
     log,
@@ -174,7 +177,8 @@ define service {{
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
-                    command += " " + " ".join(parts[1:])
+                    safe_args = [shlex.quote(arg) for arg in parts[1:]]
+                    command += " " + " ".join(safe_args)
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''
@@ -414,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
     :param str unit_name: Unit name to use in check description
     :param bool immediate_check: For sysv init, run the service check immediately
     """
+    # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601
+    # for details. Just remove check_haproxy if haproxy is added as an lsb
+    # resource in hacluster.
+    for rid in relation_ids("ha"):
+        ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
+        if ha_resources:
+            try:
+                ha_resources_parsed = json.loads(ha_resources)
+            except ValueError as e:
+                log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
+                raise
+            if "lsb:haproxy" in ha_resources_parsed.values():
+                if "haproxy" in services:
+                    log("removed check_haproxy. This service will be monitored by check_crm")
+                    services.remove("haproxy")
     for svc in services:
         # Don't add a check for these services from neutron-gateway
         if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
@@ -520,3 +538,39 @@ def remove_deprecated_check(nrpe, deprecated_services):
     for dep_svc in deprecated_services:
         log('Deprecated service: {}'.format(dep_svc))
         nrpe.remove_check(shortname=dep_svc)
+
+
+def add_deferred_restarts_check(nrpe):
+    """
+    Add NRPE check for services with deferred restarts.
+
+    :param NRPE nrpe: NRPE object to add check to
+    """
+    unit_name = local_unit().replace('/', '-')
+    shortname = unit_name + '_deferred_restarts'
+    check_cmd = 'check_deferred_restarts.py --application {}'.format(
+        application_name())
+
+    log('Adding deferred restarts nrpe check: {}'.format(shortname))
+    nrpe.add_check(
+        shortname=shortname,
+        description='Check deferred service restarts {}'.format(unit_name),
+        check_cmd=check_cmd)
+
+
+def remove_deferred_restarts_check(nrpe):
+    """
+    Remove NRPE check for services with deferred service restarts.
+
+    :param NRPE nrpe: NRPE object to remove check from
+    """
+    unit_name = local_unit().replace('/', '-')
+    shortname = unit_name + '_deferred_restarts'
+    check_cmd = 'check_deferred_restarts.py --application {}'.format(
+        application_name())
+
+    log('Removing deferred restarts nrpe check: {}'.format(shortname))
+    nrpe.remove_check(
+        shortname=shortname,
+        description='Check deferred service restarts {}'.format(unit_name),
+        check_cmd=check_cmd)

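A charm consuming this helper would typically register the new check from its NRPE relation handler. A minimal sketch, assuming the usual charm-helpers entry points and hook plumbing (the handler name is illustrative, not part of this commit):

from charmhelpers.contrib.charmsupport import nrpe


def update_nrpe_config():
    # get_nagios_hostname()/NRPE() are the existing charm-helpers entry points.
    hostname = nrpe.get_nagios_hostname()
    checks = nrpe.NRPE(hostname=hostname)
    # Registers '<unit>_deferred_restarts' backed by check_deferred_restarts.py.
    nrpe.add_deferred_restarts_check(checks)
    checks.write()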

@@ -324,7 +324,7 @@ def valid_hacluster_config():
     '''
     vip = config_get('vip')
     dns = config_get('dns-ha')
-    if not(bool(vip) ^ bool(dns)):
+    if not (bool(vip) ^ bool(dns)):
         msg = ('HA: Either vip or dns-ha must be set but not both in order to '
                'use high availability')
         status_set('blocked', msg)


@@ -539,7 +539,7 @@ def port_has_listener(address, port):
     """
     cmd = ['nc', '-z', address, str(port)]
     result = subprocess.call(cmd)
-    return not(bool(result))
+    return not (bool(result))


 def assert_charm_supports_ipv6():


@@ -648,7 +648,7 @@ def patch_ports_on_bridge(bridge):
                 uuid_for_port(
                     interface['options']['peer'])),
                 interface['options']['peer'])
-            yield(Patch(this_end, other_end))
+            yield Patch(this_end, other_end)
             # We expect one result and it is ok if it turns out to be a port
             # for a different bridge. However we need a break here to satisfy
             # the for/else check which is in place to detect interface referring


@@ -139,6 +139,19 @@ class OVNClusterStatus(object):
         """
         return self.leader == 'self'

+    def to_yaml(self):
+        """Return yaml-serializable dict representation of this object.
+
+        :returns: dictionary suitable for serialization by yaml.safe_dump()
+        :rtype: Dict[str, Any]
+        """
+        yaml_dict = self.__dict__
+        # Convert types that are not natively convertible to yaml
+        yaml_dict["cluster_id"] = str(self.cluster_id)
+        yaml_dict["server_id"] = str(self.server_id)
+        return yaml_dict
+

 def cluster_status(target, schema=None, use_ovs_appctl=False, rundir=None):
     """Retrieve status information from clustered OVSDB.

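What to_yaml() enables, roughly: dumping cluster state with yaml.safe_dump and no custom representers, since cluster_id/server_id are coerced to plain str first. A sketch (call site assumed; target name as used elsewhere in the module):

import yaml

from charmhelpers.contrib.network.ovs.ovn import cluster_status

status = cluster_status('ovnnb_db')
print(yaml.safe_dump(status.to_yaml()))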

@@ -205,7 +205,7 @@ class SimpleOVSDB(object):
                 decoded_set = []
                 for el in data[1]:
                     decoded_set.append(self._deserialize_ovsdb(el))
-                return(decoded_set)
+                return decoded_set
             # fall back to normal processing below
             break
@@ -213,20 +213,14 @@ class SimpleOVSDB(object):
         f = ovs_type_cb_map.get(data[0], str)
         return f(data[1])

-    def _find_tbl(self, condition=None):
-        """Run and parse output of OVSDB `find` command.
+    def _cmd_deserialize_data_generator(self, cmd):
+        """Run command and provide generator with deserialized data.

-        :param condition: An optional RFC 7047 5.1 match condition
-        :type condition: Optional[str]
-        :returns: Dictionary with data
-        :rtype: Dict[str, any]
+        :param cmd: Command and arguments to run.
+        :type cmd: Iterable[str]
+        :returns: Deserialized data.
+        :rtype: Generator[Dict[str,any], None, None]
         """
-        cmd = [self._tool]
-        if self._args:
-            cmd.extend(self._args)
-        cmd.extend(['-f', 'json', 'find', self._table])
-        if condition:
-            cmd.append(condition)
         output = utils._run(*cmd)
         data = json.loads(output)
         for row in data['data']:
@@ -238,9 +232,49 @@ class SimpleOVSDB(object):
                 values.append(col)
             yield dict(zip(data['headings'], values))

+    def _get_command(self):
+        """Get base command.
+
+        :rtype: List[str]
+        """
+        cmd = [self._tool]
+        if self._args:
+            cmd.extend(self._args)
+        cmd.extend(['-f', 'json'])
+        return cmd
+
+    def _find_tbl(self, condition=None):
+        """Run and parse output of OVSDB `find` command.
+
+        :param condition: An optional RFC 7047 5.1 match condition
+        :type condition: Optional[str]
+        :returns: Dictionary with data
+        :rtype: Generator[Dict[str, any], None, None]
+        """
+        cmd = self._get_command()
+        cmd.extend(['find', self._table])
+        if condition:
+            cmd.append(condition)
+        return self._cmd_deserialize_data_generator(cmd)
+
+    def _list_tbl_record(self, record):
+        """Run and parse output of OVSDB `list` command for record.
+
+        :param record: The UUID of the record to list data for.
+        :type record: uuid.UUID
+        :returns: Dictionary with data
+        :rtype: Dict[str, any]
+        """
+        cmd = self._get_command()
+        cmd.extend(['list', self._table, str(record)])
+        return next(self._cmd_deserialize_data_generator(cmd))
+
     def __iter__(self):
         return self._find_tbl()

+    def __getitem__(self, key):
+        return self._list_tbl_record(key)
+
     def clear(self, rec, col):
         utils._run(self._tool, 'clear', self._table, rec, col)

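With __getitem__ in place a table can be indexed by record UUID as well as iterated: iteration still goes through OVSDB `find`, while indexing issues a `list` for the single record. A sketch (tool/table and UUID are illustrative):

from charmhelpers.contrib.network.ovs.ovsdb import SimpleOVSDB

chassis = SimpleOVSDB('ovn-sbctl').chassis

for row in chassis:            # unchanged: `find` under the hood
    print(row['name'])

record = chassis['73f7ed51-20e8-4cd9-8c3c-e14d0c388f19']   # new: `list`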

@@ -25,6 +25,7 @@ import socket
 import time

 from base64 import b64decode
+from distutils.version import LooseVersion
 from subprocess import (
     check_call,
     check_output,
@@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
 from charmhelpers.fetch import (
     apt_install,
     filter_installed_packages,
+    get_installed_version,
 )
 from charmhelpers.core.hookenv import (
     NoNetworkBinding,
@@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import (
     network_get_primary_address,
     WARNING,
     service_name,
+    remote_service_name,
 )

 from charmhelpers.core.sysctl import create as sysctl_create
@@ -130,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 ADDRESS_TYPES = ['admin', 'internal', 'public']
 HAPROXY_RUN_DIR = '/var/run/haproxy/'
 DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
+DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404


 def ensure_packages(packages):
@@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):

 class IdentityServiceContext(OSContextGenerator):

+    _forward_compat_remaps = {
+        'admin_user': 'admin-user-name',
+        'service_username': 'service-user-name',
+        'service_tenant': 'service-project-name',
+        'service_tenant_id': 'service-project-id',
+        'service_domain': 'service-domain-name',
+    }
+
     def __init__(self,
                  service=None,
                  service_user=None,
@@ -397,11 +409,16 @@ class IdentityServiceContext(OSContextGenerator):
             # 'www_authenticate_uri' replaced 'auth_uri' since Stein,
             # see keystonemiddleware upstream sources for more info
             if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
-                c.update((
-                    ('www_authenticate_uri', "{}://{}:{}/v3".format(
-                        ctxt.get('service_protocol', ''),
-                        ctxt.get('service_host', ''),
-                        ctxt.get('service_port', ''))),))
+                if 'public_auth_url' in ctxt:
+                    c.update((
+                        ('www_authenticate_uri', '{}/v3'.format(
+                            ctxt.get('public_auth_url'))),))
+                else:
+                    c.update((
+                        ('www_authenticate_uri', "{}://{}:{}/v3".format(
+                            ctxt.get('service_protocol', ''),
+                            ctxt.get('service_host', ''),
+                            ctxt.get('service_port', ''))),))
             else:
                 c.update((
                     ('auth_uri', "{}://{}:{}/v3".format(
@@ -409,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator):
                     ctxt.get('service_host', ''),
                     ctxt.get('service_port', ''))),))

+            if 'internal_auth_url' in ctxt:
+                c.update((
+                    ('auth_url', ctxt.get('internal_auth_url')),))
+            else:
+                c.update((
+                    ('auth_url', "{}://{}:{}/v3".format(
+                        ctxt.get('auth_protocol', ''),
+                        ctxt.get('auth_host', ''),
+                        ctxt.get('auth_port', ''))),))
             c.update((
-                ('auth_url', "{}://{}:{}/v3".format(
-                    ctxt.get('auth_protocol', ''),
-                    ctxt.get('auth_host', ''),
-                    ctxt.get('auth_port', ''))),
                 ('project_domain_name', ctxt.get('admin_domain_name', '')),
                 ('user_domain_name', ctxt.get('admin_domain_name', '')),
                 ('project_name', ctxt.get('admin_tenant_name', '')),
@@ -441,39 +464,86 @@ class IdentityServiceContext(OSContextGenerator):
         for rid in relation_ids(self.rel_name):
             self.related = True
             for unit in related_units(rid):
+                rdata = {}
+                # NOTE(jamespage):
+                # forwards compat with application data
+                # bag driven approach to relation.
+                _adata = relation_get(rid=rid, app=remote_service_name(rid))
+                adata = {}
+                # if no app data bag presented - fallback
+                # to legacy unit based relation data
                 rdata = relation_get(rid=rid, unit=unit)
-                serv_host = rdata.get('service_host')
+                if _adata:
+                    # New app data bag uses - instead of _
+                    # in key names - remap for compat with
+                    # existing relation data keys
+                    for key, value in _adata.items():
+                        if key == 'api-version':
+                            adata[key.replace('-', '_')] = value.strip('v')
+                        else:
+                            adata[key.replace('-', '_')] = value
+                    # Re-map some keys for backwards compatibility
+                    for target, source in self._forward_compat_remaps.items():
+                        adata[target] = _adata.get(source)
+
+                # Now preferentially get data from the app data bag, but if
+                # it's not available, get it from the legacy based relation
+                # data.
+
+                def _resolve(key):
+                    return adata.get(key) or rdata.get(key)
+
+                serv_host = _resolve('service_host')
                 serv_host = format_ipv6_addr(serv_host) or serv_host
-                auth_host = rdata.get('auth_host')
+                auth_host = _resolve('auth_host')
                 auth_host = format_ipv6_addr(auth_host) or auth_host
-                int_host = rdata.get('internal_host')
+                int_host = _resolve('internal_host')
                 int_host = format_ipv6_addr(int_host) or int_host
-                svc_protocol = rdata.get('service_protocol') or 'http'
-                auth_protocol = rdata.get('auth_protocol') or 'http'
-                int_protocol = rdata.get('internal_protocol') or 'http'
-                api_version = rdata.get('api_version') or '2.0'
-                ctxt.update({'service_port': rdata.get('service_port'),
+                svc_protocol = _resolve('service_protocol') or 'http'
+                auth_protocol = _resolve('auth_protocol') or 'http'
+                admin_role = _resolve('admin_role') or 'Admin'
+                int_protocol = _resolve('internal_protocol') or 'http'
+                api_version = _resolve('api_version') or '2.0'
+                ctxt.update({'service_port': _resolve('service_port'),
                              'service_host': serv_host,
                              'auth_host': auth_host,
-                             'auth_port': rdata.get('auth_port'),
+                             'auth_port': _resolve('auth_port'),
                              'internal_host': int_host,
-                             'internal_port': rdata.get('internal_port'),
-                             'admin_tenant_name': rdata.get('service_tenant'),
-                             'admin_user': rdata.get('service_username'),
-                             'admin_password': rdata.get('service_password'),
+                             'internal_port': _resolve('internal_port'),
+                             'admin_tenant_name': _resolve('service_tenant'),
+                             'admin_user': _resolve('service_username'),
+                             'admin_password': _resolve('service_password'),
+                             'admin_role': admin_role,
                              'service_protocol': svc_protocol,
                              'auth_protocol': auth_protocol,
                              'internal_protocol': int_protocol,
                              'api_version': api_version})

-                if rdata.get('service_type'):
-                    ctxt['service_type'] = rdata.get('service_type')
+                service_type = _resolve('service_type')
+                if service_type:
+                    ctxt['service_type'] = service_type

                 if float(api_version) > 2:
                     ctxt.update({
-                        'admin_domain_name': rdata.get('service_domain'),
-                        'service_project_id': rdata.get('service_tenant_id'),
-                        'service_domain_id': rdata.get('service_domain_id')})
+                        'admin_domain_name': _resolve('service_domain'),
+                        'service_project_id': _resolve('service_tenant_id'),
+                        'service_domain_id': _resolve('service_domain_id')})
+
+                # NOTE:
+                # keystone-k8s operator presents full URLs
+                # for all three endpoints - public and internal are
+                # externally addressable for machine based charm
+                public_auth_url = _resolve('public_auth_url')
+                if public_auth_url:
+                    ctxt.update({
+                        'public_auth_url': public_auth_url,
+                    })
+                internal_auth_url = _resolve('internal_auth_url')
+                if internal_auth_url:
+                    ctxt.update({
+                        'internal_auth_url': internal_auth_url,
+                    })

                 # we keep all variables in ctxt for compatibility and
                 # add nested dictionary for keystone_authtoken generic
@@ -487,8 +557,8 @@ class IdentityServiceContext(OSContextGenerator):
                     # NOTE(jamespage) this is required for >= icehouse
                     # so a missing value just indicates keystone needs
                     # upgrading
-                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
-                    ctxt['admin_domain_id'] = rdata.get('service_domain_id')
+                    ctxt['admin_tenant_id'] = _resolve('service_tenant_id')
+                    ctxt['admin_domain_id'] = _resolve('service_domain_id')
                     return ctxt
         return {}

@@ -860,9 +930,14 @@ class HAProxyContext(OSContextGenerator):
     interfaces = ['cluster']

     def __init__(self, singlenode_mode=False,
-                 address_types=ADDRESS_TYPES):
+                 address_types=None,
+                 exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
+        if address_types is None:
+            address_types = ADDRESS_TYPES[:]
+
         self.address_types = address_types
         self.singlenode_mode = singlenode_mode
+        self.exporter_stats_port = exporter_stats_port

     def __call__(self):
         if not os.path.isdir(HAPROXY_RUN_DIR):
@@ -957,10 +1032,20 @@ class HAProxyContext(OSContextGenerator):
             db = kv()
             ctxt['stat_password'] = db.get('stat-password')
             if not ctxt['stat_password']:
-                ctxt['stat_password'] = db.set('stat-password',
-                                               pwgen(32))
+                ctxt['stat_password'] = db.set('stat-password', pwgen(32))
                 db.flush()

+        # NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0
+        #                 New bind will be created and a prometheus-exporter
+        #                 will be used for path /metrics. At the same time,
+        #                 prometheus-exporter avoids using auth.
+        haproxy_version = get_installed_version("haproxy")
+        if (haproxy_version and
+                haproxy_version.ver_str >= LooseVersion("2.0.0") and
+                is_relation_made("haproxy-exporter")):
+            ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
+            ctxt["stats_exporter_port"] = self.exporter_stats_port
+
         for frontend in cluster_hosts:
             if (len(cluster_hosts[frontend]['backends']) > 1 or
                     self.singlenode_mode):

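The heart of the identity-service change is a two-level lookup: the Keystone application data bag wins and legacy per-unit relation data is the fallback. Distilled below with made-up relation data:

adata = {'service_host': '10.5.0.10'}                         # app data bag
rdata = {'service_host': '10.5.0.1', 'service_port': '5000'}  # legacy unit data


def _resolve(key):
    # Prefer app data; fall back to unit data when the key is absent/empty.
    return adata.get(key) or rdata.get(key)


assert _resolve('service_host') == '10.5.0.10'
assert _resolve('service_port') == '5000'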

@@ -127,7 +127,9 @@ def deferred_events():
     """
     events = []
     for defer_file in deferred_events_files():
-        events.append((defer_file, read_event_file(defer_file)))
+        event = read_event_file(defer_file)
+        if event.policy_requestor_name == hookenv.service_name():
+            events.append((defer_file, event))
     return events


@@ -25,6 +25,7 @@ Helpers for high availability.

 import hashlib
 import json
+import os
 import re
@@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
+    application_name,
 )

 from charmhelpers.core.host import (
@@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(
 VIP_GROUP_NAME = 'grp_{service}_vips'
 DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
+HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"


 class DNSHAException(Exception):
@@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
         relation_data['groups'] = {
             key: ' '.join(vip_group)
         }
+
+
+def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
+    """Load grafana dashboard json model and insert prometheus datasource.
+
+    :param prometheus_app_name: name of the 'prometheus' application that will
+                                be used as datasource in grafana dashboard
+    :type prometheus_app_name: str
+    :param haproxy_dashboard: path to haproxy dashboard
+    :type haproxy_dashboard: str
+    :return: Grafana dashboard json model as a str.
+    :rtype: str
+    """
+    from charmhelpers.contrib.templating import jinja
+
+    dashboard_template = os.path.basename(haproxy_dashboard)
+    dashboard_template_dir = os.path.dirname(haproxy_dashboard)
+    app_name = application_name()
+    datasource = "{} - Juju generated source".format(prometheus_app_name)
+    return jinja.render(dashboard_template,
+                        {"datasource": datasource,
+                         "app_name": app_name,
+                         "prometheus_app_name": prometheus_app_name},
+                        template_dir=dashboard_template_dir,
+                        jinja_env_args={"variable_start_string": "<< ",
+                                        "variable_end_string": " >>"})

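Grafana dashboard JSON already uses {{ }} for its own template variables, which is why the helper renders with << >> delimiters instead. The same idea with plain jinja2 and a throwaway template string:

import jinja2

env = jinja2.Environment(variable_start_string="<< ",
                         variable_end_string=" >>")
template = env.from_string(
    '{"datasource": "<< datasource >>", "legend": "{{ instance }}"}')
# Grafana's native {{ instance }} placeholder survives the render untouched.
print(template.render(datasource="prometheus - Juju generated source"))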

@@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
     is_ipv6,
     get_ipv6_addr,
     resolve_network_cidr,
+    get_iface_for_address
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
     return unit_get(unit_get_fallback)


+def get_invalid_vips():
+    """Check if any of the provided vips are invalid.
+
+    A vip is invalid if it doesn't belong to the subnet in any interface.
+    If all vips are valid, this returns an empty list.
+
+    :returns: A list of strings, where each string is an invalid vip address.
+    :rtype: list
+    """
+
+    clustered = is_clustered()
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+    invalid_vips = []
+
+    if clustered and vips:
+        for vip in vips:
+            iface_for_vip = get_iface_for_address(vip)
+            if iface_for_vip is None:
+                invalid_vips.append(vip)
+
+    return invalid_vips
+
+
 def resolve_address(endpoint_type=PUBLIC, override=True):
     """Return unit address depending on net config.

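A consuming charm can surface mis-configured VIPs during status assessment. A sketch, assuming the charm chooses to block on invalid VIPs (that wiring is not part of this commit):

from charmhelpers.contrib.openstack.ip import get_invalid_vips
from charmhelpers.core.hookenv import status_set

invalid_vips = get_invalid_vips()
if invalid_vips:
    status_set('blocked',
               'Invalid VIP(s): {}'.format(', '.join(invalid_vips)))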

@@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None):
     for hosts_line in hosts:
         if hosts_line.rstrip():
             known_hosts_list.append(hosts_line.rstrip())
-    return(known_hosts_list)
+    return known_hosts_list


 def ssh_authorized_keys_lines(application_name, user=None):
@@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None):
     for authkey_line in keys:
         if authkey_line.rstrip():
             authorized_keys_list.append(authkey_line.rstrip())
-    return(authorized_keys_list)
+    return authorized_keys_list


 def ssh_compute_remove(public_key, application_name, user=None):


@@ -49,6 +49,11 @@ defaults

 listen stats
     bind {{ local_host }}:{{ stat_port }}
+{%- if stats_exporter_host and stats_exporter_port %}
+    bind {{ stats_exporter_host }}:{{ stats_exporter_port }}
+    option http-use-htx
+    http-request use-service prometheus-exporter if { path /metrics }
+{%- endif %}
     mode http
     stats enable
     stats hide-version


@@ -12,4 +12,6 @@ signing_dir = {{ signing_dir }}
 {% if service_type -%}
 service_type = {{ service_type }}
 {% endif -%}
+service_token_roles = {{ admin_role }}
+service_token_roles_required = True
 {% endif -%}


@@ -22,4 +22,6 @@ signing_dir = {{ signing_dir }}
 {% if use_memcache == true %}
 memcached_servers = {{ memcache_url }}
 {% endif -%}
+service_token_roles = {{ admin_role }}
+service_token_roles_required = True
 {% endif -%}


@@ -0,0 +1,11 @@
{% if auth_host -%}
[service_user]
send_service_user_token = true
auth_type = password
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
project_domain_id = default
user_domain_id = default
project_name = {{ admin_tenant_name }}
username = {{ admin_user }}
password = {{ admin_password }}
{% endif -%}


@@ -159,6 +159,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2021.2', 'xena'),
     ('2022.1', 'yoga'),
     ('2022.2', 'zed'),
+    ('2023.1', 'antelope'),
 ])

 # The ugly duckling - must list releases oldest to newest
@@ -1327,7 +1328,7 @@ def _check_listening_on_services_ports(services, test=False):
     @param test: default=False, if False, test for closed, otherwise open.
     @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
     """
-    test = not(not(test))  # ensure test is True or False
+    test = not (not (test))  # ensure test is True or False
     all_ports = list(itertools.chain(*services.values()))
     ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
     map_ports = OrderedDict()
@@ -1583,7 +1584,7 @@ def is_unit_paused_set():
         with unitdata.HookData()() as t:
             kv = t[0]
             # transform something truth-y into a Boolean.
-            return not(not(kv.get('unit-paused')))
+            return not (not (kv.get('unit-paused')))
     except Exception:
         return False
@@ -2181,7 +2182,7 @@ def is_unit_upgrading_set():
         with unitdata.HookData()() as t:
             kv = t[0]
             # transform something truth-y into a Boolean.
-            return not(not(kv.get('unit-upgrading')))
+            return not (not (kv.get('unit-upgrading')))
     except Exception:
         return False


@@ -173,7 +173,12 @@ def retrieve_secret_id(url, token):
     # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
     if not isinstance(client.adapter, hvac.adapters.Request):
         client.adapter = hvac.adapters.Request(base_uri=url, token=token)
-    response = client._post('/v1/sys/wrapping/unwrap')
+    try:
+        # hvac == 1.0.0 has an API to unwrap with the user token
+        response = client.sys.unwrap()
+    except AttributeError:
+        # fallback to hvac < 1.0.0
+        response = client._post('/v1/sys/wrapping/unwrap')
     if response.status_code == 200:
         data = response.json()
         return data['data']['secret_id']

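Call sites are unchanged: the helper now prefers the hvac >= 1.0.0 API and degrades to the private endpoint on older releases. A hypothetical caller (URL and wrapped token are placeholders):

from charmhelpers.contrib.openstack.vaultlocker import retrieve_secret_id

secret_id = retrieve_secret_id(url='http://vault.example:8200',
                               token='s.wrapped-one-shot-token')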

@@ -23,6 +23,12 @@ from subprocess import (
     call
 )

+from charmhelpers.core.hookenv import (
+    log,
+    WARNING,
+    INFO
+)
+

 def _luks_uuid(dev):
     """
@@ -110,7 +116,7 @@ def is_device_mounted(device):
     return bool(re.search(r'MOUNTPOINT=".+"', out))


-def mkfs_xfs(device, force=False, inode_size=1024):
+def mkfs_xfs(device, force=False, inode_size=None):
     """Format device with XFS filesystem.

     By default this should fail if the device already has a filesystem on it.
@@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
     :ptype device: str
     :param force: Force operation
     :ptype: force: boolean
-    :param inode_size: XFS inode size in bytes
+    :param inode_size: XFS inode size in bytes; if set to 0 or None,
+        the value used will be the XFS system default
     :ptype inode_size: int"""
     cmd = ['mkfs.xfs']
     if force:
         cmd.append("-f")

-    cmd += ['-i', "size={}".format(inode_size), device]
+    if inode_size:
+        if inode_size >= 256 and inode_size <= 2048:
+            cmd += ['-i', "size={}".format(inode_size)]
+        else:
+            log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
+    else:
+        log("Using XFS filesystem with system default inode size.", level=INFO)
+
+    cmd += [device]
     check_call(cmd)

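Note the default changed from 1024 to None, so callers that relied on the old implicit 1024 now get the XFS system default. Illustrative calls (device path assumed):

from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

mkfs_xfs('/dev/vdb')                               # system default inode size
mkfs_xfs('/dev/vdb', force=True, inode_size=512)   # explicit in-range value
mkfs_xfs('/dev/vdb', inode_size=8192)              # out of range: warn + default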

@@ -954,7 +954,7 @@ def pwgen(length=None):
     random_generator = random.SystemRandom()
     random_chars = [
         random_generator.choice(alphanumeric_chars) for _ in range(length)]
-    return(''.join(random_chars))
+    return ''.join(random_chars)


 def is_phy_iface(interface):


@@ -31,6 +31,7 @@ UBUNTU_RELEASES = (
     'impish',
     'jammy',
     'kinetic',
+    'lunar',
 )


@@ -171,8 +171,9 @@ class Storage(object):
     path parameter which causes sqlite3 to only build the db in memory.
     This should only be used for testing purposes.
     """
-    def __init__(self, path=None):
+    def __init__(self, path=None, keep_revisions=False):
         self.db_path = path
+        self.keep_revisions = keep_revisions
         if path is None:
             if 'UNIT_STATE_DB' in os.environ:
                 self.db_path = os.environ['UNIT_STATE_DB']
@@ -242,7 +243,7 @@ class Storage(object):
         Remove a key from the database entirely.
         """
         self.cursor.execute('delete from kv where key=?', [key])
-        if self.revision and self.cursor.rowcount:
+        if self.keep_revisions and self.revision and self.cursor.rowcount:
             self.cursor.execute(
                 'insert into kv_revisions values (?, ?, ?)',
                 [key, self.revision, json.dumps('DELETED')])
@@ -259,14 +260,14 @@ class Storage(object):
         if keys is not None:
             keys = ['%s%s' % (prefix, key) for key in keys]
             self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
-            if self.revision and self.cursor.rowcount:
+            if self.keep_revisions and self.revision and self.cursor.rowcount:
                 self.cursor.execute(
                     'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                     list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
         else:
             self.cursor.execute('delete from kv where key like ?',
                                 ['%s%%' % prefix])
-            if self.revision and self.cursor.rowcount:
+            if self.keep_revisions and self.revision and self.cursor.rowcount:
                 self.cursor.execute(
                     'insert into kv_revisions values (?, ?, ?)',
                     ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
@@ -299,7 +300,7 @@ class Storage(object):
                  where key = ?''', [serialized, key])

         # Save
-        if not self.revision:
+        if (not self.keep_revisions) or (not self.revision):
             return value
         self.cursor.execute(

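Revision history in kv_revisions is now opt-in, which stops the unit state database growing on every delete or update by default. A sketch of opting back in (in-memory database for illustration):

from charmhelpers.core.unitdata import Storage

db = Storage(path=':memory:', keep_revisions=True)
with db.hook_scope('config-changed'):
    db.set('foo', 'bar')   # with the flag, history lands in kv_revisions
    db.unset('foo')        # deletion tombstones are likewise kept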

@@ -230,6 +230,18 @@ CLOUD_ARCHIVE_POCKETS = {
     'zed/proposed': 'jammy-proposed/zed',
     'jammy-zed/proposed': 'jammy-proposed/zed',
     'jammy-proposed/zed': 'jammy-proposed/zed',
+    # antelope
+    'antelope': 'jammy-updates/antelope',
+    'jammy-antelope': 'jammy-updates/antelope',
+    'jammy-antelope/updates': 'jammy-updates/antelope',
+    'jammy-updates/antelope': 'jammy-updates/antelope',
+    'antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-proposed/antelope': 'jammy-proposed/antelope',
+    # OVN
+    'focal-ovn-22.03': 'focal-updates/ovn-22.03',
+    'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
 }
@@ -257,6 +269,7 @@ OPENSTACK_RELEASES = (
     'xena',
     'yoga',
     'zed',
+    'antelope',
 )
@@ -284,6 +297,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('impish', 'xena'),
     ('jammy', 'yoga'),
     ('kinetic', 'zed'),
+    ('lunar', 'antelope'),
 ])
@@ -363,6 +377,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
     :type quiet: bool
     :raises: subprocess.CalledProcessError
     """
+    if not packages:
+        log("Nothing to install", level=DEBUG)
+        return
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -687,6 +704,7 @@ def add_source(source, key=None, fail_invalid=False):
         (r"^cloud-archive:(.*)$", _add_apt_repository),
         (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
         (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)$", _add_cloud_pocket),
         (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@@ -750,6 +768,11 @@ def _add_apt_repository(spec):
     )


+def __write_sources_list_d_actual_pocket(file, actual_pocket):
+    with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
 def _add_cloud_pocket(pocket):
     """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
@@ -769,8 +792,9 @@ def _add_cloud_pocket(pocket):
                          'Unsupported cloud: source option %s' %
                          pocket)
     actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+    __write_sources_list_d_actual_pocket(
+        'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
+        actual_pocket)


 def _add_cloud_staging(cloud_archive_release, openstack_release):
@@ -931,10 +955,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
         try:
             result = subprocess.check_call(cmd, env=env, **kwargs)
         except subprocess.CalledProcessError as e:
-            retry_count = retry_count + 1
-            if retry_count > max_retries:
-                raise
             result = e.returncode
+            if result not in retry_results:
+                # a non-retriable exitcode was produced
+                raise
+            retry_count += 1
+            if retry_count > max_retries:
+                # a retriable exitcode was produced more than {max_retries} times
+                raise
             log(retry_message)
             time.sleep(CMD_RETRY_DELAY)

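With the new pocket mappings, an openstack-origin of cloud:jammy-antelope resolves straight to the UCA pocket. Roughly (root privileges implied by the apt sources write):

from charmhelpers.fetch.ubuntu import CLOUD_ARCHIVE_POCKETS, add_source

assert CLOUD_ARCHIVE_POCKETS['jammy-antelope'] == 'jammy-updates/antelope'
add_source('cloud:jammy-antelope')   # writes /etc/apt/sources.list.d/cloud-archive.list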

@@ -19,6 +19,7 @@ tags:
 series:
 - jammy
 - kinetic
+- lunar
 extra-bindings:
   data:
 provides:


@@ -1,7 +1,7 @@
 - project:
     templates:
       - charm-unit-jobs-py310
-      - charm-zed-functional-jobs
+      - charm-functional-jobs
     vars:
       needs_charm_build: true
       charm_build_name: neutron-openvswitch


@@ -26,3 +26,4 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
 git+https://opendev.org/openstack/tempest.git#egg=tempest

 croniter  # needed for charm-rabbitmq-server unit tests
+psutil


@@ -0,0 +1,222 @@
variables:
  openstack-origin: &openstack-origin cloud:jammy-antelope

series: &series jammy

machines:
  0:
    constraints: "mem=3072M"
  1:
    constraints: "mem=3072M"
  2:
    constraints: "mem=3072M"
  3: {}
  4: {}
  5: {}
  6: {}
  7: {}
  8:
    constraints: "root-disk=20G mem=4G"
  9:
    constraints: "root-disk=20G mem=4G"
  10: {}
  11: {}
  12: {}

# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications:

  keystone-mysql-router:
    charm: ch:mysql-router
    channel: latest/edge
  nova-mysql-router:
    charm: ch:mysql-router
    channel: latest/edge
  glance-mysql-router:
    charm: ch:mysql-router
    channel: latest/edge
  neutron-mysql-router:
    charm: ch:mysql-router
    channel: latest/edge
  placement-mysql-router:
    charm: ch:mysql-router
    channel: latest/edge
  vault-mysql-router:
    charm: ch:mysql-router
    channel: latest/edge

  mysql-innodb-cluster:
    charm: ch:mysql-innodb-cluster
    num_units: 3
    options:
      source: *openstack-origin
    to:
      - '0'
      - '1'
      - '2'
    channel: latest/edge

  rabbitmq-server:
    charm: ch:rabbitmq-server
    num_units: 1
    to:
      - '3'
    channel: latest/edge

  neutron-api:
    charm: ch:neutron-api
    series: *series
    num_units: 1
    options:
      manage-neutron-plugin-legacy-mode: true
      overlay-network-type: 'vxlan'
      l2-population: True
      flat-network-providers: physnet1
      neutron-security-groups: true
      openstack-origin: *openstack-origin
      enable-dvr: True
      enable-qos: True
    to:
      - '4'
    channel: latest/edge

  keystone:
    charm: ch:keystone
    num_units: 1
    options:
      openstack-origin: *openstack-origin
    to:
      - '5'
    channel: latest/edge

  glance:
    charm: ch:glance
    num_units: 1
    options:
      openstack-origin: *openstack-origin
    to:
      - '6'
    channel: latest/edge

  neutron-openvswitch:
    charm: ../../neutron-openvswitch.charm
    options:
      use-dvr-snat: True
      bridge-mappings: physnet1:br-ex
      enable-local-dhcp-and-metadata: True

  nova-cloud-controller:
    charm: ch:nova-cloud-controller
    num_units: 1
    options:
      network-manager: Neutron
      openstack-origin: *openstack-origin
    to:
      - '7'
    channel: latest/edge

  nova-compute:
    charm: ch:nova-compute
    num_units: 2
    options:
      config-flags: default_ephemeral_format=ext4
      enable-live-migration: true
      enable-resize: true
      migration-auth-type: ssh
      openstack-origin: *openstack-origin
    to:
      - '8'
      - '9'
    channel: latest/edge

  placement:
    charm: ch:placement
    num_units: 1
    options:
      openstack-origin: *openstack-origin
    to:
      - '10'
    channel: latest/edge
  vault:
    charm: ch:vault
    num_units: 1
    to:
      - '11'
    channel: latest/edge

  ovn-central:
    charm: ch:ovn-central
    num_units: 3
    options:
      source: *openstack-origin
    to:
      - '12'
    channel: latest/edge

  neutron-api-plugin-ovn:
    charm: ch:neutron-api-plugin-ovn
    channel: latest/edge

  ovn-chassis:
    charm: ch:ovn-chassis
    options:
      # start new units paused to allow unit by unit OVS to OVN migration
      new-units-paused: true
    channel: latest/edge

relations:
  - - 'neutron-api:amqp'
    - 'rabbitmq-server:amqp'
  - - 'neutron-api:neutron-api'
    - 'nova-cloud-controller:neutron-api'
  - - 'neutron-api:identity-service'
    - 'keystone:identity-service'
  - - 'nova-compute:neutron-plugin'
    - 'neutron-openvswitch:neutron-plugin'
  - - 'neutron-api:neutron-plugin-api'
    - 'neutron-openvswitch:neutron-plugin-api'
  - - 'nova-cloud-controller:amqp'
    - 'rabbitmq-server:amqp'
  - - 'nova-compute:amqp'
    - 'rabbitmq-server:amqp'
  - - 'neutron-openvswitch:amqp'
    - 'rabbitmq-server:amqp'
  - - 'nova-cloud-controller:identity-service'
    - 'keystone:identity-service'
  - - 'nova-cloud-controller:cloud-compute'
    - 'nova-compute:cloud-compute'
  - - 'glance:identity-service'
    - 'keystone:identity-service'
  - - 'glance:amqp'
    - 'rabbitmq-server:amqp'
  - - 'nova-compute:image-service'
    - 'glance:image-service'
  - - 'nova-cloud-controller:image-service'
    - 'glance:image-service'
  - - 'placement:identity-service'
    - 'keystone:identity-service'
  - - 'placement:placement'
    - 'nova-cloud-controller:placement'
  - ["keystone:shared-db", "keystone-mysql-router:shared-db"]
  - ["glance:shared-db", "glance-mysql-router:shared-db"]
  - ["nova-cloud-controller:shared-db", "nova-mysql-router:shared-db"]
  - ["neutron-api:shared-db", "neutron-mysql-router:shared-db"]
  - ["placement:shared-db", "placement-mysql-router:shared-db"]
  - ["keystone-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
  - ["nova-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
  - ["glance-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
  - ["neutron-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
  - ["placement-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
  # We need to defer the addition of the neutron-api-plugin-ovn subordinate
  # relation to the functional test as the test will first validate the legacy
  # Neutron ML2+OVS topology, migrate it to OVN and then confirm connectivity
  # post migration.
  #
  # - - neutron-api-plugin-ovn:neutron-plugin
  #   - neutron-api:neutron-plugin-api-subordinate
  - - ovn-central:certificates
    - vault:certificates
  - - ovn-central:ovsdb-cms
    - neutron-api-plugin-ovn:ovsdb-cms
  - - ovn-chassis:nova-compute
    - nova-compute:neutron-plugin
  - - ovn-chassis:certificates
    - vault:certificates
  - - ovn-chassis:ovsdb
    - ovn-central:ovsdb
  - - vault:certificates
    - neutron-api-plugin-ovn:certificates
  - - vault:shared-db
    - vault-mysql-router:shared-db
  - - vault-mysql-router:db-router
    - mysql-innodb-cluster:db-router


@@ -1,7 +1,7 @@
 variables:
   openstack-origin: &openstack-origin distro

-series: &series jammy
+series: &series lunar

 machines:
   0:


@@ -6,15 +6,15 @@ charm_name: neutron-openvswitch
 # OVS to OVN.

 smoke_bundles:
-  - jammy-yoga
+  - jammy-zed
 gate_bundles:
-  - jammy-yoga
+  - jammy-zed
 dev_bundles:
-  - jammy-yoga
-  - jammy-zed
+  - jammy-antelope
   - kinetic-zed
+  - lunar-antelope

 configure:
   - zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation
@@ -58,3 +58,4 @@ tests_options:
   zaza.openstack.charm_tests.neutron.tests.NeutronNetworkingTest.test_instances_have_networking.run_resource_cleanup: false
   force_deploy:
     - kinetic-zed-dvr-snat
+    - lunar-antelope-dvr-snat


@@ -25,7 +25,7 @@ setenv = VIRTUAL_ENV={envdir}
 commands = stestr run --slowest {posargs}
 allowlist_externals =
     charmcraft
-    rename.sh
+    {toxinidir}/rename.sh
 passenv =
     HOME
     TERM