From a6de5f8d77be1743154c3dfd02b23ab44585c7ee Mon Sep 17 00:00:00 2001
From: Corey Bryant <corey.bryant@canonical.com>
Date: Tue, 14 Feb 2023 21:26:01 +0000
Subject: [PATCH] Add Antelope support

* sync charm-helpers to classic charms
* change openstack-origin/source default to antelope
* align testing with antelope
* add new antelope bundles
* add antelope bundles to tests.yaml
* add antelope tests to osci.yaml and .zuul.yaml
* update build-on and run-on bases

Change-Id: Id93b0ee0999c7a23ef34e68646b03d910816c655
---
 .zuul.yaml                                    |   2 +-
 charmcraft.yaml                               |   3 +
 config.yaml                                   |   2 +-
 .../charmhelpers/contrib/charmsupport/nrpe.py |  16 +
 .../charmhelpers/contrib/hahelpers/cluster.py |   2 +-
 hooks/charmhelpers/contrib/network/ip.py      |   2 +-
 .../contrib/network/ovs/__init__.py           |   2 +-
 hooks/charmhelpers/contrib/network/ovs/ovn.py |  13 +
 .../charmhelpers/contrib/network/ovs/ovsdb.py |  60 +++-
 .../charmhelpers/contrib/openstack/context.py | 149 +++++++--
 .../contrib/openstack/ha/utils.py             |  29 ++
 hooks/charmhelpers/contrib/openstack/ip.py    |  25 ++
 .../contrib/openstack/ssh_migrations.py       |   4 +-
 .../contrib/openstack/templates/haproxy.cfg   |   5 +
 .../templates/section-keystone-authtoken      |   2 +
 .../openstack/templates/section-service-user  |  11 +
 hooks/charmhelpers/contrib/openstack/utils.py |   7 +-
 .../contrib/openstack/vaultlocker.py          |   7 +-
 .../contrib/storage/linux/utils.py            |  21 +-
 hooks/charmhelpers/core/host.py               |   2 +-
 .../charmhelpers/core/host_factory/ubuntu.py  |   1 +
 hooks/charmhelpers/core/unitdata.py           |  11 +-
 hooks/charmhelpers/fetch/ubuntu.py            |  38 ++-
 metadata.yaml                                 |   1 +
 osci.yaml                                     |   2 +-
 test-requirements.txt                         |   1 +
 tests/bundles/jammy-antelope.yaml             | 287 ++++++++++++++++++
 .../{jammy-yoga.yaml => lunar-antelope.yaml}  |   2 +-
 tests/tests.yaml                              |   9 +-
 tox.ini                                       |   2 +-
 30 files changed, 641 insertions(+), 77 deletions(-)
 create mode 100644 hooks/charmhelpers/contrib/openstack/templates/section-service-user
 create mode 100644 tests/bundles/jammy-antelope.yaml
 rename tests/bundles/{jammy-yoga.yaml => lunar-antelope.yaml} (99%)

diff --git a/.zuul.yaml b/.zuul.yaml
index 23bf5f62..fd20909e 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,4 +1,4 @@
 - project:
     templates:
-      - openstack-python3-charm-zed-jobs
+      - openstack-python3-charm-jobs
       - openstack-cover-jobs
diff --git a/charmcraft.yaml b/charmcraft.yaml
index 102ded4c..f6121727 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -33,3 +33,6 @@ bases:
       - name: ubuntu
         channel: "22.10"
         architectures: [amd64, s390x, ppc64el, arm64]
+      - name: ubuntu
+        channel: "23.04"
+        architectures: [amd64, s390x, ppc64el, arm64]
diff --git a/config.yaml b/config.yaml
index 3f3418e2..8d2c7574 100644
--- a/config.yaml
+++ b/config.yaml
@@ -14,7 +14,7 @@ options:
       Setting this to True will allow supporting services to log to syslog.
   openstack-origin:
     type: string
-    default: zed
+    default: antelope
     description: |
       Repository from which to install. May be one of the following:
       distro (default), ppa:somecustom/ppa, a deb url sources entry,
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index bad7a533..ac002bc6 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -19,6 +19,7 @@
 
 import glob
 import grp
+import json
 import os
 import pwd
 import re
@@ -30,6 +31,7 @@ import yaml
 from charmhelpers.core.hookenv import (
     application_name,
     config,
+    ERROR,
     hook_name,
     local_unit,
     log,
@@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
     :param str unit_name: Unit name to use in check description
     :param bool immediate_check: For sysv init, run the service check immediately
     """
+    # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
+    # Just remove check_haproxy if haproxy is added as an lsb resource in hacluster.
+    for rid in relation_ids("ha"):
+        ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
+        if ha_resources:
+            try:
+                ha_resources_parsed = json.loads(ha_resources)
+            except ValueError as e:
+                log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
+                raise
+            if "lsb:haproxy" in ha_resources_parsed.values():
+                if "haproxy" in services:
+                    log("removed check_haproxy. This service will be monitored by check_crm")
+                    services.remove("haproxy")
     for svc in services:
         # Don't add a check for these services from neutron-gateway
         if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
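
For context, the hacluster charm publishes its configured resources as JSON on the ha relation; a minimal sketch of the payload shape that triggers the check_haproxy removal above (the resource name is hypothetical):

    import json

    # Hypothetical 'json_resources' value as published by hacluster.
    ha_resources = '{"res_neutron_haproxy": "lsb:haproxy"}'
    parsed = json.loads(ha_resources)
    if "lsb:haproxy" in parsed.values():
        # haproxy is managed as an lsb resource; check_crm covers it,
        # so the redundant check_haproxy NRPE check is dropped.
        pass
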
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
index 146beba6..ffda5fe1 100644
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -324,7 +324,7 @@ def valid_hacluster_config():
     '''
     vip = config_get('vip')
     dns = config_get('dns-ha')
-    if not(bool(vip) ^ bool(dns)):
+    if not (bool(vip) ^ bool(dns)):
         msg = ('HA: Either vip or dns-ha must be set but not both in order to '
                'use high availability')
         status_set('blocked', msg)
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index f8edf37a..cf9926b9 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -539,7 +539,7 @@ def port_has_listener(address, port):
     """
     cmd = ['nc', '-z', address, str(port)]
     result = subprocess.call(cmd)
-    return not(bool(result))
+    return not (bool(result))
 
 
 def assert_charm_supports_ipv6():
diff --git a/hooks/charmhelpers/contrib/network/ovs/__init__.py b/hooks/charmhelpers/contrib/network/ovs/__init__.py
index 3f5af878..e0e9de0d 100644
--- a/hooks/charmhelpers/contrib/network/ovs/__init__.py
+++ b/hooks/charmhelpers/contrib/network/ovs/__init__.py
@@ -648,7 +648,7 @@ def patch_ports_on_bridge(bridge):
                     uuid_for_port(
                         interface['options']['peer'])),
                     interface['options']['peer'])
-                yield(Patch(this_end, other_end))
+                yield Patch(this_end, other_end)
             # We expect one result and it is ok if it turns out to be a port
             # for a different bridge. However we need a break here to satisfy
             # the for/else check which is in place to detect interface referring
diff --git a/hooks/charmhelpers/contrib/network/ovs/ovn.py b/hooks/charmhelpers/contrib/network/ovs/ovn.py
index 2075f11a..83170277 100644
--- a/hooks/charmhelpers/contrib/network/ovs/ovn.py
+++ b/hooks/charmhelpers/contrib/network/ovs/ovn.py
@@ -139,6 +139,19 @@ class OVNClusterStatus(object):
         """
         return self.leader == 'self'
 
+    def to_yaml(self):
+        """Return yaml-serializable dict representation of this object.
+
+        :returns: dictionary suitable for serialization by yaml.safe_dump()
+        :rtype: Dict[str, Any]
+        """
+        yaml_dict = dict(self.__dict__)  # copy so we don't mutate the instance
+        # Convert types that are not natively convertible to yaml
+        yaml_dict["cluster_id"] = str(self.cluster_id)
+        yaml_dict["server_id"] = str(self.server_id)
+
+        return yaml_dict
+
 
 def cluster_status(target, schema=None, use_ovs_appctl=False, rundir=None):
     """Retrieve status information from clustered OVSDB.
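
A minimal usage sketch for the new to_yaml() helper, assuming a reachable OVN northbound database; whether the result feeds an action output or a log message is up to the charm:

    import yaml
    from charmhelpers.contrib.network.ovs import ovn

    status = ovn.cluster_status('ovnnb_db')
    # cluster_id/server_id are stringified so the dict is safe_dump-able.
    print(yaml.safe_dump(status.to_yaml()))
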
diff --git a/hooks/charmhelpers/contrib/network/ovs/ovsdb.py b/hooks/charmhelpers/contrib/network/ovs/ovsdb.py
index 975f3a7d..04546c10 100644
--- a/hooks/charmhelpers/contrib/network/ovs/ovsdb.py
+++ b/hooks/charmhelpers/contrib/network/ovs/ovsdb.py
@@ -205,7 +205,7 @@ class SimpleOVSDB(object):
                         decoded_set = []
                         for el in data[1]:
                             decoded_set.append(self._deserialize_ovsdb(el))
-                        return(decoded_set)
+                        return decoded_set
                     # fall back to normal processing below
                     break
 
@@ -213,20 +213,14 @@ class SimpleOVSDB(object):
             f = ovs_type_cb_map.get(data[0], str)
             return f(data[1])
 
-        def _find_tbl(self, condition=None):
-            """Run and parse output of OVSDB `find` command.
+        def _cmd_deserialize_data_generator(self, cmd):
+            """Run command and provide generator with deserialized data.
 
-            :param condition: An optional RFC 7047 5.1 match condition
-            :type condition: Optional[str]
-            :returns: Dictionary with data
-            :rtype: Dict[str, any]
+            :param cmd: Command and arguments to run.
+            :type cmd: Iterable[str]
+            :returns: Deserialized data.
+            :rtype: Generator[Dict[str, any], None, None]
             """
-            cmd = [self._tool]
-            if self._args:
-                cmd.extend(self._args)
-            cmd.extend(['-f', 'json', 'find', self._table])
-            if condition:
-                cmd.append(condition)
             output = utils._run(*cmd)
             data = json.loads(output)
             for row in data['data']:
@@ -238,9 +232,49 @@ class SimpleOVSDB(object):
                         values.append(col)
                 yield dict(zip(data['headings'], values))
 
+        def _get_command(self):
+            """Get base command.
+
+            :rtype: List[str]
+            """
+            cmd = [self._tool]
+            if self._args:
+                cmd.extend(self._args)
+            cmd.extend(['-f', 'json'])
+            return cmd
+
+        def _find_tbl(self, condition=None):
+            """Run and parse output of OVSDB `find` command.
+
+            :param condition: An optional RFC 7047 5.1 match condition
+            :type condition: Optional[str]
+            :returns: Generator of dictionaries with data
+            :rtype: Generator[Dict[str, any], None, None]
+            """
+            cmd = self._get_command()
+            cmd.extend(['find', self._table])
+            if condition:
+                cmd.append(condition)
+            return self._cmd_deserialize_data_generator(cmd)
+
+        def _list_tbl_record(self, record):
+            """Run and parse output of OVSDB `list` command for record.
+
+            :param record: The UUID of the record to list data for.
+            :type record: uuid.UUID
+            :returns: Dictionary with data
+            :rtype: Dict[str, any]
+            """
+            cmd = self._get_command()
+            cmd.extend(['list', self._table, str(record)])
+            return next(self._cmd_deserialize_data_generator(cmd))
+
         def __iter__(self):
             return self._find_tbl()
 
+        def __getitem__(self, key):
+            return self._list_tbl_record(key)
+
         def clear(self, rec, col):
             utils._run(self._tool, 'clear', self._table, rec, col)
 
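
The new _list_tbl_record/__getitem__ pair complements the existing find-based iteration: iterating a table still runs an ovsdb `find`, while indexing by UUID now runs a targeted `list`. A sketch, assuming a host with Open vSwitch running (the '_uuid' column name is as emitted by ovs-vsctl's json output):

    from charmhelpers.contrib.network.ovs.ovsdb import SimpleOVSDB

    ovs = SimpleOVSDB('ovs-vsctl')
    for bridge in ovs.bridge:                 # `find` across the whole table
        print(bridge['name'], bridge['_uuid'])

    first = next(iter(ovs.bridge))
    same_row = ovs.bridge[first['_uuid']]     # `list` for a single record
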
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index 970a657b..d894b6a6 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -25,6 +25,7 @@ import socket
 import time
 
 from base64 import b64decode
+from distutils.version import LooseVersion
 from subprocess import (
     check_call,
     check_output,
@@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
 from charmhelpers.fetch import (
     apt_install,
     filter_installed_packages,
+    get_installed_version,
 )
 from charmhelpers.core.hookenv import (
     NoNetworkBinding,
@@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import (
     network_get_primary_address,
     WARNING,
     service_name,
+    remote_service_name,
 )
 
 from charmhelpers.core.sysctl import create as sysctl_create
@@ -130,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 ADDRESS_TYPES = ['admin', 'internal', 'public']
 HAPROXY_RUN_DIR = '/var/run/haproxy/'
 DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
+DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404
 
 
 def ensure_packages(packages):
@@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):
 
 class IdentityServiceContext(OSContextGenerator):
 
+    _forward_compat_remaps = {
+        'admin_user': 'admin-user-name',
+        'service_username': 'service-user-name',
+        'service_tenant': 'service-project-name',
+        'service_tenant_id': 'service-project-id',
+        'service_domain': 'service-domain-name',
+    }
+
     def __init__(self,
                  service=None,
                  service_user=None,
@@ -397,11 +409,16 @@ class IdentityServiceContext(OSContextGenerator):
         # 'www_authenticate_uri' replaced 'auth_uri' since Stein,
         # see keystonemiddleware upstream sources for more info
         if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
-            c.update((
-                ('www_authenticate_uri', "{}://{}:{}/v3".format(
-                    ctxt.get('service_protocol', ''),
-                    ctxt.get('service_host', ''),
-                    ctxt.get('service_port', ''))),))
+            if 'public_auth_url' in ctxt:
+                c.update((
+                    ('www_authenticate_uri', '{}/v3'.format(
+                        ctxt.get('public_auth_url'))),))
+            else:
+                c.update((
+                    ('www_authenticate_uri', "{}://{}:{}/v3".format(
+                        ctxt.get('service_protocol', ''),
+                        ctxt.get('service_host', ''),
+                        ctxt.get('service_port', ''))),))
         else:
             c.update((
                 ('auth_uri', "{}://{}:{}/v3".format(
@@ -409,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator):
                     ctxt.get('service_host', ''),
                     ctxt.get('service_port', ''))),))
 
+        if 'internal_auth_url' in ctxt:
+            c.update((
+                ('auth_url', ctxt.get('internal_auth_url')),))
+        else:
+            c.update((
+                ('auth_url', "{}://{}:{}/v3".format(
+                    ctxt.get('auth_protocol', ''),
+                    ctxt.get('auth_host', ''),
+                    ctxt.get('auth_port', ''))),))
+
         c.update((
-            ('auth_url', "{}://{}:{}/v3".format(
-                ctxt.get('auth_protocol', ''),
-                ctxt.get('auth_host', ''),
-                ctxt.get('auth_port', ''))),
             ('project_domain_name', ctxt.get('admin_domain_name', '')),
             ('user_domain_name', ctxt.get('admin_domain_name', '')),
             ('project_name', ctxt.get('admin_tenant_name', '')),
@@ -441,39 +464,86 @@ class IdentityServiceContext(OSContextGenerator):
         for rid in relation_ids(self.rel_name):
             self.related = True
             for unit in related_units(rid):
+                rdata = {}
+                # NOTE(jamespage):
+                # forwards compat with application data
+                # bag driven approach to relation.
+                _adata = relation_get(rid=rid, app=remote_service_name(rid))
+                adata = {}
+                # if no app data bag presented - fallback
+                # to legacy unit based relation data
                 rdata = relation_get(rid=rid, unit=unit)
-                serv_host = rdata.get('service_host')
+                if _adata:
+                    # New app data bag uses - instead of _
+                    # in key names - remap for compat with
+                    # existing relation data keys
+                    for key, value in _adata.items():
+                        if key == 'api-version':
+                            adata[key.replace('-', '_')] = value.strip('v')
+                        else:
+                            adata[key.replace('-', '_')] = value
+                    # Re-map some keys for backwards compatibility
+                    for target, source in self._forward_compat_remaps.items():
+                        adata[target] = _adata.get(source)
+                # Now preferentially get data from the app data bag, but if
+                # it's not available, get it from the legacy based relation
+                # data.
+
+                def _resolve(key):
+                    return adata.get(key) or rdata.get(key)
+
+                serv_host = _resolve('service_host')
                 serv_host = format_ipv6_addr(serv_host) or serv_host
-                auth_host = rdata.get('auth_host')
+                auth_host = _resolve('auth_host')
                 auth_host = format_ipv6_addr(auth_host) or auth_host
-                int_host = rdata.get('internal_host')
+                int_host = _resolve('internal_host')
                 int_host = format_ipv6_addr(int_host) or int_host
-                svc_protocol = rdata.get('service_protocol') or 'http'
-                auth_protocol = rdata.get('auth_protocol') or 'http'
-                int_protocol = rdata.get('internal_protocol') or 'http'
-                api_version = rdata.get('api_version') or '2.0'
-                ctxt.update({'service_port': rdata.get('service_port'),
+                svc_protocol = _resolve('service_protocol') or 'http'
+                auth_protocol = _resolve('auth_protocol') or 'http'
+                admin_role = _resolve('admin_role') or 'Admin'
+                int_protocol = _resolve('internal_protocol') or 'http'
+                api_version = _resolve('api_version') or '2.0'
+                ctxt.update({'service_port': _resolve('service_port'),
                              'service_host': serv_host,
                              'auth_host': auth_host,
-                             'auth_port': rdata.get('auth_port'),
+                             'auth_port': _resolve('auth_port'),
                              'internal_host': int_host,
-                             'internal_port': rdata.get('internal_port'),
-                             'admin_tenant_name': rdata.get('service_tenant'),
-                             'admin_user': rdata.get('service_username'),
-                             'admin_password': rdata.get('service_password'),
+                             'internal_port': _resolve('internal_port'),
+                             'admin_tenant_name': _resolve('service_tenant'),
+                             'admin_user': _resolve('service_username'),
+                             'admin_password': _resolve('service_password'),
+                             'admin_role': admin_role,
                              'service_protocol': svc_protocol,
                              'auth_protocol': auth_protocol,
                              'internal_protocol': int_protocol,
                              'api_version': api_version})
 
-                if rdata.get('service_type'):
-                    ctxt['service_type'] = rdata.get('service_type')
+                service_type = _resolve('service_type')
+                if service_type:
+                    ctxt['service_type'] = service_type
 
                 if float(api_version) > 2:
                     ctxt.update({
-                        'admin_domain_name': rdata.get('service_domain'),
-                        'service_project_id': rdata.get('service_tenant_id'),
-                        'service_domain_id': rdata.get('service_domain_id')})
+                        'admin_domain_name': _resolve('service_domain'),
+                        'service_project_id': _resolve('service_tenant_id'),
+                        'service_domain_id': _resolve('service_domain_id')})
+
+                # NOTE:
+                # The keystone-k8s operator presents full URLs for all three
+                # endpoints - public and internal are externally addressable
+                # for machine based charms
+                public_auth_url = _resolve('public_auth_url')
+                if public_auth_url:
+                    ctxt.update({
+                        'public_auth_url': public_auth_url,
+                    })
+                internal_auth_url = _resolve('internal_auth_url')
+                if internal_auth_url:
+                    ctxt.update({
+                        'internal_auth_url': internal_auth_url,
+                    })
 
                 # we keep all variables in ctxt for compatibility and
                 # add nested dictionary for keystone_authtoken generic
@@ -487,8 +557,8 @@ class IdentityServiceContext(OSContextGenerator):
                     # NOTE(jamespage) this is required for >= icehouse
                     # so a missing value just indicates keystone needs
                     # upgrading
-                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
-                    ctxt['admin_domain_id'] = rdata.get('service_domain_id')
+                    ctxt['admin_tenant_id'] = _resolve('service_tenant_id')
+                    ctxt['admin_domain_id'] = _resolve('service_domain_id')
                     return ctxt
 
         return {}
@@ -860,9 +930,14 @@ class HAProxyContext(OSContextGenerator):
     interfaces = ['cluster']
 
     def __init__(self, singlenode_mode=False,
-                 address_types=ADDRESS_TYPES):
+                 address_types=None,
+                 exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
+        if address_types is None:
+            address_types = ADDRESS_TYPES[:]
+
         self.address_types = address_types
         self.singlenode_mode = singlenode_mode
+        self.exporter_stats_port = exporter_stats_port
 
     def __call__(self):
         if not os.path.isdir(HAPROXY_RUN_DIR):
@@ -957,10 +1032,20 @@ class HAProxyContext(OSContextGenerator):
         db = kv()
         ctxt['stat_password'] = db.get('stat-password')
         if not ctxt['stat_password']:
-            ctxt['stat_password'] = db.set('stat-password',
-                                           pwgen(32))
+            ctxt['stat_password'] = db.set('stat-password', pwgen(32))
             db.flush()
 
+        # NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0
+        #                 A new bind will be created and the prometheus-exporter
+        #                 service will handle the /metrics path; the exporter
+        #                 bind does not require the stats auth.
+        haproxy_version = get_installed_version("haproxy")
+        if (haproxy_version and
+                haproxy_version.ver_str >= LooseVersion("2.0.0") and
+                is_relation_made("haproxy-exporter")):
+            ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
+            ctxt["stats_exporter_port"] = self.exporter_stats_port
+
         for frontend in cluster_hosts:
             if (len(cluster_hosts[frontend]['backends']) > 1 or
                     self.singlenode_mode):
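
A standalone sketch of the key remapping performed above when keystone publishes an application data bag; the sample values are hypothetical and only the dash-to-underscore and api-version handling are shown:

    _forward_compat_remaps = {
        'admin_user': 'admin-user-name',
        'service_username': 'service-user-name',
        'service_tenant': 'service-project-name',
        'service_tenant_id': 'service-project-id',
        'service_domain': 'service-domain-name',
    }

    app_data = {'api-version': 'v3', 'service-user-name': 'nova'}  # hypothetical
    adata = {}
    for key, value in app_data.items():
        # app data bags use '-' where unit relation data used '_'
        adata[key.replace('-', '_')] = (
            value.strip('v') if key == 'api-version' else value)
    for target, source in _forward_compat_remaps.items():
        adata[target] = app_data.get(source)
    # adata['api_version'] == '3', adata['service_username'] == 'nova';
    # _resolve() then prefers these values over legacy unit relation data.
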
diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py
index a5cbdf53..b4912c42 100644
--- a/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/ha/utils.py
@@ -25,6 +25,7 @@ Helpers for high availability.
 
 import hashlib
 import json
+import os
 
 import re
 
@@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
+    application_name,
 )
 
 from charmhelpers.core.host import (
@@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(
 
 VIP_GROUP_NAME = 'grp_{service}_vips'
 DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
+HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"
 
 
 class DNSHAException(Exception):
@@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
             relation_data['groups'] = {
                 key: ' '.join(vip_group)
             }
+
+
+def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
+    """Load grafana dashboard json model and insert prometheus datasource.
+
+    :param prometheus_app_name: name of the 'prometheus' application that will
+                                be used as datasource in grafana dashboard
+    :type prometheus_app_name: str
+    :param haproxy_dashboard: path to haproxy dashboard
+    :type haproxy_dashboard: str
+    :return: Grafana dashboard json model as a str.
+    :rtype: str
+    """
+    from charmhelpers.contrib.templating import jinja
+
+    dashboard_template = os.path.basename(haproxy_dashboard)
+    dashboard_template_dir = os.path.dirname(haproxy_dashboard)
+    app_name = application_name()
+    datasource = "{} - Juju generated source".format(prometheus_app_name)
+    return jinja.render(dashboard_template,
+                        {"datasource": datasource,
+                         "app_name": app_name,
+                         "prometheus_app_name": prometheus_app_name},
+                        template_dir=dashboard_template_dir,
+                        jinja_env_args={"variable_start_string": "<< ",
+                                        "variable_end_string": " >>"})
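
A usage sketch for render_grafana_dashboard; the dashboard path and prometheus application name are hypothetical. The '<< ... >>' jinja delimiters are chosen so they do not collide with Grafana's own '{{ ... }}' templating inside the dashboard JSON:

    from charmhelpers.contrib.openstack.ha.utils import render_grafana_dashboard

    dashboard_json = render_grafana_dashboard(
        prometheus_app_name='prometheus2',
        haproxy_dashboard='files/grafana/haproxy-dashboard.json.j2')
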
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index b8c94c56..2afad369 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
     is_ipv6,
     get_ipv6_addr,
     resolve_network_cidr,
+    get_iface_for_address
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
@@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
         return unit_get(unit_get_fallback)
 
 
+def get_invalid_vips():
+    """Check if any of the provided vips are invalid.
+
+    A vip is invalid if it doesn't belong to the subnet of any interface.
+    If all vips are valid, this returns an empty list.
+
+    :returns: A list of strings, where each string is an invalid vip address.
+    :rtype: list
+    """
+
+    clustered = is_clustered()
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+    invalid_vips = []
+
+    if clustered and vips:
+        for vip in vips:
+            iface_for_vip = get_iface_for_address(vip)
+            if iface_for_vip is None:
+                invalid_vips.append(vip)
+
+    return invalid_vips
+
+
 def resolve_address(endpoint_type=PUBLIC, override=True):
     """Return unit address depending on net config.
 
diff --git a/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/hooks/charmhelpers/contrib/openstack/ssh_migrations.py
index 96b9f71d..0512e3a5 100644
--- a/hooks/charmhelpers/contrib/openstack/ssh_migrations.py
+++ b/hooks/charmhelpers/contrib/openstack/ssh_migrations.py
@@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None):
         for hosts_line in hosts:
             if hosts_line.rstrip():
                 known_hosts_list.append(hosts_line.rstrip())
-    return(known_hosts_list)
+    return known_hosts_list
 
 
 def ssh_authorized_keys_lines(application_name, user=None):
@@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None):
         for authkey_line in keys:
             if authkey_line.rstrip():
                 authorized_keys_list.append(authkey_line.rstrip())
-    return(authorized_keys_list)
+    return authorized_keys_list
 
 
 def ssh_compute_remove(public_key, application_name, user=None):
diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
index 626ecbab..da2522f6 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -49,6 +49,11 @@ defaults
 
 listen stats
     bind {{ local_host }}:{{ stat_port }}
+{%- if stats_exporter_host and stats_exporter_port %}
+    bind {{ stats_exporter_host }}:{{ stats_exporter_port }}
+    option http-use-htx
+    http-request use-service prometheus-exporter if { path /metrics }
+{%- endif %}
     mode http
     stats enable
     stats hide-version
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
index c9b01528..dbad506f 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
@@ -12,4 +12,6 @@ signing_dir = {{ signing_dir }}
 {% if service_type -%}
 service_type = {{ service_type }}
 {% endif -%}
+service_token_roles = {{ admin_role }}
+service_token_roles_required = True
 {% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-service-user b/hooks/charmhelpers/contrib/openstack/templates/section-service-user
new file mode 100644
index 00000000..c740cc28
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-service-user
@@ -0,0 +1,11 @@
+{% if auth_host -%}
+[service_user]
+send_service_user_token = true
+auth_type = password
+auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
+project_domain_id = default
+user_domain_id = default
+project_name = {{ admin_tenant_name }}
+username = {{ admin_user }}
+password = {{ admin_password }}
+{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 1fa2814a..3d52eb16 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -159,6 +159,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2021.2', 'xena'),
     ('2022.1', 'yoga'),
     ('2022.2', 'zed'),
+    ('2023.1', 'antelope'),
 ])
 
 # The ugly duckling - must list releases oldest to newest
@@ -1327,7 +1328,7 @@ def _check_listening_on_services_ports(services, test=False):
     @param test: default=False, if False, test for closed, otherwise open.
     @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
     """
-    test = not(not(test))  # ensure test is True or False
+    test = not (not (test))  # ensure test is True or False
     all_ports = list(itertools.chain(*services.values()))
     ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
     map_ports = OrderedDict()
@@ -1583,7 +1584,7 @@ def is_unit_paused_set():
         with unitdata.HookData()() as t:
             kv = t[0]
             # transform something truth-y into a Boolean.
-            return not(not(kv.get('unit-paused')))
+            return not (not (kv.get('unit-paused')))
     except Exception:
         return False
 
@@ -2181,7 +2182,7 @@ def is_unit_upgrading_set():
         with unitdata.HookData()() as t:
             kv = t[0]
             # transform something truth-y into a Boolean.
-            return not(not(kv.get('unit-upgrading')))
+            return not (not (kv.get('unit-upgrading')))
     except Exception:
         return False
 
diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
index e5418c39..002bc579 100644
--- a/hooks/charmhelpers/contrib/openstack/vaultlocker.py
+++ b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
@@ -173,7 +173,12 @@ def retrieve_secret_id(url, token):
         # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
         if not isinstance(client.adapter, hvac.adapters.Request):
             client.adapter = hvac.adapters.Request(base_uri=url, token=token)
-    response = client._post('/v1/sys/wrapping/unwrap')
+    try:
+        # hvac == 1.0.0 has an API to unwrap with the user token
+        response = client.sys.unwrap()
+    except AttributeError:
+        # fallback to hvac < 1.0.0
+        response = client._post('/v1/sys/wrapping/unwrap')
     if response.status_code == 200:
         data = response.json()
         return data['data']['secret_id']
diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py
index a3561760..4d05b121 100644
--- a/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -23,6 +23,12 @@ from subprocess import (
     call
 )
 
+from charmhelpers.core.hookenv import (
+    log,
+    WARNING,
+    INFO
+)
+
 
 def _luks_uuid(dev):
     """
@@ -110,7 +116,7 @@ def is_device_mounted(device):
     return bool(re.search(r'MOUNTPOINT=".+"', out))
 
 
-def mkfs_xfs(device, force=False, inode_size=1024):
+def mkfs_xfs(device, force=False, inode_size=None):
     """Format device with XFS filesystem.
 
     By default this should fail if the device already has a filesystem on it.
@@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
     :ptype device: str
     :param force: Force operation
     :ptype: force: boolean
-    :param inode_size: XFS inode size in bytes
+    :param inode_size: XFS inode size in bytes; if set to 0 or None,
+        the value used will be the XFS system default
     :ptype inode_size: int"""
     cmd = ['mkfs.xfs']
     if force:
         cmd.append("-f")
 
-    cmd += ['-i', "size={}".format(inode_size), device]
+    if inode_size:
+        if inode_size >= 256 and inode_size <= 2048:
+            cmd += ['-i', "size={}".format(inode_size)]
+        else:
+            log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
+    else:
+        log("Using XFS filesystem with system default inode size.", level=INFO)
+
+    cmd += [device]
     check_call(cmd)
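
A sketch of the changed mkfs_xfs default, assuming hypothetical block devices: omitting inode_size (or passing 0/None) now uses the XFS system default, an in-range value is passed through, and an out-of-range value is ignored with a warning:

    from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

    mkfs_xfs('/dev/vdb', force=True)                  # system default inode size
    mkfs_xfs('/dev/vdc', force=True, inode_size=512)  # adds '-i size=512'
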
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index ef6c8eca..70dde6a5 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -954,7 +954,7 @@ def pwgen(length=None):
     random_generator = random.SystemRandom()
     random_chars = [
         random_generator.choice(alphanumeric_chars) for _ in range(length)]
-    return(''.join(random_chars))
+    return ''.join(random_chars)
 
 
 def is_phy_iface(interface):
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index cc2d89fe..a279d5be 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -31,6 +31,7 @@ UBUNTU_RELEASES = (
     'impish',
     'jammy',
     'kinetic',
+    'lunar',
 )
 
 
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
index d9b8d0b0..8f4bbc61 100644
--- a/hooks/charmhelpers/core/unitdata.py
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -171,8 +171,9 @@ class Storage(object):
     path parameter which causes sqlite3 to only build the db in memory.
     This should only be used for testing purposes.
     """
-    def __init__(self, path=None):
+    def __init__(self, path=None, keep_revisions=False):
         self.db_path = path
+        self.keep_revisions = keep_revisions
         if path is None:
             if 'UNIT_STATE_DB' in os.environ:
                 self.db_path = os.environ['UNIT_STATE_DB']
@@ -242,7 +243,7 @@ class Storage(object):
         Remove a key from the database entirely.
         """
         self.cursor.execute('delete from kv where key=?', [key])
-        if self.revision and self.cursor.rowcount:
+        if self.keep_revisions and self.revision and self.cursor.rowcount:
             self.cursor.execute(
                 'insert into kv_revisions values (?, ?, ?)',
                 [key, self.revision, json.dumps('DELETED')])
@@ -259,14 +260,14 @@ class Storage(object):
         if keys is not None:
             keys = ['%s%s' % (prefix, key) for key in keys]
             self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
-            if self.revision and self.cursor.rowcount:
+            if self.keep_revisions and self.revision and self.cursor.rowcount:
                 self.cursor.execute(
                     'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                     list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
         else:
             self.cursor.execute('delete from kv where key like ?',
                                 ['%s%%' % prefix])
-            if self.revision and self.cursor.rowcount:
+            if self.keep_revisions and self.revision and self.cursor.rowcount:
                 self.cursor.execute(
                     'insert into kv_revisions values (?, ?, ?)',
                     ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
@@ -299,7 +300,7 @@ class Storage(object):
             where key = ?''', [serialized, key])
 
         # Save
-        if not self.revision:
+        if (not self.keep_revisions) or (not self.revision):
             return value
 
         self.cursor.execute(
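
A sketch of the new keep_revisions flag, assuming an in-memory database; revision history is now only written to kv_revisions when explicitly requested:

    from charmhelpers.core import unitdata

    db = unitdata.Storage(':memory:', keep_revisions=True)
    with db.hook_scope('config-changed'):
        db.set('answer', 42)
        db.unset('stale-key')   # recorded in kv_revisions only with the flag
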
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 93b92765..effc884a 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -230,6 +230,18 @@ CLOUD_ARCHIVE_POCKETS = {
     'zed/proposed': 'jammy-proposed/zed',
     'jammy-zed/proposed': 'jammy-proposed/zed',
     'jammy-proposed/zed': 'jammy-proposed/zed',
+    # antelope
+    'antelope': 'jammy-updates/antelope',
+    'jammy-antelope': 'jammy-updates/antelope',
+    'jammy-antelope/updates': 'jammy-updates/antelope',
+    'jammy-updates/antelope': 'jammy-updates/antelope',
+    'antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-antelope/proposed': 'jammy-proposed/antelope',
+    'jammy-proposed/antelope': 'jammy-proposed/antelope',
+
+    # OVN
+    'focal-ovn-22.03': 'focal-updates/ovn-22.03',
+    'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
 }
 
 
@@ -257,6 +269,7 @@ OPENSTACK_RELEASES = (
     'xena',
     'yoga',
     'zed',
+    'antelope',
 )
 
 
@@ -284,6 +297,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('impish', 'xena'),
     ('jammy', 'yoga'),
     ('kinetic', 'zed'),
+    ('lunar', 'antelope'),
 ])
 
 
@@ -363,6 +377,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
     :type quiet: bool
     :raises: subprocess.CalledProcessError
     """
+    if not packages:
+        log("Nothing to install", level=DEBUG)
+        return
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
 
@@ -687,6 +704,7 @@ def add_source(source, key=None, fail_invalid=False):
         (r"^cloud-archive:(.*)$", _add_apt_repository),
         (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
         (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
         (r"^cloud:(.*)$", _add_cloud_pocket),
         (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@@ -750,6 +768,11 @@ def _add_apt_repository(spec):
                       )
 
 
+def __write_sources_list_d_actual_pocket(file, actual_pocket):
+    with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+
+
 def _add_cloud_pocket(pocket):
     """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
 
@@ -769,8 +792,9 @@ def _add_cloud_pocket(pocket):
             'Unsupported cloud: source option %s' %
             pocket)
     actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+    __write_sources_list_d_actual_pocket(
+        'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
+        actual_pocket)
 
 
 def _add_cloud_staging(cloud_archive_release, openstack_release):
@@ -931,10 +955,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
         try:
             result = subprocess.check_call(cmd, env=env, **kwargs)
         except subprocess.CalledProcessError as e:
-            retry_count = retry_count + 1
-            if retry_count > max_retries:
-                raise
             result = e.returncode
+            if result not in retry_results:
+                # a non-retriable exitcode was produced
+                raise
+            retry_count += 1
+            if retry_count > max_retries:
+                # a retriable exitcode was produced more than max_retries times
+                raise
             log(retry_message)
             time.sleep(CMD_RETRY_DELAY)
 
diff --git a/metadata.yaml b/metadata.yaml
index 4cfd6d8c..4d40658d 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -18,6 +18,7 @@ tags:
 series:
 - jammy
 - kinetic
+- lunar
 extra-bindings:
   data:
 provides:
diff --git a/osci.yaml b/osci.yaml
index 76da4721..6148a32b 100644
--- a/osci.yaml
+++ b/osci.yaml
@@ -1,7 +1,7 @@
 - project:
     templates:
       - charm-unit-jobs-py310
-      - charm-zed-functional-jobs
+      - charm-functional-jobs
     vars:
       needs_charm_build: true
       charm_build_name: neutron-gateway
diff --git a/test-requirements.txt b/test-requirements.txt
index 40d87f30..e972406e 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -26,3 +26,4 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.open
 git+https://opendev.org/openstack/tempest.git#egg=tempest
 
 croniter            # needed for charm-rabbitmq-server unit tests
+psutil
diff --git a/tests/bundles/jammy-antelope.yaml b/tests/bundles/jammy-antelope.yaml
new file mode 100644
index 00000000..7a7075fb
--- /dev/null
+++ b/tests/bundles/jammy-antelope.yaml
@@ -0,0 +1,287 @@
+variables:
+  openstack-origin: &openstack-origin cloud:jammy-antelope
+
+series: &series jammy
+
+machines:
+  0:
+    constraints: "mem=3072M"
+  1:
+    constraints: "mem=3072M"
+  2:
+    constraints: "mem=3072M"
+  3: {}
+  4: {}
+  5: {}
+  6: {}
+  7: {}
+  8:
+    constraints: "mem=4G"
+  9:
+    constraints: "root-disk=20G mem=4G"
+  10:
+    constraints: "root-disk=20G mem=4G"
+  11: {}
+  12: {}
+  13: {}
+
+# We specify machine placements for these to improve iteration
+# time, given that machine "0" comes up way before machine "7"
+applications:
+
+  neutron-api-mysql-router:
+    charm: ch:mysql-router
+    channel: latest/edge
+  keystone-mysql-router:
+    charm: ch:mysql-router
+    channel: latest/edge
+  nova-cloud-controller-mysql-router:
+    charm: ch:mysql-router
+    channel: latest/edge
+  glance-mysql-router:
+    charm: ch:mysql-router
+    channel: latest/edge
+  placement-mysql-router:
+    charm: ch:mysql-router
+    channel: latest/edge
+
+  mysql-innodb-cluster:
+    charm: ch:mysql-innodb-cluster
+    num_units: 3
+    to:
+      - '0'
+      - '1'
+      - '2'
+    channel: latest/edge
+
+  rabbitmq-server:
+    charm: ch:rabbitmq-server
+    num_units: 1
+    options:
+    to:
+      - '3'
+    channel: latest/edge
+
+  neutron-api:
+    charm: ch:neutron-api
+    series: *series
+    num_units: 1
+    options:
+      manage-neutron-plugin-legacy-mode: true
+      flat-network-providers: physnet1
+      neutron-security-groups: true
+      openstack-origin: *openstack-origin
+      enable-qos: true
+    to:
+      - '4'
+    channel: latest/edge
+
+  keystone:
+    charm: ch:keystone
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '5'
+    channel: latest/edge
+
+  glance:
+    charm: ch:glance
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '6'
+    channel: latest/edge
+
+  neutron-openvswitch:
+    charm: ch:neutron-openvswitch
+    channel: latest/edge
+  neutron-gateway:
+    charm: ../../neutron-gateway.charm
+    num_units: 1
+    options:
+      bridge-mappings: physnet1:br-ex
+      openstack-origin: *openstack-origin
+    to:
+      - '7'
+
+  ovn-dedicated-chassis:
+    charm: ch:ovn-dedicated-chassis
+    num_units: 1
+    options:
+      # start new units paused to allow unit by unit OVS to OVN migration
+      new-units-paused: true
+    to:
+      # NOTE: We deliberately colocate the ovn-dedicated-chassis with the
+      # neutron-gateway for migration test purposes.
+      - '7'
+    channel: latest/edge
+
+  nova-cloud-controller:
+    charm: ch:nova-cloud-controller
+    num_units: 1
+    options:
+      network-manager: Neutron
+      openstack-origin: *openstack-origin
+    to:
+      - '8'
+    channel: latest/edge
+
+  nova-compute:
+    charm: ch:nova-compute
+    num_units: 2
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '9'
+      - '10'
+    channel: latest/edge
+
+  placement:
+    charm: ch:placement
+    num_units: 1
+    options:
+      openstack-origin: *openstack-origin
+    to:
+      - '11'
+    channel: latest/edge
+
+  vault-mysql-router:
+    charm: ch:mysql-router
+    channel: latest/edge
+  vault:
+    charm: ch:vault
+    num_units: 1
+    to:
+      - '12'
+    channel: latest/edge
+  ovn-central:
+    charm: ch:ovn-central
+    num_units: 3
+    options:
+      source: *openstack-origin
+    to:
+      - '13'
+    channel: latest/edge
+  neutron-api-plugin-ovn:
+    charm: ch:neutron-api-plugin-ovn
+    channel: latest/edge
+  ovn-chassis:
+    charm: ch:ovn-chassis
+    options:
+      # start new units paused to allow unit by unit OVS to OVN migration
+      new-units-paused: true
+    channel: latest/edge
+
+relations:
+
+  - - 'neutron-api:shared-db'
+    - 'neutron-api-mysql-router:shared-db'
+  - - 'neutron-api-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'neutron-api:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'neutron-api:neutron-api'
+    - 'nova-cloud-controller:neutron-api'
+
+  - - 'neutron-api:neutron-plugin-api'
+    - 'neutron-gateway:neutron-plugin-api'
+
+  - - 'neutron-api:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'keystone:shared-db'
+    - 'keystone-mysql-router:shared-db'
+  - - 'keystone-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'nova-compute:neutron-plugin'
+    - 'neutron-openvswitch:neutron-plugin'
+
+  - - 'nova-cloud-controller:shared-db'
+    - 'nova-cloud-controller-mysql-router:shared-db'
+  - - 'nova-cloud-controller-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'neutron-gateway:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'nova-cloud-controller:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'nova-compute:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'neutron-openvswitch:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'neutron-openvswitch:neutron-plugin-api'
+    - 'neutron-api:neutron-plugin-api'
+
+  - - 'nova-cloud-controller:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'nova-cloud-controller:cloud-compute'
+    - 'nova-compute:cloud-compute'
+
+  - - 'glance:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'glance:shared-db'
+    - 'glance-mysql-router:shared-db'
+  - - 'glance-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'glance:amqp'
+    - 'rabbitmq-server:amqp'
+
+  - - 'nova-compute:image-service'
+    - 'glance:image-service'
+
+  - - 'nova-cloud-controller:image-service'
+    - 'glance:image-service'
+
+  - - 'nova-cloud-controller:quantum-network-service'
+    - 'neutron-gateway:quantum-network-service'
+
+  - - 'placement:shared-db'
+    - 'placement-mysql-router:shared-db'
+  - - 'placement-mysql-router:db-router'
+    - 'mysql-innodb-cluster:db-router'
+
+  - - 'placement:identity-service'
+    - 'keystone:identity-service'
+
+  - - 'placement:placement'
+    - 'nova-cloud-controller:placement'
+
+  # We need to defer the addition of the neutron-api-plugin-ovn subordinate
+  # relation to the functional test as the test will first validate the legacy
+  # Neutron ML2+OVS topology, migrate it to OVN and then confirm connectivity
+  # post migration.
+  #
+  # - - neutron-api-plugin-ovn:neutron-plugin
+  #   - neutron-api:neutron-plugin-api-subordinate
+  - - ovn-central:certificates
+    - vault:certificates
+  - - ovn-central:ovsdb-cms
+    - neutron-api-plugin-ovn:ovsdb-cms
+  - - ovn-chassis:nova-compute
+    - nova-compute:neutron-plugin
+  - - ovn-chassis:certificates
+    - vault:certificates
+  - - ovn-chassis:ovsdb
+    - ovn-central:ovsdb
+  - - ovn-dedicated-chassis:certificates
+    - vault:certificates
+  - - ovn-dedicated-chassis:ovsdb
+    - ovn-central:ovsdb
+  - - vault:certificates
+    - neutron-api-plugin-ovn:certificates
+  - - vault:shared-db
+    - vault-mysql-router:shared-db
+  - - vault-mysql-router:db-router
+    - mysql-innodb-cluster:db-router
diff --git a/tests/bundles/jammy-yoga.yaml b/tests/bundles/lunar-antelope.yaml
similarity index 99%
rename from tests/bundles/jammy-yoga.yaml
rename to tests/bundles/lunar-antelope.yaml
index 5416b548..28175632 100644
--- a/tests/bundles/jammy-yoga.yaml
+++ b/tests/bundles/lunar-antelope.yaml
@@ -1,7 +1,7 @@
 variables:
   openstack-origin: &openstack-origin distro
 
-series: &series jammy
+series: &series lunar
 
 machines:
   0:
diff --git a/tests/tests.yaml b/tests/tests.yaml
index 0ee7c1a6..fdf4492c 100644
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -6,15 +6,15 @@ charm_name: neutron-gateway
 # OVS to OVN.
 #
 smoke_bundles:
-  - migrate-ovn: jammy-yoga
+  - migrate-ovn: jammy-zed
 
 gate_bundles:
-  - migrate-ovn: jammy-yoga
+  - migrate-ovn: jammy-zed
 
 dev_bundles:
-  - migrate-ovn: jammy-yoga
-  - migrate-ovn: jammy-zed
+  - migrate-ovn: jammy-antelope
   - migrate-ovn: kinetic-zed
+  - migrate-ovn: lunar-antelope
 
 target_deploy_status:
   neutron-api-plugin-ovn:
@@ -81,3 +81,4 @@ tests_options:
   zaza.openstack.charm_tests.neutron.tests.NeutronNetworkingTest.test_instances_have_networking.run_resource_cleanup: false
   force_deploy:
     - kinetic-zed
+    - lunar-antelope
diff --git a/tox.ini b/tox.ini
index ae4d124c..2cb6ca16 100644
--- a/tox.ini
+++ b/tox.ini
@@ -25,7 +25,7 @@ setenv = VIRTUAL_ENV={envdir}
 commands = stestr run --slowest {posargs}
 allowlist_externals =
     charmcraft
-    rename.sh
+    {toxinidir}/rename.sh
 passenv =
     HOME
     TERM