Add enable-fwaas to relation data

Add enable-fwaas to neutron-plugin-api relation data so
that neutron-openvswitch updates neutron-l3-agent
configuration accordingly.

Synced charmhelpers to get related changes
https://github.com/juju/charm-helpers/pull/635

Partial-Bug: #1934129
Change-Id: I5019c5ed3b8ab556d4900f1fe46dee69f5f09ee7
This commit is contained in:
Hemanth Nakkina 2021-08-31 11:07:12 +05:30
parent 3f0855a8d3
commit 0cfaca49b0
17 changed files with 250 additions and 2059 deletions

View File

@ -1,13 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,387 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OPENSTACK_RELEASES_PAIRS
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
    """Amulet deployment helper specialised for OpenStack charms.

    Extends AmuletDeployment with OpenStack-specific behaviour: charm
    branch/location resolution, openstack-origin/source handling and
    release introspection.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source
        self.stable = stable
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresonding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the ~openstack-charmers
base_charms = {
'mysql': ['trusty'],
'mongodb': ['trusty'],
'nrpe': ['trusty', 'xenial'],
}
for svc in other_services:
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if svc['name'] in base_charms:
# NOTE: not all charms have support for all series we
# want/need to test against, so fix to most recent
# that each base charm supports
target_series = self.series
if self.series not in base_charms[svc['name']]:
target_series = base_charms[svc['name']][-1]
svc['location'] = 'cs:{}/{}'.format(target_series,
svc['name'])
elif self.stable:
svc['location'] = 'cs:{}/{}'.format(self.series,
svc['name'])
else:
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
self.series,
svc['name']
)
return other_services
    def _add_services(self, this_service, other_services, use_source=None,
                      no_origin=None):
        """Add services to the deployment and optionally set
        openstack-origin/source.

        :param this_service dict: Service dictionary describing the service
                                  whose amulet tests are being run
        :param other_services dict: List of service dictionaries describing
                                    the services needed to support the target
                                    service
        :param use_source list: List of services which use the 'source' config
                                option rather than 'openstack-origin'
        :param no_origin list: List of services which do not support setting
                               the Cloud Archive.

        Service Dict:
            {
                'name': str charm-name,
                'units': int number of units,
                'constraints': dict of juju constraints,
                'location': str location of charm,
            }
        eg
        this_service = {
            'name': 'openvswitch-odl',
            'constraints': {'mem': '8G'},
        }
        other_services = [
            {
                'name': 'nova-compute',
                'units': 2,
                'constraints': {'mem': '4G'},
                'location': cs:~bob/xenial/nova-compute
            },
            {
                'name': 'mysql',
                'constraints': {'mem': '2G'},
            },
            {'neutron-api-odl'}]
        use_source = ['mysql']
        no_origin = ['neutron-api-odl']
        """
        self.log.info('OpenStackAmuletDeployment: adding services')

        # Resolve charm store locations before the base class deploys them.
        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        # From here on, operate on the full service list (target included).
        services = other_services
        services.append(this_service)

        use_source = use_source or []
        no_origin = no_origin or []

        # Charms which should use the source config option
        use_source = list(set(
            use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                          'ceph-osd', 'ceph-radosgw', 'ceph-mon',
                          'ceph-proxy', 'percona-cluster', 'lxd']))

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = list(set(
            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
                         'cinder-nexentaedge', 'nexentaedge-mgmt',
                         'ceilometer-agent']))

        # Apply openstack-origin to every charm that supports it.
        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        # Apply 'source' to the charms that use it instead.
        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjuction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjuction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
if not timeout:
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
self.log.info('Waiting for extended status on units for {}s...'
''.format(timeout))
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
# Check for idleness
self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        # each pair name (e.g. 'trusty_icehouse') becomes an instance
        # attribute holding its ordinal, so releases can be compared with
        # <, <=, etc. elsewhere in this class.
        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
            setattr(self, os_pair, i)

        # Map (ubuntu series, openstack-origin) to the release ordinal.
        # A None origin means the distro-default OpenStack for that series.
        releases = {
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
            ('xenial', None): self.xenial_mitaka,
            ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
            ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
            ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
            ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
            ('yakkety', None): self.yakkety_newton,
            ('zesty', None): self.zesty_ocata,
            ('artful', None): self.artful_pike,
            ('bionic', None): self.bionic_queens,
            ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
            ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
            ('bionic', 'cloud:bionic-train'): self.bionic_train,
            ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri,
            ('cosmic', None): self.cosmic_rocky,
            ('disco', None): self.disco_stein,
            ('eoan', None): self.eoan_train,
            ('focal', None): self.focal_ussuri,
            ('focal', 'cloud:focal-victoria'): self.focal_victoria,
            ('groovy', None): self.groovy_victoria,
        }
        # KeyError here means the series/origin combination is unknown.
        return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('trusty', 'icehouse'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
def get_percona_service_entry(self, memory_constraint=None):
"""Return a amulet service entry for percona cluster.
:param memory_constraint: Override the default memory constraint
in the service entry.
:type memory_constraint: str
:returns: Amulet service entry.
:rtype: dict
"""
memory_constraint = memory_constraint or '3072M'
svc_entry = {
'name': 'percona-cluster',
'constraints': {'mem': memory_constraint}}
if self._get_openstack_release() <= self.trusty_mitaka:
svc_entry['location'] = 'cs:trusty/percona-cluster'
return svc_entry
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() == self.trusty_icehouse:
# Icehouse
pools = [
'data',
'metadata',
'rbd',
'cinder-ceph',
'glance'
]
elif (self.trusty_kilo <= self._get_openstack_release() <=
self.zesty_ocata):
# Kilo through Ocata
pools = [
'rbd',
'cinder-ceph',
'glance'
]
else:
# Pike and later
pools = [
'cinder-ceph',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools

File diff suppressed because it is too large Load Diff

View File

@ -25,7 +25,10 @@ import socket
import time import time
from base64 import b64decode from base64 import b64decode
from subprocess import check_call, CalledProcessError from subprocess import (
check_call,
check_output,
CalledProcessError)
import six import six
@ -453,18 +456,24 @@ class IdentityServiceContext(OSContextGenerator):
serv_host = format_ipv6_addr(serv_host) or serv_host serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host') auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host auth_host = format_ipv6_addr(auth_host) or auth_host
int_host = rdata.get('internal_host')
int_host = format_ipv6_addr(int_host) or int_host
svc_protocol = rdata.get('service_protocol') or 'http' svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http' auth_protocol = rdata.get('auth_protocol') or 'http'
int_protocol = rdata.get('internal_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0' api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'), ctxt.update({'service_port': rdata.get('service_port'),
'service_host': serv_host, 'service_host': serv_host,
'auth_host': auth_host, 'auth_host': auth_host,
'auth_port': rdata.get('auth_port'), 'auth_port': rdata.get('auth_port'),
'internal_host': int_host,
'internal_port': rdata.get('internal_port'),
'admin_tenant_name': rdata.get('service_tenant'), 'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'), 'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'), 'admin_password': rdata.get('service_password'),
'service_protocol': svc_protocol, 'service_protocol': svc_protocol,
'auth_protocol': auth_protocol, 'auth_protocol': auth_protocol,
'internal_protocol': int_protocol,
'api_version': api_version}) 'api_version': api_version})
if float(api_version) > 2: if float(api_version) > 2:
@ -1781,6 +1790,10 @@ class NeutronAPIContext(OSContextGenerator):
'rel_key': 'enable-port-forwarding', 'rel_key': 'enable-port-forwarding',
'default': False, 'default': False,
}, },
'enable_fwaas': {
'rel_key': 'enable-fwaas',
'default': False,
},
'global_physnet_mtu': { 'global_physnet_mtu': {
'rel_key': 'global-physnet-mtu', 'rel_key': 'global-physnet-mtu',
'default': 1500, 'default': 1500,
@ -1815,6 +1828,11 @@ class NeutronAPIContext(OSContextGenerator):
if ctxt['enable_port_forwarding']: if ctxt['enable_port_forwarding']:
l3_extension_plugins.append('port_forwarding') l3_extension_plugins.append('port_forwarding')
if ctxt['enable_fwaas']:
l3_extension_plugins.append('fwaas_v2')
if ctxt['enable_nfg_logging']:
l3_extension_plugins.append('fwaas_v2_log')
ctxt['l3_extension_plugins'] = l3_extension_plugins ctxt['l3_extension_plugins'] = l3_extension_plugins
return ctxt return ctxt
@ -2578,14 +2596,22 @@ class OVSDPDKDeviceContext(OSContextGenerator):
return format(mask, '#04x') return format(mask, '#04x')
def socket_memory(self): def socket_memory(self):
"""Formatted list of socket memory configuration per NUMA node """Formatted list of socket memory configuration per socket.
:returns: socket memory configuration per NUMA node :returns: socket memory configuration per socket.
:rtype: str :rtype: str
""" """
lscpu_out = check_output(
['lscpu', '-p=socket']).decode('UTF-8').strip()
sockets = set()
for line in lscpu_out.split('\n'):
try:
sockets.add(int(line))
except ValueError:
# lscpu output is headed by comments so ignore them.
pass
sm_size = config('dpdk-socket-memory') sm_size = config('dpdk-socket-memory')
node_regex = '/sys/devices/system/node/node*' mem_list = [str(sm_size) for _ in sockets]
mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
if mem_list: if mem_list:
return ','.join(mem_list) return ','.join(mem_list)
else: else:

View File

@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release,
restart_handler() restart_handler()
@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") @charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
"""This function is designed to be called from the config changed hook. """This function is designed to be called from the config changed hook.

View File

@ -1,10 +1,22 @@
global global
log /var/lib/haproxy/dev/log local0 # NOTE: on startup haproxy chroot's to /var/lib/haproxy.
log /var/lib/haproxy/dev/log local1 notice #
# Unfortunately the program will open some files prior to the call to
# chroot never to reopen them, and some after. So looking at the on-disk
# layout of haproxy resources you will find some resources relative to /
# such as the admin socket, and some relative to /var/lib/haproxy such as
# the log socket.
#
# The logging socket is (re-)opened after the chroot and must be relative
# to /var/lib/haproxy.
log /dev/log local0
log /dev/log local1 notice
maxconn 20000 maxconn 20000
user haproxy user haproxy
group haproxy group haproxy
spread-checks 0 spread-checks 0
# The admin socket is opened prior to the chroot never to be reopened, so
# it lives outside the chroot directory in the filesystem.
stats socket /var/run/haproxy/admin.sock mode 600 level admin stats socket /var/run/haproxy/admin.sock mode 600 level admin
stats timeout 2m stats timeout 2m

View File

@ -15,7 +15,7 @@ Listen {{ public_port }}
{% if port -%} {% if port -%}
<VirtualHost *:{{ port }}> <VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP} display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }} WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }} WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL} WSGIApplicationGroup %{GLOBAL}
@ -41,7 +41,7 @@ Listen {{ public_port }}
{% if admin_port -%} {% if admin_port -%}
<VirtualHost *:{{ admin_port }}> <VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP} display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-admin WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }} WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL} WSGIApplicationGroup %{GLOBAL}
@ -67,7 +67,7 @@ Listen {{ public_port }}
{% if public_port -%} {% if public_port -%}
<VirtualHost *:{{ public_port }}> <VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP} display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-public WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }} WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL} WSGIApplicationGroup %{GLOBAL}

View File

@ -15,7 +15,7 @@ Listen {{ public_port }}
{% if port -%} {% if port -%}
<VirtualHost *:{{ port }}> <VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP} display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }} WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }} WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL} WSGIApplicationGroup %{GLOBAL}
@ -41,7 +41,7 @@ Listen {{ public_port }}
{% if admin_port -%} {% if admin_port -%}
<VirtualHost *:{{ admin_port }}> <VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP} display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-admin WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }} WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL} WSGIApplicationGroup %{GLOBAL}
@ -67,7 +67,7 @@ Listen {{ public_port }}
{% if public_port -%} {% if public_port -%}
<VirtualHost *:{{ public_port }}> <VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP} display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-public WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }} WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL} WSGIApplicationGroup %{GLOBAL}

View File

@ -106,6 +106,8 @@ from charmhelpers.fetch import (
filter_installed_packages, filter_installed_packages,
filter_missing_packages, filter_missing_packages,
ubuntu_apt_pkg as apt, ubuntu_apt_pkg as apt,
OPENSTACK_RELEASES,
UBUNTU_OPENSTACK_RELEASE,
) )
from charmhelpers.fetch.snap import ( from charmhelpers.fetch.snap import (
@ -132,54 +134,9 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe') 'restricted main multiverse universe')
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
])
OPENSTACK_CODENAMES = OrderedDict([ OPENSTACK_CODENAMES = OrderedDict([
# NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version
# number. This just means the i-th version of the year yyyy.
('2011.2', 'diablo'), ('2011.2', 'diablo'),
('2012.1', 'essex'), ('2012.1', 'essex'),
('2012.2', 'folsom'), ('2012.2', 'folsom'),
@ -200,6 +157,8 @@ OPENSTACK_CODENAMES = OrderedDict([
('2020.1', 'ussuri'), ('2020.1', 'ussuri'),
('2020.2', 'victoria'), ('2020.2', 'victoria'),
('2021.1', 'wallaby'), ('2021.1', 'wallaby'),
('2021.2', 'xena'),
('2022.1', 'yoga'),
]) ])
# The ugly duckling - must list releases oldest to newest # The ugly duckling - must list releases oldest to newest

View File

@ -106,6 +106,8 @@ if __platform__ == "ubuntu":
apt_pkg = fetch.ubuntu_apt_pkg apt_pkg = fetch.ubuntu_apt_pkg
get_apt_dpkg_env = fetch.get_apt_dpkg_env get_apt_dpkg_env = fetch.get_apt_dpkg_env
get_installed_version = fetch.get_installed_version get_installed_version = fetch.get_installed_version
OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES
UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE
elif __platform__ == "centos": elif __platform__ == "centos":
yum_search = fetch.yum_search yum_search = fetch.yum_search

View File

@ -208,12 +208,79 @@ CLOUD_ARCHIVE_POCKETS = {
'wallaby/proposed': 'focal-proposed/wallaby', 'wallaby/proposed': 'focal-proposed/wallaby',
'focal-wallaby/proposed': 'focal-proposed/wallaby', 'focal-wallaby/proposed': 'focal-proposed/wallaby',
'focal-proposed/wallaby': 'focal-proposed/wallaby', 'focal-proposed/wallaby': 'focal-proposed/wallaby',
# Xena
'xena': 'focal-updates/xena',
'focal-xena': 'focal-updates/xena',
'focal-xena/updates': 'focal-updates/xena',
'focal-updates/xena': 'focal-updates/xena',
'xena/proposed': 'focal-proposed/xena',
'focal-xena/proposed': 'focal-proposed/xena',
'focal-proposed/xena': 'focal-proposed/xena',
# Yoga
'yoga': 'focal-updates/yoga',
'focal-yoga': 'focal-updates/yoga',
'focal-yoga/updates': 'focal-updates/yoga',
'focal-updates/yoga': 'focal-updates/yoga',
'yoga/proposed': 'focal-proposed/yoga',
'focal-yoga/proposed': 'focal-proposed/yoga',
'focal-proposed/yoga': 'focal-proposed/yoga',
} }
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
'xena',
'yoga',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
('impish', 'xena'),
])
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times.
def filter_installed_packages(packages): def filter_installed_packages(packages):
@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False):
with be used. If staging is NOT used then the cloud archive [3] will be with be used. If staging is NOT used then the cloud archive [3] will be
added, and the 'ubuntu-cloud-keyring' package will be added for the added, and the 'ubuntu-cloud-keyring' package will be added for the
current distro. current distro.
'<openstack-version>': translate to cloud:<release> based on the current
distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri' or
'distro'.
'<openstack-version>/proposed': as above, but for proposed.
Otherwise the source is not recognised and this is logged to the juju log. Otherwise the source is not recognised and this is logged to the juju log.
However, no error is raised, unless sys_error_on_exit is True. However, no error is raised, unless sys_error_on_exit is True.
@ -600,6 +671,12 @@ def add_source(source, key=None, fail_invalid=False):
@raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
valid pocket in CLOUD_ARCHIVE_POCKETS valid pocket in CLOUD_ARCHIVE_POCKETS
""" """
# extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
# the list in contrib.openstack.utils as it might not be included in
# classic charms and would break everything. Having OpenStack specific
# code in this file is a bit of an antipattern, anyway.
os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES))
_mapping = OrderedDict([ _mapping = OrderedDict([
(r"^distro$", lambda: None), # This is a NOP (r"^distro$", lambda: None), # This is a NOP
(r"^(?:proposed|distro-proposed)$", _add_proposed), (r"^(?:proposed|distro-proposed)$", _add_proposed),
@ -609,6 +686,9 @@ def add_source(source, key=None, fail_invalid=False):
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket), (r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
(r"^{}\/proposed$".format(os_versions_regex),
_add_bare_openstack_proposed),
(r"^{}$".format(os_versions_regex), _add_bare_openstack),
]) ])
if source is None: if source is None:
source = '' source = ''
@ -662,7 +742,8 @@ def _add_apt_repository(spec):
series = get_distrib_codename() series = get_distrib_codename()
spec = spec.replace('{series}', series) spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec], _run_with_retries(['add-apt-repository', '--yes', spec],
cmd_env=env_proxy_settings(['https', 'http'])) cmd_env=env_proxy_settings(['https', 'http', 'no_proxy'])
)
def _add_cloud_pocket(pocket): def _add_cloud_pocket(pocket):
@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release):
'version ({})'.format(release, os_release, ubuntu_rel)) 'version ({})'.format(release, os_release, ubuntu_rel))
def _add_bare_openstack(openstack_release):
"""Add cloud or distro based on the release given.
The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
or 'distro' depending on whether the ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
# TODO(ajkavanagh) - surely this means we should be removing cloud archives
# if they exist?
__add_bare_helper(openstack_release, "{}-{}", lambda: None)
def _add_bare_openstack_proposed(openstack_release):
"""Add cloud of distro but with proposed.
The spec given is, say, 'ussuri' but this could apply
cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
__add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed)
def __add_bare_helper(openstack_release, pocket_format, final_function):
"""Helper for _add_bare_openstack[_proposed]
The bulk of the work between the two functions is exactly the same except
for the pocket format and the function that is run if it's the distro
version.
:param openstack_release: the OpenStack codename. e.g. ussuri
:type openstack_release: str
:param pocket_format: the pocket formatter string to construct a pocket str
from the openstack_release and the current ubuntu version.
:type pocket_format: str
:param final_function: the function to call if it is the distro version.
:type final_function: Callable
:raises SourceConfigError on error
"""
ubuntu_version = get_distrib_codename()
possible_pocket = pocket_format.format(ubuntu_version, openstack_release)
if possible_pocket in CLOUD_ARCHIVE_POCKETS:
_add_cloud_pocket(possible_pocket)
return
# Otherwise it's almost certainly the distro version; verify that it
# exists.
try:
assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release
except KeyError:
raise SourceConfigError(
"Invalid ubuntu version {} isn't known to this library"
.format(ubuntu_version))
except AssertionError:
raise SourceConfigError(
'Invalid OpenStack release specificed: {} for ubuntu version {}'
.format(openstack_release, ubuntu_version))
final_function()
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None, quiet=False): retry_message="", cmd_env=None, quiet=False):
"""Run a command and retry until success or max_retries is reached. """Run a command and retry until success or max_retries is reached.

View File

@ -264,7 +264,7 @@ def version_compare(a, b):
else: else:
raise RuntimeError('Unable to compare "{}" and "{}", according to ' raise RuntimeError('Unable to compare "{}" and "{}", according to '
'our logic they are neither greater, equal nor ' 'our logic they are neither greater, equal nor '
'less than each other.') 'less than each other.'.format(a, b))
class PkgVersion(): class PkgVersion():

View File

@ -28,6 +28,9 @@ def get_platform():
elif "elementary" in current_platform: elif "elementary" in current_platform:
# ElementaryOS fails to run tests locally without this. # ElementaryOS fails to run tests locally without this.
return "ubuntu" return "ubuntu"
elif "Pop!_OS" in current_platform:
# Pop!_OS also fails to run tests locally without this.
return "ubuntu"
else: else:
raise RuntimeError("This module is not supported on {}." raise RuntimeError("This module is not supported on {}."
.format(current_platform)) .format(current_platform))

View File

@ -282,6 +282,28 @@ def is_port_forwarding_enabled():
return False return False
def is_fwaas_enabled():
    """Decide whether the Firewall-as-a-Service feature should be on.

    FWaaS is enabled only when the ``enable-fwaas`` config option is set
    and the deployed OpenStack release lies in the supported window
    (Stein through Ussuri, inclusive); outside that window an error is
    logged and the feature stays off.

    :returns: True if enable-fwaas is set and the release supports it,
        otherwise False.
    :rtype: boolean
    """
    if not config('enable-fwaas'):
        return False
    cmp_release = CompareOpenStackReleases(os_release('neutron-server'))
    if cmp_release < 'stein' or cmp_release > 'ussuri':
        log("The fwaas option is only supported "
            "on Stein to Ussuri",
            ERROR)
        return False
    return True
def is_vlan_trunking_requested_and_valid(): def is_vlan_trunking_requested_and_valid():
"""Check whether VLAN trunking should be enabled by checking whether """Check whether VLAN trunking should be enabled by checking whether
it has been requested and, if it has, is it supported in the current it has been requested and, if it has, is it supported in the current

View File

@ -107,6 +107,7 @@ from neutron_api_context import (
get_l2population, get_l2population,
get_l3ha, get_l3ha,
get_overlay_network_type, get_overlay_network_type,
is_fwaas_enabled,
is_nfg_logging_enabled, is_nfg_logging_enabled,
is_nsg_logging_enabled, is_nsg_logging_enabled,
is_qos_requested_and_valid, is_qos_requested_and_valid,
@ -554,6 +555,7 @@ def neutron_plugin_api_relation_joined(rid=None):
'enable-nsg-logging': is_nsg_logging_enabled(), 'enable-nsg-logging': is_nsg_logging_enabled(),
'enable-nfg-logging': is_nfg_logging_enabled(), 'enable-nfg-logging': is_nfg_logging_enabled(),
'enable-port-forwarding': is_port_forwarding_enabled(), 'enable-port-forwarding': is_port_forwarding_enabled(),
'enable-fwaas': is_fwaas_enabled(),
'overlay-network-type': get_overlay_network_type(), 'overlay-network-type': get_overlay_network_type(),
'addr': unit_get('private-address'), 'addr': unit_get('private-address'),
'polling-interval': config('polling-interval'), 'polling-interval': config('polling-interval'),

View File

@ -303,6 +303,8 @@ class IdentityServiceContext(CharmTestCase):
'service_tenant': 'ten', 'service_tenant': 'ten',
'service_username': 'admin', 'service_username': 'admin',
'service_password': 'adminpass', 'service_password': 'adminpass',
'internal_host': '127.0.0.4',
'internal_port': 5432,
} }
_rget.return_value = id_data _rget.return_value = id_data
ids_ctxt = context.IdentityServiceContext() ids_ctxt = context.IdentityServiceContext()

View File

@ -91,6 +91,7 @@ TO_PATCH = [
'is_nsg_logging_enabled', 'is_nsg_logging_enabled',
'is_nfg_logging_enabled', 'is_nfg_logging_enabled',
'is_port_forwarding_enabled', 'is_port_forwarding_enabled',
'is_fwaas_enabled',
'remove_old_packages', 'remove_old_packages',
'services', 'services',
'service_restart', 'service_restart',
@ -144,6 +145,7 @@ class NeutronAPIHooksTests(CharmTestCase):
self.is_nsg_logging_enabled.return_value = False self.is_nsg_logging_enabled.return_value = False
self.is_nfg_logging_enabled.return_value = False self.is_nfg_logging_enabled.return_value = False
self.is_port_forwarding_enabled.return_value = False self.is_port_forwarding_enabled.return_value = False
self.is_fwaas_enabled.return_value = True
def _fake_relids(self, rel_name): def _fake_relids(self, rel_name):
return [randrange(100) for _count in range(2)] return [randrange(100) for _count in range(2)]
@ -542,6 +544,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': False, 'enable-nsg-logging': False,
'enable-nfg-logging': False, 'enable-nfg-logging': False,
'enable-port-forwarding': False, 'enable-port-forwarding': False,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }
@ -588,6 +591,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': True, 'enable-nsg-logging': True,
'enable-nfg-logging': False, 'enable-nfg-logging': False,
'enable-port-forwarding': False, 'enable-port-forwarding': False,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }
@ -640,6 +644,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': False, 'enable-nsg-logging': False,
'enable-nfg-logging': True, 'enable-nfg-logging': True,
'enable-port-forwarding': False, 'enable-port-forwarding': False,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }
@ -691,6 +696,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': False, 'enable-nsg-logging': False,
'enable-nfg-logging': False, 'enable-nfg-logging': False,
'enable-port-forwarding': True, 'enable-port-forwarding': True,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }
@ -742,6 +748,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': False, 'enable-nsg-logging': False,
'enable-nfg-logging': False, 'enable-nfg-logging': False,
'enable-port-forwarding': False, 'enable-port-forwarding': False,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }
@ -788,6 +795,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': False, 'enable-nsg-logging': False,
'enable-nfg-logging': False, 'enable-nfg-logging': False,
'enable-port-forwarding': False, 'enable-port-forwarding': False,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }
@ -836,6 +844,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': False, 'enable-nsg-logging': False,
'enable-nfg-logging': False, 'enable-nfg-logging': False,
'enable-port-forwarding': False, 'enable-port-forwarding': False,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }
@ -883,6 +892,7 @@ class NeutronAPIHooksTests(CharmTestCase):
'enable-nsg-logging': False, 'enable-nsg-logging': False,
'enable-nfg-logging': False, 'enable-nfg-logging': False,
'enable-port-forwarding': False, 'enable-port-forwarding': False,
'enable-fwaas': True,
'global-physnet-mtu': 1500, 'global-physnet-mtu': 1500,
'physical-network-mtus': None, 'physical-network-mtus': None,
} }