Updates for zed stable branch creation

- Set default branch for git review/gerrit to stable/zed
- Switch tests to stable.
- Switch to using stable charm-helpers branch.
- Switch to using stable charm.openstack branch.
- Switch to using stable zaza and zaza-openstack-tests
  branches.
- (reactive charms) Add build.lock file
- (classic charms) make sync
- (reactive charms without the reactive plugin) lock charm-tools < 3.1
- (reactive charms with the reactive plugin) lock charm snap to
  3.x/stable (a sketch of these two locks follows below)
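As a sketch of the two lock items above (the file location is an assumption and varies per charm; only the pin itself comes from this change):

    # e.g. in a reactive charm's build requirements (assumed location):
    charm-tools<3.1

Charms built with the reactive plugin instead pin the charm snap channel to 3.x/stable in their CI configuration rather than pinning the Python package.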

Change-Id: I2635c0ade478c616454c3eaf1a23c063033c1d0d
Author: Alex Kavanagh, 2022-10-14 17:03:17 +01:00
parent 7b17d0d2cc
commit e745792556
17 changed files with 226 additions and 65 deletions

View File

@ -2,3 +2,5 @@
host=review.opendev.org
port=29418
project=openstack/charm-cinder-backup.git
defaultbranch=stable/zed

View File

@ -1,4 +1,4 @@
- repo: https://github.com/juju/charm-helpers
+ repo: https://github.com/juju/charm-helpers@stable/zed
destination: hooks/charmhelpers
include:
- core

View File

@ -324,7 +324,7 @@ def valid_hacluster_config():
'''
vip = config_get('vip')
dns = config_get('dns-ha')
- if not(bool(vip) ^ bool(dns)):
+ if not (bool(vip) ^ bool(dns)):
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
'use high availability')
status_set('blocked', msg)

View File

@ -539,7 +539,7 @@ def port_has_listener(address, port):
"""
cmd = ['nc', '-z', address, str(port)]
result = subprocess.call(cmd)
- return not(bool(result))
+ return not (bool(result))
def assert_charm_supports_ipv6():

View File

@ -25,6 +25,7 @@ import socket
import time
from base64 import b64decode
from distutils.version import LooseVersion
from subprocess import (
check_call,
check_output,
@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
get_installed_version,
)
from charmhelpers.core.hookenv import (
NoNetworkBinding,
@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import (
network_get_primary_address,
WARNING,
service_name,
remote_service_name,
)
from charmhelpers.core.sysctl import create as sysctl_create
@ -130,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
HAPROXY_RUN_DIR = '/var/run/haproxy/'
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404
def ensure_packages(packages):
@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):
class IdentityServiceContext(OSContextGenerator):
_forward_compat_remaps = {
'admin_user': 'admin-user-name',
'service_username': 'service-user-name',
'service_tenant': 'service-project-name',
'service_tenant_id': 'service-project-id',
'service_domain': 'service-domain-name',
}
def __init__(self,
service=None,
service_user=None,
@ -397,11 +409,16 @@ class IdentityServiceContext(OSContextGenerator):
# 'www_authenticate_uri' replaced 'auth_uri' since Stein,
# see keystonemiddleware upstream sources for more info
if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
- c.update((
- ('www_authenticate_uri', "{}://{}:{}/v3".format(
- ctxt.get('service_protocol', ''),
- ctxt.get('service_host', ''),
- ctxt.get('service_port', ''))),))
+ if 'public_auth_url' in ctxt:
+ c.update((
+ ('www_authenticate_uri', '{}/v3'.format(
+ ctxt.get('public_auth_url'))),))
+ else:
+ c.update((
+ ('www_authenticate_uri', "{}://{}:{}/v3".format(
+ ctxt.get('service_protocol', ''),
+ ctxt.get('service_host', ''),
+ ctxt.get('service_port', ''))),))
else:
c.update((
('auth_uri', "{}://{}:{}/v3".format(
@ -409,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator):
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
+ if 'internal_auth_url' in ctxt:
+ c.update((
+ ('auth_url', ctxt.get('internal_auth_url')),))
+ else:
+ c.update((
+ ('auth_url', "{}://{}:{}/v3".format(
+ ctxt.get('auth_protocol', ''),
+ ctxt.get('auth_host', ''),
+ ctxt.get('auth_port', ''))),))
- c.update((
- ('auth_url', "{}://{}:{}/v3".format(
- ctxt.get('auth_protocol', ''),
- ctxt.get('auth_host', ''),
- ctxt.get('auth_port', ''))),
('project_domain_name', ctxt.get('admin_domain_name', '')),
('user_domain_name', ctxt.get('admin_domain_name', '')),
('project_name', ctxt.get('admin_tenant_name', '')),
@ -441,7 +464,27 @@ class IdentityServiceContext(OSContextGenerator):
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
+ rdata = {}
+ # NOTE(jamespage):
+ # forwards compat with application data
+ # bag driven approach to relation.
+ _adata = relation_get(rid=rid, app=remote_service_name(rid))
+ if _adata:
+ # New app data bag uses - instead of _
+ # in key names - remap for compat with
+ # existing relation data keys
+ for key, value in _adata.items():
+ if key == 'api-version':
+ rdata[key.replace('-', '_')] = value.strip('v')
+ else:
+ rdata[key.replace('-', '_')] = value
+ # Re-map some keys for backwards compatibility
+ for target, source in self._forward_compat_remaps.items():
+ rdata[target] = _adata.get(source)
+ else:
+ # No app data bag presented - fallback
+ # to legacy unit based relation data
+ rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
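In isolation, the remap added above behaves as follows (a minimal sketch; the sample values are invented, the key handling is taken from the diff):

    # hypothetical app data bag contents:
    _adata = {'api-version': 'v3', 'service-user-name': 'cinder'}
    rdata = {}
    for key, value in _adata.items():
        # '-' becomes '_'; 'api-version' additionally loses its 'v' prefix
        if key == 'api-version':
            rdata[key.replace('-', '_')] = value.strip('v')
        else:
            rdata[key.replace('-', '_')] = value
    # rdata is now {'api_version': '3', 'service_user_name': 'cinder'};
    # _forward_compat_remaps then also aliases the legacy name, e.g.
    # rdata['service_username'] = _adata.get('service-user-name')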
@ -475,6 +518,19 @@ class IdentityServiceContext(OSContextGenerator):
'service_project_id': rdata.get('service_tenant_id'),
'service_domain_id': rdata.get('service_domain_id')})
# NOTE:
# keystone-k8s operator presents full URLS
# for all three endpoints - public and internal are
# externally addressable for machine based charm
if 'public_auth_url' in rdata:
ctxt.update({
'public_auth_url': rdata.get('public_auth_url'),
})
if 'internal_auth_url' in rdata:
ctxt.update({
'internal_auth_url': rdata.get('internal_auth_url'),
})
# we keep all variables in ctxt for compatibility and
# add nested dictionary for keystone_authtoken generic
# templating
@ -860,9 +916,14 @@ class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
def __init__(self, singlenode_mode=False,
- address_types=ADDRESS_TYPES):
+ address_types=None,
+ exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
+ if address_types is None:
+ address_types = ADDRESS_TYPES[:]
self.address_types = address_types
self.singlenode_mode = singlenode_mode
+ self.exporter_stats_port = exporter_stats_port
def __call__(self):
if not os.path.isdir(HAPROXY_RUN_DIR):
@ -957,10 +1018,20 @@ class HAProxyContext(OSContextGenerator):
db = kv()
ctxt['stat_password'] = db.get('stat-password')
if not ctxt['stat_password']:
- ctxt['stat_password'] = db.set('stat-password',
- pwgen(32))
+ ctxt['stat_password'] = db.set('stat-password', pwgen(32))
db.flush()
# NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0
# A new bind will be created and the prometheus-exporter
# will serve the /metrics path; the exporter endpoint
# does not use the stats auth.
haproxy_version = get_installed_version("haproxy")
if (haproxy_version and
haproxy_version.ver_str >= LooseVersion("2.0.0") and
is_relation_made("haproxy-exporter")):
ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
ctxt["stats_exporter_port"] = self.exporter_stats_port
for frontend in cluster_hosts:
if (len(cluster_hosts[frontend]['backends']) > 1 or
self.singlenode_mode):
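Net effect, sketched (the values are illustrative; get_relation_ip and get_installed_version are the charmhelpers calls used above):

    # with haproxy >= 2.0.0 installed and a 'haproxy-exporter' relation made,
    # the context gains two extra keys, e.g.:
    #   ctxt['stats_exporter_host']  -> '10.5.0.4' (ip on that relation)
    #   ctxt['stats_exporter_port']  -> 8404 (DEFAULT_HAPROXY_EXPORTER_STATS_PORT)
    # which the haproxy template below turns into an extra bind serving /metrics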

View File

@ -25,6 +25,7 @@ Helpers for high availability.
import hashlib
import json
import os
import re
@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
config,
status_set,
DEBUG,
application_name,
)
from charmhelpers.core.host import (
@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(
VIP_GROUP_NAME = 'grp_{service}_vips'
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"
class DNSHAException(Exception):
@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
relation_data['groups'] = {
key: ' '.join(vip_group)
}
def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
"""Load grafana dashboard json model and insert prometheus datasource.
:param prometheus_app_name: name of the 'prometheus' application that will
be used as datasource in grafana dashboard
:type prometheus_app_name: str
:param haproxy_dashboard: path to haproxy dashboard
:type haproxy_dashboard: str
:return: Grafana dashboard json model as a str.
:rtype: str
"""
from charmhelpers.contrib.templating import jinja
dashboard_template = os.path.basename(haproxy_dashboard)
dashboard_template_dir = os.path.dirname(haproxy_dashboard)
app_name = application_name()
datasource = "{} - Juju generated source".format(prometheus_app_name)
return jinja.render(dashboard_template,
{"datasource": datasource,
"app_name": app_name,
"prometheus_app_name": prometheus_app_name},
template_dir=dashboard_template_dir,
jinja_env_args={"variable_start_string": "<< ",
"variable_end_string": " >>"})

View File

@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
get_iface_for_address
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
return unit_get(unit_get_fallback)
def get_invalid_vips():
"""Check if any of the provided vips are invalid.
A vip is invalid if it doesn't belong to the subnet in any interface.
If all vips are valid, this returns an empty list.
:returns: A list of strings, where each string is an invalid vip address.
:rtype: list
"""
clustered = is_clustered()
vips = config('vip')
if vips:
vips = vips.split()
invalid_vips = []
if clustered and vips:
for vip in vips:
iface_for_vip = get_iface_for_address(vip)
if iface_for_vip is None:
invalid_vips.append(vip)
return invalid_vips
def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.

View File

@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None):
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
- return(known_hosts_list)
+ return known_hosts_list
def ssh_authorized_keys_lines(application_name, user=None):
@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None):
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
- return(authorized_keys_list)
+ return authorized_keys_list
def ssh_compute_remove(public_key, application_name, user=None):

View File

@ -49,6 +49,11 @@ defaults
listen stats
bind {{ local_host }}:{{ stat_port }}
{%- if stats_exporter_host and stats_exporter_port %}
bind {{ stats_exporter_host }}:{{ stats_exporter_port }}
option http-use-htx
http-request use-service prometheus-exporter if { path /metrics }
{%- endif %}
mode http
stats enable
stats hide-version

View File

@ -1327,7 +1327,7 @@ def _check_listening_on_services_ports(services, test=False):
@param test: default=False, if False, test for closed, otherwise open.
@returns OrderedDict(service: [port-not-open, ...]...), [boolean]
"""
- test = not(not(test)) # ensure test is True or False
+ test = not (not (test)) # ensure test is True or False
all_ports = list(itertools.chain(*services.values()))
ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
map_ports = OrderedDict()
@ -1583,7 +1583,7 @@ def is_unit_paused_set():
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
- return not(not(kv.get('unit-paused')))
+ return not (not (kv.get('unit-paused')))
except Exception:
return False
@ -2181,7 +2181,7 @@ def is_unit_upgrading_set():
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
- return not(not(kv.get('unit-upgrading')))
+ return not (not (kv.get('unit-upgrading')))
except Exception:
return False

View File

@ -23,6 +23,12 @@ from subprocess import (
call
)
from charmhelpers.core.hookenv import (
log,
WARNING,
INFO
)
def _luks_uuid(dev):
"""
@ -110,7 +116,7 @@ def is_device_mounted(device):
return bool(re.search(r'MOUNTPOINT=".+"', out))
- def mkfs_xfs(device, force=False, inode_size=1024):
+ def mkfs_xfs(device, force=False, inode_size=None):
"""Format device with XFS filesystem.
By default this should fail if the device already has a filesystem on it.
@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
:ptype device: str
:param force: Force operation
:ptype: force: boolean
- :param inode_size: XFS inode size in bytes
+ :param inode_size: XFS inode size in bytes; if set to 0 or None,
+ the value used will be the XFS system default
:ptype inode_size: int"""
cmd = ['mkfs.xfs']
if force:
cmd.append("-f")
- cmd += ['-i', "size={}".format(inode_size), device]
+ if inode_size:
+ if inode_size >= 256 and inode_size <= 2048:
+ cmd += ['-i', "size={}".format(inode_size)]
+ else:
+ log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
+ else:
+ log("Using XFS filesystem with system default inode size.", level=INFO)
+ cmd += [device]
check_call(cmd)
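The resulting behaviour, sketched (the device name is illustrative):

    mkfs_xfs('/dev/vdb')                  # no -i option: XFS system default
    mkfs_xfs('/dev/vdb', inode_size=512)  # within 256-2048: passes '-i size=512'
    mkfs_xfs('/dev/vdb', inode_size=64)   # out of range: warning logged, default used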

View File

@ -954,7 +954,7 @@ def pwgen(length=None):
random_generator = random.SystemRandom()
random_chars = [
random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
+ return ''.join(random_chars)
def is_phy_iface(interface):

View File

@ -230,6 +230,10 @@ CLOUD_ARCHIVE_POCKETS = {
'zed/proposed': 'jammy-proposed/zed',
'jammy-zed/proposed': 'jammy-proposed/zed',
'jammy-proposed/zed': 'jammy-proposed/zed',
# OVN
'focal-ovn-22.03': 'focal-updates/ovn-22.03',
'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
}
@ -363,6 +367,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
:type quiet: bool
:raises: subprocess.CalledProcessError
"""
if not packages:
log("Nothing to install", level=DEBUG)
return
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
@ -687,6 +694,7 @@ def add_source(source, key=None, fail_invalid=False):
(r"^cloud-archive:(.*)$", _add_apt_repository),
(r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
(r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
(r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@ -750,6 +758,11 @@ def _add_apt_repository(spec):
)
def __write_sources_list_d_actual_pocket(file, actual_pocket):
with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
def _add_cloud_pocket(pocket):
"""Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
@ -769,8 +782,9 @@ def _add_cloud_pocket(pocket):
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+ __write_sources_list_d_actual_pocket(
+ 'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
+ actual_pocket)
def _add_cloud_staging(cloud_archive_release, openstack_release):
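Together with the new CLOUD_ARCHIVE_POCKETS entries above, a hypothetical call now lands in a separate sources file (the filename follows from the code above, routed via the cloud/series check):

    add_source('cloud:focal-ovn-22.03')
    # the pocket name contains 'ovn', so the helper writes
    # /etc/apt/sources.list.d/cloud-archive-ovn.list instead of cloud-archive.list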

View File

@ -20,8 +20,8 @@ cliff<3.0.0
coverage>=4.5.2
pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
- git+https://github.com/openstack-charmers/zaza.git#egg=zaza
- git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+ git+https://github.com/openstack-charmers/zaza.git@stable/zed#egg=zaza
+ git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/zed#egg=zaza.openstack
# Needed for charm-glance:
git+https://opendev.org/openstack/tempest.git#egg=tempest

View File

@ -28,13 +28,13 @@ applications:
keystone-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
cinder-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
glance-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
@ -45,7 +45,7 @@ applications:
- '0'
- '1'
- '2'
- channel: latest/edge
+ channel: 8.0/edge
keystone:
charm: ch:keystone
@ -54,14 +54,14 @@ applications:
openstack-origin: *openstack-origin
to:
- '3'
- channel: latest/edge
+ channel: zed/edge
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
to:
- '4'
- channel: latest/edge
+ channel: 3.9/edge
ceph-mon:
charm: ch:ceph-mon
@ -73,7 +73,7 @@ applications:
- '5'
- '6'
- '7'
- channel: latest/edge
+ channel: quincy/edge
ceph-osd:
charm: ch:ceph-osd
@ -87,7 +87,7 @@ applications:
- '8'
- '9'
- '10'
- channel: latest/edge
+ channel: quincy/edge
cinder:
charm: ch:cinder
@ -98,7 +98,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '11'
- channel: latest/edge
+ channel: zed/edge
cinder-backup:
charm: ../../cinder-backup.charm
@ -109,7 +109,7 @@ applications:
charm: ch:cinder-ceph
options:
ceph-osd-replication-count: 3
- channel: latest/edge
+ channel: zed/edge
glance:
charm: ch:glance
@ -118,7 +118,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '12'
- channel: latest/edge
+ channel: zed/edge
nova-compute:
charm: ch:nova-compute
@ -127,7 +127,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '13'
- channel: latest/edge
+ channel: zed/edge
relations:

View File

@ -28,13 +28,13 @@ applications:
keystone-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
cinder-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
glance-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
@ -45,7 +45,7 @@ applications:
- '0'
- '1'
- '2'
- channel: latest/edge
+ channel: 8.0/edge
keystone:
charm: ch:keystone
@ -54,14 +54,14 @@ applications:
openstack-origin: *openstack-origin
to:
- '3'
- channel: latest/edge
+ channel: zed/edge
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
to:
- '4'
- channel: latest/edge
+ channel: 3.9/edge
ceph-mon:
charm: ch:ceph-mon
@ -73,7 +73,7 @@ applications:
- '5'
- '6'
- '7'
- channel: latest/edge
+ channel: quincy/edge
ceph-osd:
charm: ch:ceph-osd
@ -87,7 +87,7 @@ applications:
- '8'
- '9'
- '10'
- channel: latest/edge
+ channel: quincy/edge
cinder:
charm: ch:cinder
@ -98,7 +98,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '11'
- channel: latest/edge
+ channel: zed/edge
cinder-backup:
charm: ../../cinder-backup.charm
@ -109,7 +109,7 @@ applications:
charm: ch:cinder-ceph
options:
ceph-osd-replication-count: 3
- channel: latest/edge
+ channel: zed/edge
glance:
charm: ch:glance
@ -118,7 +118,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '12'
- channel: latest/edge
+ channel: zed/edge
nova-compute:
charm: ch:nova-compute
@ -127,7 +127,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '13'
- channel: latest/edge
+ channel: zed/edge
relations:

View File

@ -28,13 +28,13 @@ applications:
keystone-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
cinder-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
glance-mysql-router:
charm: ch:mysql-router
- channel: latest/edge
+ channel: 8.0/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
@ -45,7 +45,7 @@ applications:
- '0'
- '1'
- '2'
- channel: latest/edge
+ channel: 8.0/edge
keystone:
charm: ch:keystone
@ -54,14 +54,14 @@ applications:
openstack-origin: *openstack-origin
to:
- '3'
- channel: latest/edge
+ channel: zed/edge
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
to:
- '4'
- channel: latest/edge
+ channel: 3.9/edge
ceph-mon:
charm: ch:ceph-mon
@ -73,7 +73,7 @@ applications:
- '5'
- '6'
- '7'
- channel: latest/edge
+ channel: quincy/edge
ceph-osd:
charm: ch:ceph-osd
@ -87,7 +87,7 @@ applications:
- '8'
- '9'
- '10'
- channel: latest/edge
+ channel: quincy/edge
cinder:
charm: ch:cinder
@ -98,7 +98,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '11'
- channel: latest/edge
+ channel: zed/edge
cinder-backup:
charm: ../../cinder-backup.charm
@ -109,7 +109,7 @@ applications:
charm: ch:cinder-ceph
options:
ceph-osd-replication-count: 3
- channel: latest/edge
+ channel: zed/edge
glance:
charm: ch:glance
@ -118,7 +118,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '12'
- channel: latest/edge
+ channel: zed/edge
nova-compute:
charm: ch:nova-compute
@ -127,7 +127,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '13'
- channel: latest/edge
+ channel: zed/edge
relations: