Add kinetic support and sync charm-helpers

Add 22.10 run-on base and add kinetic to metadata.yaml.
Sync charm-helpers to pick up kinetic support.

Change-Id: I26aaceb01f16ddb31123a37454a9cf0d61ed384c
Corey Bryant 2023-01-25 11:43:42 -05:00
parent 0ef5451292
commit f5aa13ee02
22 changed files with 410 additions and 106 deletions


@ -34,3 +34,6 @@ bases:
- name: ubuntu
channel: "22.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "22.10"
architectures: [amd64, s390x, ppc64el, arm64]


@ -19,6 +19,7 @@
import glob
import grp
import json
import os
import pwd
import re
@ -30,6 +31,7 @@ import yaml
from charmhelpers.core.hookenv import (
application_name,
config,
ERROR,
hook_name,
local_unit,
log,
@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
# check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
# Just remove check_haproxy if haproxy is added as an lsb resource in hacluster.
for rid in relation_ids("ha"):
ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
if ha_resources:
try:
ha_resources_parsed = json.loads(ha_resources)
except ValueError as e:
log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
raise
if "lsb:haproxy" in ha_resources_parsed.values():
if "haproxy" in services:
log("removed check_haproxy. This service will be monitored by check_crm")
services.remove("haproxy")
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
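The guard above only fires when hacluster advertises haproxy as an lsb resource. A minimal sketch of the parse-and-prune step; the resource names in json_resources are illustrative, not fixed by the charm:

# Sketch of the parse-and-prune step with made-up relation data.
import json

json_resources = '{"res_haproxy_lsb": "lsb:haproxy", "res_ks_vip": "ocf:heartbeat:IPaddr2"}'
services = ["haproxy", "apache2"]

resources = json.loads(json_resources)
if "lsb:haproxy" in resources.values() and "haproxy" in services:
    # pacemaker manages haproxy, so check_crm already covers it
    services.remove("haproxy")
print(services)  # ['apache2']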


@ -324,7 +324,7 @@ def valid_hacluster_config():
'''
vip = config_get('vip')
dns = config_get('dns-ha')
if not(bool(vip) ^ bool(dns)):
if not (bool(vip) ^ bool(dns)):
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
'use high availability')
status_set('blocked', msg)
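The change is only pycodestyle (E275) spacing; the validation still requires exactly one of vip or dns-ha. A quick illustration of the exclusive-or check:

# Truth table for the check: the unit blocks unless exactly one is set.
for vip, dns in [(None, None), ("10.0.0.5", None), (None, "true"), ("10.0.0.5", "true")]:
    invalid = not (bool(vip) ^ bool(dns))
    print(vip, dns, "blocked" if invalid else "ok")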


@ -467,7 +467,7 @@ def ns_query(address):
try:
answers = dns.resolver.query(address, rtype)
except dns.resolver.NXDOMAIN:
except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
return None
if answers:
@ -539,7 +539,7 @@ def port_has_listener(address, port):
"""
cmd = ['nc', '-z', address, str(port)]
result = subprocess.call(cmd)
return not(bool(result))
return not (bool(result))
def assert_charm_supports_ipv6():


@ -25,6 +25,7 @@ import socket
import time
from base64 import b64decode
from distutils.version import LooseVersion
from subprocess import (
check_call,
check_output,
@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
get_installed_version,
)
from charmhelpers.core.hookenv import (
NoNetworkBinding,
@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import (
network_get_primary_address,
WARNING,
service_name,
remote_service_name,
)
from charmhelpers.core.sysctl import create as sysctl_create
@ -118,12 +121,7 @@ from charmhelpers.contrib.openstack.utils import (
)
from charmhelpers.core.unitdata import kv
try:
from sriov_netplan_shim import pci
except ImportError:
# The use of the function and contexts that require the pci module is
# optional.
pass
from charmhelpers.contrib.hardware import pci
try:
import psutil
@ -135,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
HAPROXY_RUN_DIR = '/var/run/haproxy/'
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404
def ensure_packages(packages):
@ -350,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):
class IdentityServiceContext(OSContextGenerator):
_forward_compat_remaps = {
'admin_user': 'admin-user-name',
'service_username': 'service-user-name',
'service_tenant': 'service-project-name',
'service_tenant_id': 'service-project-id',
'service_domain': 'service-domain-name',
}
def __init__(self,
service=None,
service_user=None,
@ -402,11 +409,16 @@ class IdentityServiceContext(OSContextGenerator):
# 'www_authenticate_uri' replaced 'auth_uri' since Stein,
# see keystonemiddleware upstream sources for more info
if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
c.update((
('www_authenticate_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
if 'public_auth_url' in ctxt:
c.update((
('www_authenticate_uri', '{}/v3'.format(
ctxt.get('public_auth_url'))),))
else:
c.update((
('www_authenticate_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
else:
c.update((
('auth_uri', "{}://{}:{}/v3".format(
@ -414,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator):
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
if 'internal_auth_url' in ctxt:
c.update((
('auth_url', ctxt.get('internal_auth_url')),))
else:
c.update((
('auth_url', "{}://{}:{}/v3".format(
ctxt.get('auth_protocol', ''),
ctxt.get('auth_host', ''),
ctxt.get('auth_port', ''))),))
c.update((
('auth_url', "{}://{}:{}/v3".format(
ctxt.get('auth_protocol', ''),
ctxt.get('auth_host', ''),
ctxt.get('auth_port', ''))),
('project_domain_name', ctxt.get('admin_domain_name', '')),
('user_domain_name', ctxt.get('admin_domain_name', '')),
('project_name', ctxt.get('admin_tenant_name', '')),
@ -426,6 +444,9 @@ class IdentityServiceContext(OSContextGenerator):
('password', ctxt.get('admin_password', '')),
('signing_dir', ctxt.get('signing_dir', '')),))
if ctxt.get('service_type'):
c.update((('service_type', ctxt.get('service_type')),))
return c
def __call__(self):
@ -443,36 +464,86 @@ class IdentityServiceContext(OSContextGenerator):
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = {}
# NOTE(jamespage):
# forwards compat with application data
# bag driven approach to relation.
_adata = relation_get(rid=rid, app=remote_service_name(rid))
adata = {}
# if no app data bag presented - fallback
# to legacy unit based relation data
rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
if _adata:
# New app data bag uses - instead of _
# in key names - remap for compat with
# existing relation data keys
for key, value in _adata.items():
if key == 'api-version':
adata[key.replace('-', '_')] = value.strip('v')
else:
adata[key.replace('-', '_')] = value
# Re-map some keys for backwards compatibility
for target, source in self._forward_compat_remaps.items():
adata[target] = _adata.get(source)
# Now preferentially get data from the app data bag, but if
# it's not available, get it from the legacy based relation
# data.
def _resolve(key):
return adata.get(key) or rdata.get(key)
serv_host = _resolve('service_host')
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
auth_host = _resolve('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
int_host = rdata.get('internal_host')
int_host = _resolve('internal_host')
int_host = format_ipv6_addr(int_host) or int_host
svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
int_protocol = rdata.get('internal_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'),
svc_protocol = _resolve('service_protocol') or 'http'
auth_protocol = _resolve('auth_protocol') or 'http'
admin_role = _resolve('admin_role') or 'Admin'
int_protocol = _resolve('internal_protocol') or 'http'
api_version = _resolve('api_version') or '2.0'
ctxt.update({'service_port': _resolve('service_port'),
'service_host': serv_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'auth_port': _resolve('auth_port'),
'internal_host': int_host,
'internal_port': rdata.get('internal_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'internal_port': _resolve('internal_port'),
'admin_tenant_name': _resolve('service_tenant'),
'admin_user': _resolve('service_username'),
'admin_password': _resolve('service_password'),
'admin_role': admin_role,
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'internal_protocol': int_protocol,
'api_version': api_version})
service_type = _resolve('service_type')
if service_type:
ctxt['service_type'] = service_type
if float(api_version) > 2:
ctxt.update({
'admin_domain_name': rdata.get('service_domain'),
'service_project_id': rdata.get('service_tenant_id'),
'service_domain_id': rdata.get('service_domain_id')})
'admin_domain_name': _resolve('service_domain'),
'service_project_id': _resolve('service_tenant_id'),
'service_domain_id': _resolve('service_domain_id')})
# NOTE:
# keystone-k8s operator presents full URLs
# for all three endpoints - public and internal are
# externally addressable for machine-based charms
public_auth_url = _resolve('public_auth_url')
# if 'public_auth_url' in rdata:
if public_auth_url:
ctxt.update({
'public_auth_url': public_auth_url,
})
internal_auth_url = _resolve('internal_auth_url')
# if 'internal_auth_url' in rdata:
if internal_auth_url:
ctxt.update({
'internal_auth_url': internal_auth_url,
})
# we keep all variables in ctxt for compatibility and
# add nested dictionary for keystone_authtoken generic
@ -486,8 +557,8 @@ class IdentityServiceContext(OSContextGenerator):
# NOTE(jamespage) this is required for >= icehouse
# so a missing value just indicates keystone needs
# upgrading
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
ctxt['admin_domain_id'] = rdata.get('service_domain_id')
ctxt['admin_tenant_id'] = _resolve('service_tenant_id')
ctxt['admin_domain_id'] = _resolve('service_domain_id')
return ctxt
return {}
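The new _resolve helper prefers the keystone app data bag and falls back to legacy unit relation data. A standalone sketch of the remap-and-resolve pattern, with made-up values:

# Hypothetical app data bag (hyphenated keys) and legacy unit data.
_adata = {"service-user-name": "keystone", "api-version": "v3"}
rdata = {"service_username": "old-keystone", "service_port": "5000"}

adata = {}
for key, value in _adata.items():
    # the app data bag uses '-' where unit data used '_'
    adata[key.replace('-', '_')] = value.strip('v') if key == 'api-version' else value
# remap new-style names onto the legacy keys the context expects
adata['admin_user'] = _adata.get('service-user-name')

def _resolve(key):
    # prefer the app data bag, fall back to unit relation data
    return adata.get(key) or rdata.get(key)

print(_resolve('admin_user'))    # keystone (from the app data bag)
print(_resolve('service_port'))  # 5000 (legacy fallback)
print(_resolve('api_version'))   # 3 (leading 'v' stripped)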
@ -539,6 +610,9 @@ class IdentityCredentialsContext(IdentityServiceContext):
'api_version': api_version
})
if rdata.get('service_type'):
ctxt['service_type'] = rdata.get('service_type')
if float(api_version) > 2:
ctxt.update({'admin_domain_name':
rdata.get('domain')})
@ -856,9 +930,14 @@ class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
def __init__(self, singlenode_mode=False,
address_types=ADDRESS_TYPES):
address_types=None,
exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
if address_types is None:
address_types = ADDRESS_TYPES[:]
self.address_types = address_types
self.singlenode_mode = singlenode_mode
self.exporter_stats_port = exporter_stats_port
def __call__(self):
if not os.path.isdir(HAPROXY_RUN_DIR):
@ -953,10 +1032,20 @@ class HAProxyContext(OSContextGenerator):
db = kv()
ctxt['stat_password'] = db.get('stat-password')
if not ctxt['stat_password']:
ctxt['stat_password'] = db.set('stat-password',
pwgen(32))
ctxt['stat_password'] = db.set('stat-password', pwgen(32))
db.flush()
# NOTE(rgildein): configure prometheus exporter for haproxy >= 2.0.0
# New bind will be created and a prometheus-exporter
# will be used for path /metrics. At the same time,
# prometheus-exporter avoids using auth.
haproxy_version = get_installed_version("haproxy")
if (haproxy_version and
haproxy_version.ver_str >= LooseVersion("2.0.0") and
is_relation_made("haproxy-exporter")):
ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
ctxt["stats_exporter_port"] = self.exporter_stats_port
for frontend in cluster_hosts:
if (len(cluster_hosts[frontend]['backends']) > 1 or
self.singlenode_mode):
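A sketch of the version gate above, assuming the installed haproxy reports 2.4.18 (any ver_str comparable by LooseVersion behaves the same way):

from distutils.version import LooseVersion

ver_str = "2.4.18"        # stand-in for get_installed_version("haproxy").ver_str
relation_made = True      # stand-in for is_relation_made("haproxy-exporter")

if ver_str and LooseVersion(ver_str) >= LooseVersion("2.0.0") and relation_made:
    print("render the prometheus-exporter bind")
else:
    print("skip: haproxy too old or no haproxy-exporter relation")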
@ -2556,14 +2645,18 @@ class OVSDPDKDeviceContext(OSContextGenerator):
:rtype: List[int]
"""
cores = []
ranges = cpulist.split(',')
for cpu_range in ranges:
if "-" in cpu_range:
cpu_min_max = cpu_range.split('-')
cores += range(int(cpu_min_max[0]),
int(cpu_min_max[1]) + 1)
else:
cores.append(int(cpu_range))
if cpulist and re.match(r"^[0-9,\-^]*$", cpulist):
ranges = cpulist.split(',')
for cpu_range in ranges:
if "-" in cpu_range:
cpu_min_max = cpu_range.split('-')
cores += range(int(cpu_min_max[0]),
int(cpu_min_max[1]) + 1)
elif "^" in cpu_range:
cpu_rm = cpu_range.split('^')
cores.remove(int(cpu_rm[1]))
else:
cores.append(int(cpu_range))
return cores
def _numa_node_cores(self):
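The rewritten parser also understands the '^N' exclusion syntax and rejects malformed lists. A standalone sketch of the same logic:

import re

def parse_cpu_list(cpulist):
    # Mirrors the updated parser: ranges, single cores, and '^N' exclusions.
    cores = []
    if cpulist and re.match(r"^[0-9,\-^]*$", cpulist):
        for cpu_range in cpulist.split(','):
            if "-" in cpu_range:
                lo, hi = cpu_range.split('-')
                cores += range(int(lo), int(hi) + 1)
            elif "^" in cpu_range:
                cores.remove(int(cpu_range.split('^')[1]))
            else:
                cores.append(int(cpu_range))
    return cores

print(parse_cpu_list("0-3,^2,8"))    # [0, 1, 3, 8]
print(parse_cpu_list("not-a-list"))  # [] -- rejected by the regex guard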
@ -2582,36 +2675,32 @@ class OVSDPDKDeviceContext(OSContextGenerator):
def cpu_mask(self):
"""Get hex formatted CPU mask
The mask is based on using the first config:dpdk-socket-cores
cores of each NUMA node in the unit.
:returns: hex formatted CPU mask
:rtype: str
"""
return self.cpu_masks()['dpdk_lcore_mask']
def cpu_masks(self):
"""Get hex formatted CPU masks
The mask is based on using the first config:dpdk-socket-cores
cores of each NUMA node in the unit, followed by the
next config:pmd-socket-cores
:returns: Dict of hex formatted CPU masks
:rtype: Dict[str, str]
"""
num_lcores = config('dpdk-socket-cores')
pmd_cores = config('pmd-socket-cores')
lcore_mask = 0
pmd_mask = 0
num_cores = config('dpdk-socket-cores')
mask = 0
for cores in self._numa_node_cores().values():
for core in cores[:num_lcores]:
lcore_mask = lcore_mask | 1 << core
for core in cores[num_lcores:][:pmd_cores]:
pmd_mask = pmd_mask | 1 << core
return {
'pmd_cpu_mask': format(pmd_mask, '#04x'),
'dpdk_lcore_mask': format(lcore_mask, '#04x')}
for core in cores[:num_cores]:
mask = mask | 1 << core
return format(mask, '#04x')
@classmethod
def pmd_cpu_mask(cls):
"""Get hex formatted pmd CPU mask
The mask is based on config:pmd-cpu-set.
:returns: hex formatted CPU mask
:rtype: str
"""
mask = 0
cpu_list = cls._parse_cpu_list(config('pmd-cpu-set'))
if cpu_list:
for core in cpu_list:
mask = mask | 1 << core
return format(mask, '#x')
def socket_memory(self):
"""Formatted list of socket memory configuration per socket.
@ -2690,6 +2779,7 @@ class OVSDPDKDeviceContext(OSContextGenerator):
ctxt['device_whitelist'] = self.device_whitelist()
ctxt['socket_memory'] = self.socket_memory()
ctxt['cpu_mask'] = self.cpu_mask()
ctxt['pmd_cpu_mask'] = self.pmd_cpu_mask()
return ctxt
@ -3120,7 +3210,7 @@ class SRIOVContext(OSContextGenerator):
"""Determine number of Virtual Functions (VFs) configured for device.
:param device: Object describing a PCI Network interface card (NIC)/
:type device: sriov_netplan_shim.pci.PCINetDevice
:type device: contrib.hardware.pci.PCINetDevice
:param sriov_numvfs: Number of VFs requested for blanket configuration.
:type sriov_numvfs: int
:returns: Number of VFs to configure for device


@ -25,6 +25,7 @@ Helpers for high availability.
import hashlib
import json
import os
import re
@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
config,
status_set,
DEBUG,
application_name,
)
from charmhelpers.core.host import (
@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(
VIP_GROUP_NAME = 'grp_{service}_vips'
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"
class DNSHAException(Exception):
@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
relation_data['groups'] = {
key: ' '.join(vip_group)
}
def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
"""Load grafana dashboard json model and insert prometheus datasource.
:param prometheus_app_name: name of the 'prometheus' application that will
be used as datasource in grafana dashboard
:type prometheus_app_name: str
:param haproxy_dashboard: path to haproxy dashboard
:type haproxy_dashboard: str
:return: Grafana dashboard json model as a str.
:rtype: str
"""
from charmhelpers.contrib.templating import jinja
dashboard_template = os.path.basename(haproxy_dashboard)
dashboard_template_dir = os.path.dirname(haproxy_dashboard)
app_name = application_name()
datasource = "{} - Juju generated source".format(prometheus_app_name)
return jinja.render(dashboard_template,
{"datasource": datasource,
"app_name": app_name,
"prometheus_app_name": prometheus_app_name},
template_dir=dashboard_template_dir,
jinja_env_args={"variable_start_string": "<< ",
"variable_end_string": " >>"})


@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
get_iface_for_address
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
return unit_get(unit_get_fallback)
def get_invalid_vips():
"""Check if any of the provided vips are invalid.
A vip is invalid if it doesn't belong to the subnet in any interface.
If all vips are valid, this returns an empty list.
:returns: A list of strings, where each string is an invalid vip address.
:rtype: list
"""
clustered = is_clustered()
vips = config('vip')
if vips:
vips = vips.split()
invalid_vips = []
if clustered and vips:
for vip in vips:
iface_for_vip = get_iface_for_address(vip)
if iface_for_vip is None:
invalid_vips.append(vip)
return invalid_vips
def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.


@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None):
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return(known_hosts_list)
return known_hosts_list
def ssh_authorized_keys_lines(application_name, user=None):
@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None):
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
return(authorized_keys_list)
return authorized_keys_list
def ssh_compute_remove(public_key, application_name, user=None):


@ -49,6 +49,11 @@ defaults
listen stats
bind {{ local_host }}:{{ stat_port }}
{%- if stats_exporter_host and stats_exporter_port %}
bind {{ stats_exporter_host }}:{{ stats_exporter_port }}
option http-use-htx
http-request use-service prometheus-exporter if { path /metrics }
{%- endif %}
mode http
stats enable
stats hide-version


@ -9,4 +9,9 @@ project_name = {{ admin_tenant_name }}
username = {{ admin_user }}
password = {{ admin_password }}
signing_dir = {{ signing_dir }}
{% if service_type -%}
service_type = {{ service_type }}
{% endif -%}
service_token_roles = {{ admin_role }}
service_token_roles_required = True
{% endif -%}


@ -6,6 +6,9 @@ auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3
project_domain_name = {{ admin_domain_name }}
user_domain_name = {{ admin_domain_name }}
{% if service_type -%}
service_type = {{ service_type }}
{% endif -%}
{% else -%}
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}


@ -158,6 +158,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2021.1', 'wallaby'),
('2021.2', 'xena'),
('2022.1', 'yoga'),
('2022.2', 'zed'),
])
# The ugly duckling - must list releases oldest to newest
@ -400,13 +401,16 @@ def get_os_codename_version(vers):
error_out(e)
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES,
raise_exception=False):
'''Determine OpenStack version number from codename.'''
for k, v in version_map.items():
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
if raise_exception:
raise ValueError(str(e))
error_out(e)
@ -1323,7 +1327,7 @@ def _check_listening_on_services_ports(services, test=False):
@param test: default=False, if False, test for closed, otherwise open.
@returns OrderedDict(service: [port-not-open, ...]...), [boolean]
"""
test = not(not(test)) # ensure test is True or False
test = not (not (test)) # ensure test is True or False
all_ports = list(itertools.chain(*services.values()))
ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
map_ports = OrderedDict()
@ -1579,7 +1583,7 @@ def is_unit_paused_set():
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused')))
return not (not (kv.get('unit-paused')))
except Exception:
return False
@ -2177,7 +2181,7 @@ def is_unit_upgrading_set():
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-upgrading')))
return not (not (kv.get('unit-upgrading')))
except Exception:
return False


@ -173,7 +173,12 @@ def retrieve_secret_id(url, token):
# hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
if not isinstance(client.adapter, hvac.adapters.Request):
client.adapter = hvac.adapters.Request(base_uri=url, token=token)
response = client._post('/v1/sys/wrapping/unwrap')
try:
# hvac == 1.0.0 has an API to unwrap with the user token
response = client.sys.unwrap()
except AttributeError:
# fallback to hvac < 1.0.0
response = client._post('/v1/sys/wrapping/unwrap')
if response.status_code == 200:
data = response.json()
return data['data']['secret_id']


@ -614,7 +614,8 @@ class Pool(BasePool):
class ReplicatedPool(BasePool):
def __init__(self, service, name=None, pg_num=None, replicas=None,
percent_data=None, app_name=None, op=None):
percent_data=None, app_name=None, op=None,
profile_name='replicated_rule'):
"""Initialize ReplicatedPool object.
Pool information is either initialized from individual keyword
@ -631,6 +632,8 @@ class ReplicatedPool(BasePool):
to this replicated pool.
:type replicas: int
:raises: KeyError
:param profile_name: Crush Profile to use
:type profile_name: Optional[str]
"""
# NOTE: Do not perform initialization steps that require live data from
# a running cluster here. The *Pool classes may be used for validation.
@ -645,11 +648,20 @@ class ReplicatedPool(BasePool):
# we will fail with KeyError if it is not provided.
self.replicas = op['replicas']
self.pg_num = op.get('pg_num')
self.profile_name = op.get('crush-profile') or profile_name
else:
self.replicas = replicas or 2
self.pg_num = pg_num
self.profile_name = profile_name or 'replicated_rule'
def _create(self):
# Validate if crush profile exists
if self.profile_name is None:
msg = ("Failed to discover crush profile named "
"{}".format(self.profile_name))
log(msg, level=ERROR)
raise PoolCreationError(msg)
# Do extra validation on pg_num with data from live cluster
if self.pg_num:
# Since the number of placement groups were specified, ensure
@ -667,12 +679,12 @@ class ReplicatedPool(BasePool):
'--pg-num-min={}'.format(
min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
),
self.name, str(self.pg_num)
self.name, str(self.pg_num), self.profile_name
]
else:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(self.pg_num)
self.name, str(self.pg_num), self.profile_name
]
check_call(cmd)
@ -691,7 +703,7 @@ class ErasurePool(BasePool):
def __init__(self, service, name=None, erasure_code_profile=None,
percent_data=None, app_name=None, op=None,
allow_ec_overwrites=False):
"""Initialize ReplicatedPool object.
"""Initialize ErasurePool object.
Pool information is either initialized from individual keyword
arguments or from an individual CephBrokerRq operation Dict.
@ -777,6 +789,9 @@ def enabled_manager_modules():
:rtype: List[str]
"""
cmd = ['ceph', 'mgr', 'module', 'ls']
quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0
if quincy_or_later:
cmd.append('--format=json')
try:
modules = check_output(cmd).decode('utf-8')
except CalledProcessError as e:
@ -1842,7 +1857,7 @@ class CephBrokerRq(object):
}
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
**kwargs):
crush_profile=None, **kwargs):
"""Adds an operation to create a replicated pool.
Refer to docstring for ``_partial_build_common_op_create`` for
@ -1856,6 +1871,10 @@ class CephBrokerRq(object):
for pool.
:type pg_num: int
:raises: AssertionError if provided data is of invalid type/range
:param crush_profile: Name of crush profile to use. If not set the
ceph-mon unit handling the broker request will
set its default value.
:type crush_profile: Optional[str]
"""
if pg_num and kwargs.get('weight'):
raise ValueError('pg_num and weight are mutually exclusive')
@ -1865,6 +1884,7 @@ class CephBrokerRq(object):
'name': name,
'replicas': replica_count,
'pg_num': pg_num,
'crush-profile': crush_profile
}
op.update(self._partial_build_common_op_create(**kwargs))
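A consuming charm can now pin a pool to a crush profile when building the broker request. A usage sketch from inside a charm environment; the profile name is illustrative, and leaving crush_profile unset lets the ceph-mon unit apply its default:

# Builds the broker request body only; nothing talks to Ceph here.
from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
rq.add_op_create_replicated_pool(
    name='glance', replica_count=3,
    crush_profile='replicated_nvme')  # hypothetical profile name
print(rq.request)  # JSON body including "crush-profile": "replicated_nvme"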


@ -23,6 +23,12 @@ from subprocess import (
call
)
from charmhelpers.core.hookenv import (
log,
WARNING,
INFO
)
def _luks_uuid(dev):
"""
@ -110,7 +116,7 @@ def is_device_mounted(device):
return bool(re.search(r'MOUNTPOINT=".+"', out))
def mkfs_xfs(device, force=False, inode_size=1024):
def mkfs_xfs(device, force=False, inode_size=None):
"""Format device with XFS filesystem.
By default this should fail if the device already has a filesystem on it.
@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
:ptype device: str
:param force: Force operation
:ptype force: boolean
:param inode_size: XFS inode size in bytes
:param inode_size: XFS inode size in bytes; if set to 0 or None,
the value used will be the XFS system default
:ptype inode_size: int"""
cmd = ['mkfs.xfs']
if force:
cmd.append("-f")
cmd += ['-i', "size={}".format(inode_size), device]
if inode_size:
if inode_size >= 256 and inode_size <= 2048:
cmd += ['-i', "size={}".format(inode_size)]
else:
log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
else:
log("Using XFS filesystem with system default inode size.", level=INFO)
cmd += [device]
check_call(cmd)
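Out-of-range inode sizes no longer reach mkfs. A sketch of the command shapes the new validation produces (the device path is illustrative):

# Expected command construction, mirroring the new validation:
#   inode_size=None -> mkfs.xfs -f /dev/vdb           (system default)
#   inode_size=512  -> mkfs.xfs -f -i size=512 /dev/vdb
#   inode_size=64   -> warning logged, system default used
def build_cmd(device, force=False, inode_size=None):
    cmd = ['mkfs.xfs']
    if force:
        cmd.append('-f')
    if inode_size and 256 <= inode_size <= 2048:
        cmd += ['-i', 'size={}'.format(inode_size)]
    return cmd + [device]

print(build_cmd('/dev/vdb', force=True, inode_size=512))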


@ -114,6 +114,33 @@ def service_stop(service_name, **kwargs):
return service('stop', service_name, **kwargs)
def service_enable(service_name, **kwargs):
"""Enable a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be enabled. The
following example enables the ceph-osd service for instance id=4:
service_enable('ceph-osd', id=4)
:param service_name: the name of the service to enable
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
return service('enable', service_name, **kwargs)
def service_restart(service_name, **kwargs):
"""Restart a system service.
@ -134,7 +161,7 @@ def service_restart(service_name, **kwargs):
:param service_name: the name of the service to restart
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
@ -250,7 +277,7 @@ def service_resume(service_name, init_dir="/etc/init",
return started
def service(action, service_name, **kwargs):
def service(action, service_name=None, **kwargs):
"""Control a system service.
:param action: the action to take on the service
@ -259,7 +286,9 @@ def service(action, service_name, **kwargs):
the form of key=value.
"""
if init_is_systemd(service_name=service_name):
cmd = ['systemctl', action, service_name]
cmd = ['systemctl', action]
if service_name is not None:
cmd.append(service_name)
else:
cmd = ['service', service_name, action]
for key, value in kwargs.items():
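Making service_name optional lets callers drive systemctl verbs that take no unit argument; daemon-reload is the obvious case, though the diff itself only relaxes the signature. A sketch of the resulting command shapes:

# Command shapes under systemd after the signature change:
#   service('restart', 'haproxy') -> ['systemctl', 'restart', 'haproxy']
#   service('daemon-reload')      -> ['systemctl', 'daemon-reload']
def systemd_cmd(action, service_name=None):
    cmd = ['systemctl', action]
    if service_name is not None:
        cmd.append(service_name)
    return cmd

print(systemd_cmd('daemon-reload'))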
@ -925,7 +954,7 @@ def pwgen(length=None):
random_generator = random.SystemRandom()
random_chars = [
random_generator.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
return ''.join(random_chars)
def is_phy_iface(interface):


@ -30,6 +30,7 @@ UBUNTU_RELEASES = (
'hirsute',
'impish',
'jammy',
'kinetic',
)


@ -15,7 +15,8 @@
import os
import json
import inspect
from collections import Iterable, OrderedDict
from collections import OrderedDict
from collections.abc import Iterable
from charmhelpers.core import host
from charmhelpers.core import hookenv


@ -171,8 +171,9 @@ class Storage(object):
path parameter which causes sqlite3 to only build the db in memory.
This should only be used for testing purposes.
"""
def __init__(self, path=None):
def __init__(self, path=None, keep_revisions=False):
self.db_path = path
self.keep_revisions = keep_revisions
if path is None:
if 'UNIT_STATE_DB' in os.environ:
self.db_path = os.environ['UNIT_STATE_DB']
@ -242,7 +243,7 @@ class Storage(object):
Remove a key from the database entirely.
"""
self.cursor.execute('delete from kv where key=?', [key])
if self.revision and self.cursor.rowcount:
if self.keep_revisions and self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values (?, ?, ?)',
[key, self.revision, json.dumps('DELETED')])
@ -259,14 +260,14 @@ class Storage(object):
if keys is not None:
keys = ['%s%s' % (prefix, key) for key in keys]
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
if self.revision and self.cursor.rowcount:
if self.keep_revisions and self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
else:
self.cursor.execute('delete from kv where key like ?',
['%s%%' % prefix])
if self.revision and self.cursor.rowcount:
if self.keep_revisions and self.revision and self.cursor.rowcount:
self.cursor.execute(
'insert into kv_revisions values (?, ?, ?)',
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
@ -299,7 +300,7 @@ class Storage(object):
where key = ?''', [serialized, key])
# Save
if not self.revision:
if (not self.keep_revisions) or (not self.revision):
return value
self.cursor.execute(
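Revision journaling is now opt-in: deletes and updates touch kv_revisions only when keep_revisions=True. A usage sketch against an in-memory database:

# Only with keep_revisions=True does Storage journal changes into
# kv_revisions inside a hook scope.
from charmhelpers.core.unitdata import Storage

db = Storage(path=':memory:', keep_revisions=True)
with db.hook_scope('config-changed'):
    db.set('vip', '10.0.0.5')
    db.unset('old-key')  # recorded as DELETED in kv_revisions
db.close()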


@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import hashlib
import re
@ -24,11 +25,15 @@ from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.hookenv import (
env_proxy_settings,
)
from charmhelpers.core.host import mkdir, check_hash
from urllib.request import (
build_opener, install_opener, urlopen, urlretrieve,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
ProxyHandler
)
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError
@ -50,6 +55,20 @@ def splitpasswd(user):
return user, None
@contextlib.contextmanager
def proxy_env():
"""
Creates a context which temporarily modifies the proxy settings in os.environ.
"""
restore = {**os.environ} # Copy the current os.environ
juju_proxies = env_proxy_settings() or {}
os.environ.update(**juju_proxies) # Insert or Update the os.environ
yield os.environ
for key in juju_proxies:
del os.environ[key] # remove any keys which were added or updated
os.environ.update(**restore) # restore any original values
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""
Handler to download archive files from arbitrary URLs.
@ -80,6 +99,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse(source)
handlers = []
if proto in ('http', 'https'):
auth, barehost = splituser(netloc)
if auth is not None:
@ -89,10 +109,13 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
# Realm is set to None in add_password to force the username and password
# to be used whatever the realm
passman.add_password(None, source, username, password)
authhandler = HTTPBasicAuthHandler(passman)
opener = build_opener(authhandler)
install_opener(opener)
response = urlopen(source)
handlers.append(HTTPBasicAuthHandler(passman))
with proxy_env():
handlers.append(ProxyHandler())
opener = build_opener(*handlers)
install_opener(opener)
response = urlopen(source)
try:
with open(dest, 'wb') as dest_file:
dest_file.write(response.read())
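ProxyHandler() with no arguments reads proxies from os.environ, so wrapping the download in proxy_env() applies the Juju model proxies only for the duration of the fetch. A generic sketch of that scoped-environment pattern (the proxy value is hypothetical):

import os
import contextlib

@contextlib.contextmanager
def scoped_env(extra):
    # Temporarily overlay variables, then restore the original environ.
    saved = {**os.environ}
    os.environ.update(extra)
    try:
        yield
    finally:
        for key in extra:
            del os.environ[key]
        os.environ.update(saved)

with scoped_env({"https_proxy": "http://squid.internal:3128"}):  # hypothetical
    print(os.environ["https_proxy"])  # set only inside the block
print("https_proxy" in os.environ)    # False again (if unset before)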


@ -222,6 +222,18 @@ CLOUD_ARCHIVE_POCKETS = {
'yoga/proposed': 'focal-proposed/yoga',
'focal-yoga/proposed': 'focal-proposed/yoga',
'focal-proposed/yoga': 'focal-proposed/yoga',
# Zed
'zed': 'jammy-updates/zed',
'jammy-zed': 'jammy-updates/zed',
'jammy-zed/updates': 'jammy-updates/zed',
'jammy-updates/zed': 'jammy-updates/zed',
'zed/proposed': 'jammy-proposed/zed',
'jammy-zed/proposed': 'jammy-proposed/zed',
'jammy-proposed/zed': 'jammy-proposed/zed',
# OVN
'focal-ovn-22.03': 'focal-updates/ovn-22.03',
'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
}
@ -248,6 +260,7 @@ OPENSTACK_RELEASES = (
'wallaby',
'xena',
'yoga',
'zed',
)
@ -274,6 +287,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('hirsute', 'wallaby'),
('impish', 'xena'),
('jammy', 'yoga'),
('kinetic', 'zed'),
])
@ -353,6 +367,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
:type quiet: bool
:raises: subprocess.CalledProcessError
"""
if not packages:
log("Nothing to install", level=DEBUG)
return
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
@ -677,6 +694,7 @@ def add_source(source, key=None, fail_invalid=False):
(r"^cloud-archive:(.*)$", _add_apt_repository),
(r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
(r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
(r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@ -740,6 +758,11 @@ def _add_apt_repository(spec):
)
def __write_sources_list_d_actual_pocket(file, actual_pocket):
with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
def _add_cloud_pocket(pocket):
"""Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
@ -759,8 +782,9 @@ def _add_cloud_pocket(pocket):
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
__write_sources_list_d_actual_pocket(
'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
actual_pocket)
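OVN pockets now get their own sources file so they can coexist with an OpenStack cloud-archive entry. The filename selection reduces to:

# 'focal-ovn-22.03' -> cloud-archive-ovn.list
# 'jammy-zed'       -> cloud-archive.list
for pocket in ('focal-ovn-22.03', 'jammy-zed'):
    name = 'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn')
    print(pocket, '->', name)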
def _add_cloud_staging(cloud_archive_release, openstack_release):
@ -921,10 +945,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
try:
result = subprocess.check_call(cmd, env=env, **kwargs)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > max_retries:
raise
result = e.returncode
if result not in retry_results:
# a non-retriable exitcode was produced
raise
retry_count += 1
if retry_count > max_retries:
# a retriable exitcode was produced more than {max_retries} times
raise
log(retry_message)
time.sleep(CMD_RETRY_DELAY)
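The reordered retry logic distinguishes non-retriable exit codes, which raise immediately, from retriable ones, which are retried up to max_retries times. A condensed sketch of the control flow:

import subprocess
import time

def run_with_retries(cmd, max_retries=3, retry_exitcodes=(1,), delay=0.1):
    retry_count = 0
    while True:
        try:
            return subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            if e.returncode not in retry_exitcodes:
                raise          # non-retriable exit code: fail fast
            retry_count += 1
            if retry_count > max_retries:
                raise          # retriable code seen too many times
            time.sleep(delay)

# e.g. run_with_retries(['apt-get', 'update']) on a unit with a flaky mirror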


@ -12,6 +12,7 @@ tags:
series:
- focal
- jammy
- kinetic
provides:
amqp:
interface: rabbitmq