Updates for testing period for 20.01 release

Includes updates to charmhelpers/charms.openstack for cert_utils,
and for the unit-get related install hook error on Juju 2.9

* charm-helpers sync for classic charms
* rebuild for reactive charms
* ensure tox.ini is from release-tools
* ensure requirements.txt files are from release-tools
* On reactive charms:
  - ensure master branch for charms.openstack
  - ensure master branch for charm-helpers
* include fix for local_address() with NoNetworkBinding

Change-Id: I7d8b1ac654f1c7b394c9218c7dbbbe8e6980632d
Alex Kavanagh 2021-01-12 15:24:13 +00:00
parent b77e1e1b1e
commit 033500842d
15 changed files with 468 additions and 69 deletions


@@ -139,10 +139,11 @@ define service {{
 """{description}
     check_command check_nrpe!{command}
     servicegroups {nagios_servicegroup}
+{service_config_overrides}
 }}
 """)
 
-    def __init__(self, shortname, description, check_cmd):
+    def __init__(self, shortname, description, check_cmd, max_check_attempts=None):
         super(Check, self).__init__()
         # XXX: could be better to calculate this from the service name
         if not re.match(self.shortname_re, shortname):
@@ -155,6 +156,7 @@ define service {{
         # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
         self.description = description
         self.check_cmd = self._locate_cmd(check_cmd)
+        self.max_check_attempts = max_check_attempts
 
     def _get_check_filename(self):
         return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
@@ -216,12 +218,19 @@ define service {{
                              nagios_servicegroups):
         self._remove_service_files()
 
+        if self.max_check_attempts:
+            service_config_overrides = ' max_check_attempts {}'.format(
+                self.max_check_attempts
+            )  # Note indentation is here rather than in the template to avoid trailing spaces
+        else:
+            service_config_overrides = ''  # empty string to avoid printing 'None'
         templ_vars = {
             'nagios_hostname': hostname,
             'nagios_servicegroup': nagios_servicegroups,
             'description': self.description,
             'shortname': self.shortname,
             'command': self.command,
+            'service_config_overrides': service_config_overrides,
         }
         nrpe_service_text = Check.service_template.format(**templ_vars)
         nrpe_service_file = self._get_service_filename(hostname)
@@ -327,6 +336,11 @@ class NRPE(object):
             nrpe_monitors[nrpecheck.shortname] = {
                 "command": nrpecheck.command,
             }
+            # If we were passed max_check_attempts, add that to the relation data
+            try:
+                nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts
+            except AttributeError:
+                pass
 
         # update-status hooks are configured to firing every 5 minutes by
         # default. When nagios-nrpe-server is restarted, the nagios server
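The diff above only shows the library side. A minimal, hypothetical charm-side sketch of the new keyword follows; the check name and command are invented, and it assumes NRPE.add_check forwards keyword arguments to Check (as it does in charm-helpers):

    # Hypothetical charm hook snippet using the new max_check_attempts keyword.
    from charmhelpers.contrib.charmsupport.nrpe import NRPE

    nrpe = NRPE()
    nrpe.add_check(
        shortname='rabbitmq_queues',      # illustrative check name
        description='Check RabbitMQ queue depth',
        check_cmd='check_rabbitmq_queues.py -c /etc/nagios/queues.yaml',
        max_check_attempts=5,  # only alert after 5 consecutive failed probes
    )
    nrpe.write()  # renders 'max_check_attempts 5' into the service stanza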


@@ -34,6 +34,10 @@ from charmhelpers.core.hookenv import (
     INFO,
 )
 
+# This file contains the CA cert from the charms ssl_ca configuration
+# option, in future the file name should be updated reflect that.
+CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert'
+
 
 def get_cert(cn=None):
     # TODO: deal with multiple https endpoints via charm config
@@ -83,4 +87,4 @@ def retrieve_ca_cert(cert_file):
 
 
 def install_ca_cert(ca_cert):
-    host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert')
+    host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE)


@@ -98,3 +98,8 @@ class DisabledModuleAudit(BaseAudit):
     def _restart_apache():
         """Restarts the apache process"""
         subprocess.check_output(['service', 'apache2', 'restart'])
+
+    @staticmethod
+    def is_ssl_enabled():
+        """Check if SSL module is enabled or not"""
+        return 'ssl' in DisabledModuleAudit._get_loaded_modules()
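A short, hypothetical usage sketch of the new helper (the surrounding charm functions are invented; the import path is the usual charm-helpers location of this class):

    from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit

    if DisabledModuleAudit.is_ssl_enabled():
        # mod_ssl is already loaded, so TLS vhosts can be rendered safely
        configure_https()       # hypothetical charm function
    else:
        configure_http_only()   # hypothetical charm function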


@@ -396,7 +396,8 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
         if global_addrs:
             # Make sure any found global addresses are not temporary
             cmd = ['ip', 'addr', 'show', iface]
-            out = subprocess.check_output(cmd).decode('UTF-8')
+            out = subprocess.check_output(
+                cmd).decode('UTF-8', errors='replace')
             if dynamic_only:
                 key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
             else:


@@ -16,6 +16,7 @@
 import os
 import json
 
+from base64 import b64decode
 
 from charmhelpers.contrib.network.ip import (
     get_hostname,
@@ -28,26 +29,32 @@ from charmhelpers.core.hookenv import (
     related_units,
     relation_get,
     relation_ids,
-    unit_get,
+    remote_service_name,
     NoNetworkBinding,
     log,
     WARNING,
+    INFO,
 )
 
 from charmhelpers.contrib.openstack.ip import (
-    ADMIN,
     resolve_address,
     get_vip_in_network,
-    INTERNAL,
-    PUBLIC,
-    ADDRESS_MAP)
+    ADDRESS_MAP,
+    get_default_api_bindings,
+    local_address,
+)
+from charmhelpers.contrib.network.ip import (
+    get_relation_ip,
+)
 
 from charmhelpers.core.host import (
+    CA_CERT_DIR,
+    install_ca_cert,
     mkdir,
     write_file,
 )
 
 from charmhelpers.contrib.hahelpers.apache import (
-    install_ca_cert
+    CONFIG_CA_CERT_FILE,
 )
@@ -74,7 +81,7 @@ class CertRequest(object):
 
     def add_hostname_cn(self):
         """Add a request for the hostname of the machine"""
-        ip = unit_get('private-address')
+        ip = local_address(unit_get_fallback='private-address')
         addresses = [ip]
         # If a vip is being used without os-hostname config or
         # network spaces then we need to ensure the local units
@@ -113,63 +120,153 @@ class CertRequest(object):
         return req
 
 
-def get_certificate_request(json_encode=True):
-    """Generate a certificatee requests based on the network confioguration
+def get_certificate_request(json_encode=True, bindings=None):
+    """Generate a certificate requests based on the network configuration
 
+    :param json_encode: Encode request in JSON or not. Used for setting
+                        directly on a relation.
+    :type json_encode: boolean
+    :param bindings: List of bindings to check in addition to default api
+                     bindings.
+    :type bindings: list of strings
+    :returns: CertRequest request as dictionary or JSON string.
+    :rtype: Union[dict, json]
     """
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = list(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
     req = CertRequest(json_encode=json_encode)
     req.add_hostname_cn()
     # Add os-hostname entries
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        net_config = config(ADDRESS_MAP[net_type]['override'])
+    _sans = get_certificate_sans(bindings=bindings)
+
+    # Handle specific hostnames per binding
+    for binding in bindings:
+        try:
+            hostname_override = config(ADDRESS_MAP[binding]['override'])
+        except KeyError:
+            hostname_override = None
         try:
-            net_addr = resolve_address(endpoint_type=net_type)
-            ip = network_get_primary_address(
-                ADDRESS_MAP[net_type]['binding'])
+            try:
+                net_addr = resolve_address(endpoint_type=binding)
+            except KeyError:
+                net_addr = None
+            ip = network_get_primary_address(binding)
             addresses = [net_addr, ip]
             vip = get_vip_in_network(resolve_network_cidr(ip))
             if vip:
                 addresses.append(vip)
-            if net_config:
+            # Clear any Nones or duplicates
+            addresses = list(set([i for i in addresses if i]))
+            # Add hostname certificate request
+            if hostname_override:
                 req.add_entry(
-                    net_type,
-                    net_config,
+                    binding,
+                    hostname_override,
                     addresses)
-            else:
-                # There is network address with no corresponding hostname.
-                # Add the ip to the hostname cert to allow for this.
-                req.add_hostname_cn_ip(addresses)
+            # Remove hostname specific addresses from _sans
+            for addr in addresses:
+                try:
+                    _sans.remove(addr)
+                except (ValueError, KeyError):
+                    pass
         except NoNetworkBinding:
             log("Skipping request for certificate for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
+                "local address found".format(binding), WARNING)
+    # Gurantee all SANs are covered
+    # These are network addresses with no corresponding hostname.
+    # Add the ips to the hostname cert to allow for this.
+    req.add_hostname_cn_ip(_sans)
     return req.get_request()
 
 
-def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
+def get_certificate_sans(bindings=None):
+    """Get all possible IP addresses for certificate SANs.
+
+    :param bindings: List of bindings to check in addition to default api
+                     bindings.
+    :type bindings: list of strings
+    :returns: List of binding string names
+    :rtype: List[str]
+    """
+    _sans = [local_address(unit_get_fallback='private-address')]
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = list(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
+
+    for binding in bindings:
+        # Check for config override
+        try:
+            net_config = config(ADDRESS_MAP[binding]['config'])
+        except KeyError:
+            # There is no configuration network for this binding name
+            net_config = None
+        # Using resolve_address is likely redundant. Keeping it here in
+        # case there is an edge case it handles.
+        try:
+            net_addr = resolve_address(endpoint_type=binding)
+        except KeyError:
+            net_addr = None
+        ip = get_relation_ip(binding, cidr_network=net_config)
+        _sans = _sans + [net_addr, ip]
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            _sans.append(vip)
+    # Clear any Nones and duplicates
+    return list(set([i for i in _sans if i]))
+
+
+def create_ip_cert_links(ssl_dir, custom_hostname_link=None, bindings=None):
     """Create symlinks for SAN records
 
     :param ssl_dir: str Directory to create symlinks in
     :param custom_hostname_link: str Additional link to be created
+    :param bindings: List of bindings to check in addition to default api
+                     bindings.
+    :type bindings: list of strings
     """
-    hostname = get_hostname(unit_get('private-address'))
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = list(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
+
+    # This includes the hostname cert and any specific bindng certs:
+    # admin, internal, public
+    req = get_certificate_request(json_encode=False, bindings=bindings)["cert_requests"]
+    # Specific certs
+    for cert_req in req.keys():
+        requested_cert = os.path.join(
+            ssl_dir,
+            'cert_{}'.format(cert_req))
+        requested_key = os.path.join(
+            ssl_dir,
+            'key_{}'.format(cert_req))
+        for addr in req[cert_req]['sans']:
+            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+            if os.path.isfile(requested_cert) and not os.path.isfile(cert):
+                os.symlink(requested_cert, cert)
+                os.symlink(requested_key, key)
+
+    # Handle custom hostnames
+    hostname = get_hostname(local_address(unit_get_fallback='private-address'))
     hostname_cert = os.path.join(
         ssl_dir,
         'cert_{}'.format(hostname))
     hostname_key = os.path.join(
         ssl_dir,
         'key_{}'.format(hostname))
-    # Add links to hostname cert, used if os-hostname vars not set
-    for net_type in [INTERNAL, ADMIN, PUBLIC]:
-        try:
-            addr = resolve_address(endpoint_type=net_type)
-            cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
-            key = os.path.join(ssl_dir, 'key_{}'.format(addr))
-            if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
-                os.symlink(hostname_cert, cert)
-                os.symlink(hostname_key, key)
-        except NoNetworkBinding:
-            log("Skipping creating cert symlink for ip in {} space, no "
-                "local address found".format(net_type), WARNING)
     if custom_hostname_link:
         custom_cert = os.path.join(
             ssl_dir,
@@ -210,8 +307,35 @@ def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
             content=bundle['key'], perms=0o640)
 
 
+def _manage_ca_certs(ca, cert_relation_id):
+    """Manage CA certs.
+
+    :param ca: CA Certificate from certificate relation.
+    :type ca: str
+    :param cert_relation_id: Relation id providing the certs
+    :type cert_relation_id: str
+    """
+    config_ssl_ca = config('ssl_ca')
+    config_cert_file = '{}/{}.crt'.format(CA_CERT_DIR, CONFIG_CA_CERT_FILE)
+    if config_ssl_ca:
+        log("Installing CA certificate from charm ssl_ca config to {}".format(
+            config_cert_file), INFO)
+        install_ca_cert(
+            b64decode(config_ssl_ca).rstrip(),
+            name=CONFIG_CA_CERT_FILE)
+    elif os.path.exists(config_cert_file):
+        log("Removing CA certificate {}".format(config_cert_file), INFO)
+        os.remove(config_cert_file)
+    log("Installing CA certificate from certificate relation", INFO)
+    install_ca_cert(
+        ca.encode(),
+        name='{}_juju_ca_cert'.format(
+            remote_service_name(relid=cert_relation_id)))
+
+
 def process_certificates(service_name, relation_id, unit,
-                         custom_hostname_link=None, user='root', group='root'):
+                         custom_hostname_link=None, user='root', group='root',
+                         bindings=None):
     """Process the certificates supplied down the relation
 
     :param service_name: str Name of service the certifcates are for.
@@ -222,9 +346,19 @@ def process_certificates(service_name, relation_id, unit,
     :type user: str
     :param group: (Optional) Group of certificate files. Defaults to 'root'
     :type group: str
+    :param bindings: List of bindings to check in addition to default api
+                     bindings.
+    :type bindings: list of strings
     :returns: True if certificates processed for local unit or False
     :rtype: bool
     """
+    if bindings:
+        # Add default API bindings to bindings list
+        bindings = list(bindings + get_default_api_bindings())
+    else:
+        # Use default API bindings
+        bindings = get_default_api_bindings()
+
     data = relation_get(rid=relation_id, unit=unit)
     ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
     mkdir(path=ssl_dir)
@@ -234,11 +368,12 @@ def process_certificates(service_name, relation_id, unit,
     ca = data.get('ca')
     if certs:
         certs = json.loads(certs)
-        install_ca_cert(ca.encode())
+        _manage_ca_certs(ca, relation_id)
         install_certs(ssl_dir, certs, chain, user=user, group=group)
         create_ip_cert_links(
             ssl_dir,
-            custom_hostname_link=custom_hostname_link)
+            custom_hostname_link=custom_hostname_link,
+            bindings=bindings)
         return True
     return False
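For context, a hedged sketch of how a charm might drive the extended cert_utils API from a certificates relation. The 'data' binding and the service name are illustrative only; the relation-setting layout comes from CertRequest.get_request(), which returns a dict suitable for relation_set():

    # Hypothetical certificates-relation hooks; binding and service names are
    # illustrative, error handling omitted.
    from charmhelpers.contrib.openstack.cert_utils import (
        get_certificate_request,
        process_certificates,
    )
    from charmhelpers.core.hookenv import relation_id, relation_set, remote_unit


    def certificates_relation_joined():
        # Ask for certs covering the default API bindings plus an extra binding.
        relation_set(
            relation_id=relation_id(),
            relation_settings=get_certificate_request(bindings=['data']))


    def certificates_relation_changed():
        # Install certs, keys, symlinks and the CA when the relation responds.
        if process_certificates('my-service', relation_id(), remote_unit(),
                                bindings=['data']):
            # Certificates for this unit were written under
            # /etc/apache2/ssl/my-service; reconfigure services here.
            pass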


@@ -49,7 +49,6 @@ from charmhelpers.core.hookenv import (
     relation_ids,
     related_units,
     relation_set,
-    unit_get,
     unit_private_ip,
     charm_name,
     DEBUG,
@@ -98,6 +97,7 @@ from charmhelpers.contrib.openstack.ip import (
     ADMIN,
     PUBLIC,
     ADDRESS_MAP,
+    local_address,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
@@ -247,7 +247,7 @@ class SharedDBContext(OSContextGenerator):
                 hostname_key = "hostname"
             access_hostname = get_address_in_network(
                 access_network,
-                unit_get('private-address'))
+                local_address(unit_get_fallback='private-address'))
             set_hostname = relation_get(attribute=hostname_key,
                                         unit=local_unit())
             if set_hostname != access_hostname:
@@ -1088,7 +1088,7 @@ class ApacheSSLContext(OSContextGenerator):
             # NOTE(jamespage): Fallback must always be private address
             #                  as this is used to bind services on the
             #                  local unit.
-            fallback = unit_get("private-address")
+            fallback = local_address(unit_get_fallback="private-address")
             if net_config:
                 addr = get_address_in_network(net_config,
                                               fallback)
@@ -1260,7 +1260,7 @@ class NeutronContext(OSContextGenerator):
         if is_clustered():
             host = config('vip')
         else:
-            host = unit_get('private-address')
+            host = local_address(unit_get_fallback='private-address')
 
         ctxt = {'network_manager': self.network_manager,
                 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
@@ -1534,8 +1534,23 @@ class SubordinateConfigContext(OSContextGenerator):
                     ctxt[k][section] = config_list
                 else:
                     ctxt[k] = v
-        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
-        return ctxt
+        if self.context_complete(ctxt):
+            log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+            return ctxt
+        else:
+            return {}
+
+    def context_complete(self, ctxt):
+        """Overridden here to ensure the context is actually complete.
+
+        :param ctxt: The current context members
+        :type ctxt: Dict[str, ANY]
+        :returns: True if the context is complete
+        :rtype: bool
+        """
+        if not ctxt.get('sections'):
+            return False
+        return super(SubordinateConfigContext, self).context_complete(ctxt)
 
 
 class LogLevelContext(OSContextGenerator):
@@ -3050,6 +3065,11 @@ class SRIOVContext(OSContextGenerator):
         blanket = 'blanket'
         explicit = 'explicit'
 
+    PCIDeviceNumVFs = collections.namedtuple(
+        'PCIDeviceNumVFs', ['device', 'numvfs'])
+
     def _determine_numvfs(self, device, sriov_numvfs):
         """Determine number of Virtual Functions (VFs) configured for device.
@@ -3165,14 +3183,15 @@ class SRIOVContext(OSContextGenerator):
                 'configuration.')
 
         self._map = {
-            device.interface_name: self._determine_numvfs(device, sriov_numvfs)
+            device.pci_address: self.PCIDeviceNumVFs(
+                device, self._determine_numvfs(device, sriov_numvfs))
             for device in devices.pci_devices
             if device.sriov and
             self._determine_numvfs(device, sriov_numvfs) is not None
         }
 
     def __call__(self):
-        """Provide SR-IOV context.
+        """Provide backward compatible SR-IOV context.
 
         :returns: Map interface name: min(configured, max) virtual functions.
                   Example:
@@ -3183,6 +3202,23 @@ class SRIOVContext(OSContextGenerator):
             }
         :rtype: Dict[str,int]
         """
+        return {
+            pcidnvfs.device.interface_name: pcidnvfs.numvfs
+            for _, pcidnvfs in self._map.items()
+        }
+
+    @property
+    def get_map(self):
+        """Provide map of configured SR-IOV capable PCI devices.
+
+        :returns: Map PCI-address: (PCIDevice, min(configured, max) VFs.
+                  Example:
+                    {
+                        '0000:81:00.0': self.PCIDeviceNumVFs(<PCIDevice object>, 32),
+                        '0000:81:00.1': self.PCIDeviceNumVFs(<PCIDevice object>, 32),
+                    }
+        :rtype: Dict[str, self.PCIDeviceNumVFs]
+        """
         return self._map
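A hedged sketch of how a consumer might use the reworked SR-IOV context. The interface names and PCI addresses are invented, and SRIOVContext() still needs the charm's sriov-numvfs configuration to be available when it is instantiated:

    from charmhelpers.contrib.openstack.context import SRIOVContext

    sriov = SRIOVContext()

    # Backward compatible view: interface name -> number of VFs, as before.
    legacy_map = sriov()            # e.g. {'ens3f0': 32}

    # New view keyed by PCI address, carrying the PCIDevice object as well.
    for pci_addr, pcidnvfs in sriov.get_map.items():
        device, numvfs = pcidnvfs  # PCIDeviceNumVFs is a namedtuple
        print(pci_addr, device.interface_name, numvfs)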


@@ -33,6 +33,7 @@ INTERNAL = 'int'
 ADMIN = 'admin'
 ACCESS = 'access'
 
+# TODO: reconcile 'int' vs 'internal' binding names
 ADDRESS_MAP = {
     PUBLIC: {
         'binding': 'public',
@@ -58,6 +59,14 @@ ADDRESS_MAP = {
         'fallback': 'private-address',
         'override': 'os-access-hostname',
     },
+    # Note (thedac) bridge to begin the reconciliation between 'int' vs
+    # 'internal' binding names
+    'internal': {
+        'binding': 'internal',
+        'config': 'os-internal-network',
+        'fallback': 'private-address',
+        'override': 'os-internal-hostname',
+    },
 }
@@ -114,6 +123,28 @@ def _get_address_override(endpoint_type=PUBLIC):
     return addr_override.format(service_name=service_name())
 
 
+def local_address(unit_get_fallback='public-address'):
+    """Return a network address for this unit.
+
+    Attempt to retrieve a 'default' IP address for this unit
+    from network-get. If this is running with an old version of Juju then
+    fallback to unit_get.
+
+    Note on juju < 2.9 the binding to juju-info may not exist, so fall back to
+    the unit-get.
+
+    :param unit_get_fallback: Either 'public-address' or 'private-address'.
+                              Only used with old versions of Juju.
+    :type unit_get_fallback: str
+    :returns: IP Address
+    :rtype: str
+    """
+    try:
+        return network_get_primary_address('juju-info')
+    except (NotImplementedError, NoNetworkBinding):
+        return unit_get(unit_get_fallback)
+
+
 def resolve_address(endpoint_type=PUBLIC, override=True):
     """Return unit address depending on net config.
@@ -167,7 +198,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
             if config('prefer-ipv6'):
                 fallback_addr = get_ipv6_addr(exc_list=vips)[0]
             else:
-                fallback_addr = unit_get(net_fallback)
+                fallback_addr = local_address(unit_get_fallback=net_fallback)
 
             if net_addr:
                 resolved_address = get_address_in_network(net_addr, fallback_addr)
@@ -195,3 +226,10 @@ def get_vip_in_network(network):
             if is_address_in_network(network, vip):
                 matching_vip = vip
     return matching_vip
+
+
+def get_default_api_bindings():
+    _default_bindings = []
+    for binding in [INTERNAL, ADMIN, PUBLIC]:
+        _default_bindings.append(ADDRESS_MAP[binding]['binding'])
+    return _default_bindings
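This is the helper the commit message refers to: on Juju 2.9 unit-get can fail during the install hook, so local_address() prefers network-get on the juju-info binding and only falls back to unit_get on older Juju. A minimal, hypothetical usage sketch:

    from charmhelpers.contrib.openstack.ip import (
        get_default_api_bindings,
        local_address,
    )

    # Safe replacement for unit_get('private-address') in charm code.
    addr = local_address(unit_get_fallback='private-address')

    # The default API bindings now come from ADDRESS_MAP rather than being
    # hard-coded at each call site; typically ['internal', 'admin', 'public'].
    print(get_default_api_bindings())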


@@ -15,5 +15,6 @@ password = {{ admin_password }}
 {% endif -%}
 {% if region -%}
 os_region_name = {{ region }}
+region_name = {{ region }}
 {% endif -%}
 randomize_allocation_candidates = true


@@ -18,6 +18,7 @@ from functools import wraps
 
 import subprocess
 import json
+import operator
 import os
 import sys
 import re
@@ -33,7 +34,7 @@ from charmhelpers import deprecate
 
 from charmhelpers.contrib.network import ip
 
-from charmhelpers.core import unitdata
+from charmhelpers.core import decorators, unitdata
 
 from charmhelpers.core.hookenv import (
     WORKLOAD_STATES,
@@ -89,13 +90,16 @@ from charmhelpers.core.host import (
     service_start,
     restart_on_change_helper,
 )
 
 from charmhelpers.fetch import (
     apt_cache,
+    apt_install,
     import_key as fetch_import_key,
     add_source as fetch_add_source,
     SourceConfigError,
     GPGKeyError,
     get_upstream_version,
+    filter_installed_packages,
     filter_missing_packages,
     ubuntu_apt_pkg as apt,
 )
@@ -230,7 +234,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('ussuri',
         ['2.24.0', '2.25.0']),
     ('victoria',
-        ['2.25.0']),
+        ['2.25.0', '2.26.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -479,9 +483,14 @@ def get_swift_codename(version):
     return None
 
 
+@deprecate("moved to charmhelpers.contrib.openstack.utils.get_installed_os_version()", "2021-01", log=juju_log)
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
 
+    codename = get_installed_os_version()
+    if codename:
+        return codename
+
     if snap_install_requested():
         cmd = ['snap', 'list', package]
         try:
@@ -569,6 +578,28 @@ def get_os_version_package(pkg, fatal=True):
     # error_out(e)
 
 
+def get_installed_os_version():
+    apt_install(filter_installed_packages(['openstack-release']), fatal=False)
+    print("OpenStack Release: {}".format(openstack_release()))
+    return openstack_release().get('OPENSTACK_CODENAME')
+
+
+@cached
+def openstack_release():
+    """Return /etc/os-release in a dict."""
+    d = {}
+    try:
+        with open('/etc/openstack-release', 'r') as lsb:
+            for l in lsb:
+                s = l.split('=')
+                if len(s) != 2:
+                    continue
+                d[s[0].strip()] = s[1].strip()
+    except FileNotFoundError:
+        pass
+    return d
+
+
 # Module local cache variable for the os_release.
 _os_rel = None
@@ -1295,7 +1326,7 @@ def _check_listening_on_ports_list(ports):
     Returns a list of ports being listened to and a list of the
     booleans.
 
-    @param ports: LIST or port numbers.
+    @param ports: LIST of port numbers.
     @returns [(port_num, boolean), ...], [boolean]
     """
     ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
@@ -1564,6 +1595,21 @@ def manage_payload_services(action, services=None, charm_func=None):
     return success, messages
 
 
+def make_wait_for_ports_barrier(ports, retry_count=5):
+    """Make a function to wait for port shutdowns.
+
+    Create a function which closes over the provided ports. The function will
+    retry probing ports until they are closed or the retry count has been reached.
+
+    """
+    @decorators.retry_on_predicate(retry_count, operator.not_, base_delay=0.1)
+    def retry_port_check():
+        _, ports_states = _check_listening_on_ports_list(ports)
+        juju_log("Probe ports {}, result: {}".format(ports, ports_states), level="DEBUG")
+        return any(ports_states)
+    return retry_port_check
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
@@ -1599,6 +1645,7 @@ def pause_unit(assess_status_func, services=None, ports=None,
         services=services,
         charm_func=charm_func)
     set_unit_paused()
+
     if assess_status_func:
         message = assess_status_func()
         if message:
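A hedged sketch of the new port barrier helper; the port numbers are invented. The returned callable re-probes the ports (via retry_on_predicate with operator.not_) until none of them has a listener or the retry budget is exhausted:

    from charmhelpers.contrib.openstack.utils import make_wait_for_ports_barrier

    # Build a callable closed over the ports we expect to go quiet on shutdown.
    wait_for_api_ports = make_wait_for_ports_barrier([8774, 8775], retry_count=5)

    # ... stop the services, then block briefly until the ports are released.
    still_listening = wait_for_api_ports()  # False once every port is closed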


@@ -41,6 +41,7 @@ from subprocess import (
 )
 from charmhelpers import deprecate
 from charmhelpers.core.hookenv import (
+    application_name,
     config,
     service_name,
     local_unit,
@@ -162,6 +163,17 @@ def get_osd_settings(relation_name):
     return _order_dict_by_key(osd_settings)
 
 
+def send_application_name(relid=None):
+    """Send the application name down the relation.
+
+    :param relid: Relation id to set application name in.
+    :type relid: str
+    """
+    relation_set(
+        relation_id=relid,
+        relation_settings={'application-name': application_name()})
+
+
 def send_osd_settings():
     """Pass on requested OSD settings to osd units."""
     try:
@@ -256,6 +268,7 @@ class BasePool(object):
         'compression-max-blob-size': (int, None),
         'compression-max-blob-size-hdd': (int, None),
         'compression-max-blob-size-ssd': (int, None),
+        'rbd-mirroring-mode': (str, ('image', 'pool'))
     }
 
     def __init__(self, service, name=None, percent_data=None, app_name=None,
@@ -1074,7 +1087,10 @@ def create_erasure_profile(service, profile_name,
                            erasure_plugin_technique=None):
     """Create a new erasure code profile if one does not already exist for it.
 
-    Updates the profile if it exists. Please refer to [0] for more details.
+    Profiles are considered immutable so will not be updated if the named
+    profile already exists.
+
+    Please refer to [0] for more details.
 
     0: http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -1110,6 +1126,11 @@ def create_erasure_profile(service, profile_name,
     :type erasure_plugin_technique: str
     :return: None.  Can raise CalledProcessError, ValueError or AssertionError
     """
+    if erasure_profile_exists(service, profile_name):
+        log('EC profile {} exists, skipping update'.format(profile_name),
+            level=WARNING)
+        return
+
     plugin_techniques = {
         'jerasure': [
             'reed_sol_van',
@@ -1209,9 +1230,6 @@ def create_erasure_profile(service, profile_name,
     if scalar_mds:
         cmd.append('scalar-mds={}'.format(scalar_mds))
 
-    if erasure_profile_exists(service, profile_name):
-        cmd.append('--force')
-
     check_call(cmd)
@@ -1750,6 +1768,7 @@ class CephBrokerRq(object):
                         max_bytes=None,
                         max_objects=None,
                         namespace=None,
+                        rbd_mirroring_mode='pool',
                         weight=None):
         """Build common part of a create pool operation.
@@ -1808,6 +1827,9 @@ class CephBrokerRq(object):
         :type max_objects: Optional[int]
         :param namespace: Group namespace
         :type namespace: Optional[str]
+        :param rbd_mirroring_mode: Pool mirroring mode used when Ceph RBD
+                                   mirroring is enabled.
+        :type rbd_mirroring_mode: Optional[str]
         :param weight: The percentage of data that is expected to be contained
                        in the pool from the total available space on the OSDs.
                        Used to calculate number of Placement Groups to create
@@ -1832,6 +1854,7 @@ class CephBrokerRq(object):
             'max-bytes': max_bytes,
             'max-objects': max_objects,
             'group-namespace': namespace,
+            'rbd-mirroring-mode': rbd_mirroring_mode,
             'weight': weight,
         }
@@ -2198,6 +2221,7 @@ def send_request_if_needed(request, relation='ceph'):
         for rid in relation_ids(relation):
             log('Sending request {}'.format(request.request_id), level=DEBUG)
             relation_set(relation_id=rid, broker_req=request.request)
+            relation_set(relation_id=rid, relation_settings={'unit-name': local_unit()})
 
 
 def has_broker_rsp(rid=None, unit=None):
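A hedged sketch of the client side of the relation-data additions above (relation and pool names are illustrative): the application name and local unit name now travel over the ceph relation so the broker can track requests per application.

    from charmhelpers.contrib.storage.linux.ceph import (
        CephBrokerRq,
        send_application_name,
        send_request_if_needed,
    )
    from charmhelpers.core.hookenv import relation_ids

    # Tell ceph-mon which application is asking (new helper above).
    for rid in relation_ids('ceph'):
        send_application_name(relid=rid)

    # Pool create requests now carry 'rbd-mirroring-mode' (default 'pool').
    rq = CephBrokerRq()
    rq.add_op_create_replicated_pool(name='glance', replica_count=3)  # hypothetical pool
    send_request_if_needed(rq, relation='ceph')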


@@ -53,3 +53,41 @@ def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
         return _retry_on_exception_inner_2
 
     return _retry_on_exception_inner_1
+
+
+def retry_on_predicate(num_retries, predicate_fun, base_delay=0):
+    """Retry based on return value
+
+    The return value of the decorated function is passed to the given predicate_fun. If the
+    result of the predicate is False, retry the decorated function up to num_retries times
+
+    An exponential backoff up to base_delay^num_retries seconds can be introduced by setting
+    base_delay to a nonzero value. The default is to run with a zero (i.e. no) delay
+
+    :param num_retries: Max. number of retries to perform
+    :type num_retries: int
+    :param predicate_fun: Predicate function to determine if a retry is necessary
+    :type predicate_fun: callable
+    :param base_delay: Starting value in seconds for exponential delay, defaults to 0 (no delay)
+    :type base_delay: float
+    """
+    def _retry_on_pred_inner_1(f):
+
+        def _retry_on_pred_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            delay = base_delay
+            while True:
+                result = f(*args, **kwargs)
+                if predicate_fun(result) or retries <= 0:
+                    return result
+                delay *= multiplier
+                multiplier += 1
+                log("Result {}, retrying '{}' {} more times (delay={})".format(
+                    result, f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_pred_inner_2
+
+    return _retry_on_pred_inner_1
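A small, self-contained sketch of the new decorator (the probe function and path are invented): the wrapped function is re-run until the predicate accepts its return value or the retries are exhausted.

    import os

    from charmhelpers.core.decorators import retry_on_predicate


    @retry_on_predicate(num_retries=5, predicate_fun=bool, base_delay=0.2)
    def pid_file_written():
        """Return True once the daemon has written its pid file."""
        return os.path.exists('/var/run/example.pid')   # illustrative path

    # Retries up to 5 times, with an increasing delay, until the file appears.
    ready = pid_file_written()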


@@ -19,6 +19,7 @@
 # Nick Moffitt <nick.moffitt@canonical.com>
 # Matthew Wedgwood <matthew.wedgwood@canonical.com>
 
+import errno
 import os
 import re
 import pwd
@@ -59,6 +60,7 @@ elif __platform__ == "centos":
     )  # flake8: noqa -- ignore F401 for this import
 
 UPDATEDB_PATH = '/etc/updatedb.conf'
+CA_CERT_DIR = '/usr/local/share/ca-certificates'
 
 
 def service_start(service_name, **kwargs):
@@ -677,7 +679,7 @@ def check_hash(path, checksum, hash_type='md5'):
     :param str checksum: Value of the checksum used to validate the file.
     :param str hash_type: Hash algorithm used to generate `checksum`.
-        Can be any hash alrgorithm supported by :mod:`hashlib`,
+        Can be any hash algorithm supported by :mod:`hashlib`,
         such as md5, sha1, sha256, sha512, etc.
 
     :raises ChecksumError: If the file fails the checksum
@@ -825,7 +827,8 @@ def list_nics(nic_type=None):
     if nic_type:
         for int_type in int_types:
             cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = subprocess.check_output(
+                cmd).decode('UTF-8', errors='replace')
             ip_output = ip_output.split('\n')
             ip_output = (line for line in ip_output if line)
             for line in ip_output:
@@ -841,7 +844,8 @@ def list_nics(nic_type=None):
                     interfaces.append(iface)
     else:
         cmd = ['ip', 'a']
-        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = subprocess.check_output(
+            cmd).decode('UTF-8', errors='replace').split('\n')
         ip_output = (line.strip() for line in ip_output if line)
 
         key = re.compile(r'^[0-9]+:\s+(.+):')
@@ -865,7 +869,8 @@ def set_nic_mtu(nic, mtu):
 def get_nic_mtu(nic):
     """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+    ip_output = subprocess.check_output(
+        cmd).decode('UTF-8', errors='replace').split('\n')
     mtu = ""
     for line in ip_output:
         words = line.split()
@@ -877,7 +882,7 @@ def get_nic_mtu(nic):
 def get_nic_hwaddr(nic):
     """Return the Media Access Control (MAC) for a network interface."""
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).decode('UTF-8')
+    ip_output = subprocess.check_output(cmd).decode('UTF-8', errors='replace')
     hwaddr = ""
     words = ip_output.split()
     if 'link/ether' in words:
@@ -889,7 +894,7 @@ def get_nic_hwaddr(nic):
 def chdir(directory):
     """Change the current working directory to a different directory for a code
     block and return the previous directory after the block exits. Useful to
-    run commands from a specificed directory.
+    run commands from a specified directory.
 
     :param str directory: The directory path to change to for this context.
     """
@@ -924,9 +929,13 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):
     for root, dirs, files in os.walk(path, followlinks=follow_links):
         for name in dirs + files:
             full = os.path.join(root, name)
-            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
-            if not broken_symlink:
+            try:
                 chown(full, uid, gid)
+            except (IOError, OSError) as e:
+                # Intended to ignore "file not found". Catching both to be
+                # compatible with both Python 2.7 and 3.x.
+                if e.errno == errno.ENOENT:
+                    pass
 
 
 def lchownr(path, owner, group):
@@ -1074,7 +1083,7 @@ def install_ca_cert(ca_cert, name=None):
         ca_cert = ca_cert.encode('utf8')
     if not name:
         name = 'juju-{}'.format(charm_name())
-    cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
+    cert_file = '{}/{}.crt'.format(CA_CERT_DIR, name)
     new_hash = hashlib.md5(ca_cert).hexdigest()
     if file_hash(cert_file) == new_hash:
         return
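A hedged sketch tying the new CA_CERT_DIR constant to install_ca_cert(); the certificate file and name are invented:

    from charmhelpers.core.host import CA_CERT_DIR, install_ca_cert

    with open('/tmp/vault-ca.pem', 'rb') as f:   # illustrative source file
        install_ca_cert(f.read(), name='vault_juju_ca_cert')

    # The certificate is written to CA_CERT_DIR/vault_juju_ca_cert.crt, i.e.
    # /usr/local/share/ca-certificates/vault_juju_ca_cert.crt, and
    # update-ca-certificates is run if the content changed.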


@@ -646,7 +646,7 @@ def _add_apt_repository(spec):
     # passed as environment variables (See lp:1433761). This is not the case
     # LTS and non-LTS releases below bionic.
     _run_with_retries(['add-apt-repository', '--yes', spec],
-                      cmd_env=env_proxy_settings(['https']))
+                      cmd_env=env_proxy_settings(['https', 'http']))
 
 
 def _add_cloud_pocket(pocket):


@@ -129,7 +129,7 @@ class Cache(object):
             else:
                 data = line.split(None, 4)
                 status = data.pop(0)
-                if status != 'ii':
+                if status not in ('ii', 'hi'):
                     continue
                 pkg = {}
                 pkg.update({k.lower(): v for k, v in zip(headings, data)})
@@ -265,3 +265,48 @@ def version_compare(a, b):
         raise RuntimeError('Unable to compare "{}" and "{}", according to '
                            'our logic they are neither greater, equal nor '
                            'less than each other.')
+
+
+class PkgVersion():
+    """Allow package versions to be compared.
+
+    For example::
+
+        >>> import charmhelpers.fetch as fetch
+        >>> (fetch.apt_pkg.PkgVersion('2:20.4.0') <
+        ...  fetch.apt_pkg.PkgVersion('2:20.5.0'))
+        True
+        >>> pkgs = [fetch.apt_pkg.PkgVersion('2:20.4.0'),
+        ...         fetch.apt_pkg.PkgVersion('2:21.4.0'),
+        ...         fetch.apt_pkg.PkgVersion('2:17.4.0')]
+        >>> pkgs.sort()
+        >>> pkgs
+        [2:17.4.0, 2:20.4.0, 2:21.4.0]
+    """
+    def __init__(self, version):
+        self.version = version
+
+    def __lt__(self, other):
+        return version_compare(self.version, other.version) == -1
+
+    def __le__(self, other):
+        return self.__lt__(other) or self.__eq__(other)
+
+    def __gt__(self, other):
+        return version_compare(self.version, other.version) == 1
+
+    def __ge__(self, other):
+        return self.__gt__(other) or self.__eq__(other)
+
+    def __eq__(self, other):
+        return version_compare(self.version, other.version) == 0
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        return self.version
+
+    def __hash__(self):
+        return hash(repr(self))


@@ -37,6 +37,8 @@ importlib-resources<3.0.0; python_version < '3.6'
 # dropped support for python 3.5:
 osprofiler<2.7.0;python_version<'3.6'
 stevedore<1.31.0;python_version<'3.6'
+debtcollector<1.22.0;python_version<'3.6'
+oslo.utils<=3.41.0;python_version<'3.6'
 
 coverage>=4.5.2
 pyudev  # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)