diff --git a/.gitignore b/.gitignore
index c94de0a..b4ede3f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ bin
 *.pyc
 func-results.json
 .stestr
+*.charm
diff --git a/.zuul.yaml b/.zuul.yaml
index b3037e9..7ffc71c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,5 +1,4 @@
 - project:
     templates:
-      - python35-charm-jobs
-      - openstack-python3-ussuri-jobs
+      - openstack-python3-charm-yoga-jobs
       - openstack-cover-jobs
diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml
index fb8058e..bbf973d 100644
--- a/charm-helpers-hooks.yaml
+++ b/charm-helpers-hooks.yaml
@@ -6,6 +6,7 @@ include:
     - osplatform
     - contrib.openstack|inc=*
     - contrib.storage
+    - contrib.hardware.pci
     - contrib.hahelpers
     - contrib.network.ip
     - contrib.python
diff --git a/charmcraft.yaml b/charmcraft.yaml
index 1c09031..b1e7f9f 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -19,7 +19,15 @@ parts:
       - README.md
 
 bases:
-  - name: ubuntu
-    channel: "20.04"
-    architectures:
-      - amd64
+  - build-on:
+      - name: ubuntu
+        channel: "20.04"
+        architectures:
+          - amd64
+    run-on:
+      - name: ubuntu
+        channel: "20.04"
+        architectures: [amd64, s390x, ppc64el, arm64]
+      - name: ubuntu
+        channel: "22.04"
+        architectures: [amd64, s390x, ppc64el, arm64]
diff --git a/cinder-backup.charm b/cinder-backup.charm
deleted file mode 100644
index 80e2b6a..0000000
Binary files a/cinder-backup.charm and /dev/null differ
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
index 1f57ed2..ddf3045 100644
--- a/hooks/charmhelpers/__init__.py
+++ b/hooks/charmhelpers/__init__.py
@@ -14,30 +14,15 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
-from __future__ import print_function
-from __future__ import absolute_import
-
 import functools
 import inspect
 import subprocess
-import sys
 
-try:
-    import six  # NOQA:F401
-except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
-    import six  # NOQA:F401
 try:
     import yaml  # NOQA:F401
 except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # NOQA:F401
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
index f0b629a..146beba 100644
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -32,8 +32,6 @@ import time
 
 from socket import gethostname as get_unit_hostname
 
-import six
-
 from charmhelpers.core.hookenv import (
     log,
     relation_ids,
@@ -125,16 +123,16 @@ def is_crm_dc():
     """
     cmd = ['crm', 'status']
     try:
-        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-        if not isinstance(status, six.text_type):
-            status = six.text_type(status, "utf-8")
+        status = subprocess.check_output(
+            cmd, stderr=subprocess.STDOUT).decode('utf-8')
     except subprocess.CalledProcessError as ex:
         raise CRMDCNotFound(str(ex))
 
     current_dc = ''
     for line in status.split('\n'):
         if line.startswith('Current DC'):
-            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
+            # Current DC: juju-lytrusty-machine-2 (168108163)
+            # - partition with quorum
             current_dc = line.split(':')[1].split()[0]
     if current_dc == get_unit_hostname():
         return True
@@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False):
         return is_crm_dc()
     cmd = ['crm', 'resource', 'show', resource]
     try:
-        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-        if not isinstance(status, six.text_type):
-            status = six.text_type(status, "utf-8")
+        status = subprocess.check_output(
+            cmd, stderr=subprocess.STDOUT).decode('utf-8')
     except subprocess.CalledProcessError:
         status = None
 
diff --git a/hooks/charmhelpers/contrib/hardware/__init__.py b/hooks/charmhelpers/contrib/hardware/__init__.py
new file mode 100644
index 0000000..474a8f3
--- /dev/null
+++ b/hooks/charmhelpers/contrib/hardware/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/hardware/pci.py b/hooks/charmhelpers/contrib/hardware/pci.py
new file mode 100644
index 0000000..f6b1789
--- /dev/null
+++ b/hooks/charmhelpers/contrib/hardware/pci.py
@@ -0,0 +1,288 @@
+#!/usr/bin/env python3
+#
+# Copyright 2016-2022 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import itertools
+import logging
+import os
+import re
+import shlex
+import subprocess
+import typing
+
+
+def format_pci_addr(pci_addr: str) -> str:
+    """Format a PCI address with 0 fill for parts
+
+    :param: pci_addr: unformatted PCI address
+    :type: str
+    :returns: formatted PCI address
+    :rtype: str
+    """
+    domain, bus, slot_func = pci_addr.split(":")
+    slot, func = slot_func.split(".")
+    return "{}:{}:{}.{}".format(
+        domain.zfill(4), bus.zfill(2), slot.zfill(2), func
+    )
+
+
+def get_sysnet_interfaces_and_macs() -> list:
+    """Catalog interface information from local system
+
+    each device dict contains:
+
+        interface: logical name
+        mac_address: MAC address
+        pci_address: PCI address
+        state: Current interface state (up/down)
+        sriov: Boolean indicating whether interface is an SR-IOV
+               capable device.
+ sriov_totalvfs: Total VF capacity of device + sriov_numvfs: Configured VF capacity of device + + :returns: array of dict objects containing details of each interface + :rtype: list + """ + net_devs = [] + for sdir in itertools.chain( + glob.glob("/sys/bus/pci/devices/*/net/../"), + glob.glob("/sys/bus/pci/devices/*/virtio*/net/../")): + fq_path = os.path.realpath(sdir) + path = fq_path.split("/") + if "virtio" in path[-1]: + pci_address = path[-2] + else: + pci_address = path[-1] + ifname = get_sysnet_interface(sdir) + if not ifname: + logging.warn("Unable to determine interface name for PCI " + "device {}".format(pci_address)) + continue + device = { + "interface": ifname, + "mac_address": get_sysnet_mac(sdir, ifname), + "pci_address": pci_address, + "state": get_sysnet_device_state(sdir, ifname), + "sriov": is_sriov(sdir), + } + if device["sriov"]: + device["sriov_totalvfs"] = get_sriov_totalvfs(sdir) + device["sriov_numvfs"] = get_sriov_numvfs(sdir) + net_devs.append(device) + + return net_devs + + +def get_sysnet_mac(sysdir: str, ifname: str) -> str: + """Determine MAC address for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: MAC address of device + :rtype: str + """ + mac_addr_file = os.path.join(sysdir, "net", ifname, "address") + with open(mac_addr_file, "r") as f: + read_data = f.read() + return read_data.strip() + + +def get_sysnet_device_state(sysdir: str, ifname: str) -> str: + """Read operational state of a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: current device state + :rtype: str + """ + state_file = os.path.join(sysdir, "net", ifname, "operstate") + with open(state_file, "r") as f: + read_data = f.read() + return read_data.strip() + + +def is_sriov(sysdir: str) -> bool: + """Determine whether a device is SR-IOV capable + + :param: sysdir: path to device /sys directory + :type: str + :returns: whether device is SR-IOV capable or not + :rtype: bool + """ + return os.path.exists(os.path.join(sysdir, "sriov_totalvfs")) + + +def get_sriov_totalvfs(sysdir: str) -> int: + """Read total VF capacity for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: number of VF's the device supports + :rtype: int + """ + sriov_totalvfs_file = os.path.join(sysdir, "sriov_totalvfs") + with open(sriov_totalvfs_file, "r") as f: + read_data = f.read() + return int(read_data.strip()) + + +def get_sriov_numvfs(sysdir: str) -> int: + """Read configured VF capacity for a device + + :param: sysdir: path to device /sys directory + :type: str + :returns: number of VF's the device is configured with + :rtype: int + """ + sriov_numvfs_file = os.path.join(sysdir, "sriov_numvfs") + with open(sriov_numvfs_file, "r") as f: + read_data = f.read() + return int(read_data.strip()) + + +# https://github.com/libvirt/libvirt/commit/5b1c525b1f3608156884aed0dc5e925306c1e260 +PF_PHYS_PORT_NAME_REGEX = re.compile(r"(p[0-9]+$)|(p[0-9]+s[0-9]+$)", + re.IGNORECASE) + + +def _phys_port_name_is_pf(sysnetdir: str) -> typing.Optional[bool]: + try: + with open(os.path.join(sysnetdir, "phys_port_name"), "r") as fin: + return (PF_PHYS_PORT_NAME_REGEX.match(fin.read().strip()) + is not None) + except OSError: + return + + +def get_sysnet_interface(sysdir: str) -> typing.Optional[str]: + sysnetdir = os.path.join(sysdir, "net") + netdevs = os.listdir(sysnetdir) + # Return early in case the PCI device only has one netdev + if len(netdevs) == 1: + return netdevs[0] + + # When a PCI device has multiple netdevs we need to figure 
out which one + # represents the PF + for netdev in netdevs: + if _phys_port_name_is_pf(os.path.join(sysnetdir, netdev)): + return netdev + + +def get_pci_ethernet_addresses() -> list: + """Generate list of PCI addresses for all network adapters + + :returns: list of PCI addresses + :rtype: list + """ + cmd = ["lspci", "-m", "-D"] + lspci_output = subprocess.check_output(cmd).decode("UTF-8") + pci_addresses = [] + for line in lspci_output.split("\n"): + columns = shlex.split(line) + if len(columns) > 1 and columns[1] == "Ethernet controller": + pci_address = columns[0] + pci_addresses.append(format_pci_addr(pci_address)) + return pci_addresses + + +class PCINetDevice(object): + def __init__(self, pci_address): + self.pci_address = pci_address + self.interface_name = None + self.mac_address = None + self.state = None + self.sriov = False + self.sriov_totalvfs = None + self.sriov_numvfs = None + self.update_attributes() + + def update_attributes(self): + self.update_interface_info() + + def update_interface_info(self): + net_devices = get_sysnet_interfaces_and_macs() + for interface in net_devices: + if self.pci_address == interface["pci_address"]: + self.interface_name = interface["interface"] + self.mac_address = interface["mac_address"] + self.state = interface["state"] + self.sriov = interface["sriov"] + if self.sriov: + self.sriov_totalvfs = interface["sriov_totalvfs"] + self.sriov_numvfs = interface["sriov_numvfs"] + + def _set_sriov_numvfs(self, numvfs: int): + sdevice = os.path.join( + "/sys/bus/pci/devices", self.pci_address, "sriov_numvfs" + ) + with open(sdevice, "w") as sh: + sh.write(str(numvfs)) + self.update_attributes() + + def set_sriov_numvfs(self, numvfs: int) -> bool: + """Set the number of VF devices for a SR-IOV PF + + Assuming the device is an SR-IOV device, this function will attempt + to change the number of VF's created by the PF. + + @param numvfs: integer to set the current number of VF's to + @returns boolean indicating whether any changes where made + """ + if self.sriov and numvfs != self.sriov_numvfs: + # NOTE(fnordahl): run-time change of numvfs is disallowed + # without resetting to 0 first. 
+ self._set_sriov_numvfs(0) + self._set_sriov_numvfs(numvfs) + return True + return False + + +class PCINetDevices(object): + def __init__(self): + self.pci_devices = [ + PCINetDevice(dev) for dev in get_pci_ethernet_addresses() + ] + + def update_devices(self): + for pcidev in self.pci_devices: + pcidev.update_attributes() + + def get_macs(self) -> list: + macs = [] + for pcidev in self.pci_devices: + if pcidev.mac_address: + macs.append(pcidev.mac_address) + return macs + + def get_device_from_mac(self, mac: str) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.mac_address == mac: + return pcidev + return None + + def get_device_from_pci_address(self, pci_addr: str) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.pci_address == pci_addr: + return pcidev + return None + + def get_device_from_interface_name( + self, interface_name: str + ) -> PCINetDevice: + for pcidev in self.pci_devices: + if pcidev.interface_name == interface_name: + return pcidev + return None diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index b356d64..de56584 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -15,7 +15,6 @@ import glob import re import subprocess -import six import socket from functools import partial @@ -39,20 +38,14 @@ try: import netifaces except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netifaces', fatal=True) - else: - apt_install('python3-netifaces', fatal=True) + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netaddr', fatal=True) - else: - apt_install('python3-netaddr', fatal=True) + apt_install('python3-netaddr', fatal=True) import netaddr @@ -462,15 +455,12 @@ def ns_query(address): try: import dns.resolver except ImportError: - if six.PY2: - apt_install('python-dnspython', fatal=True) - else: - apt_install('python3-dnspython', fatal=True) + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, six.string_types): + elif isinstance(address, str): rtype = 'A' else: return None @@ -513,10 +503,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - if six.PY2: - apt_install("python-dnspython", fatal=True) - else: - apt_install("python3-dnspython", fatal=True) + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 54081f0..32c69ff 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -30,8 +30,6 @@ from subprocess import ( check_output, CalledProcessError) -import six - import charmhelpers.contrib.storage.linux.ceph as ch_ceph from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( @@ -120,20 +118,12 @@ from charmhelpers.contrib.openstack.utils import ( ) from charmhelpers.core.unitdata import kv -try: - from sriov_netplan_shim import pci -except ImportError: - # The use of the function and contexts that require the pci module is - # optional. 
- pass +from charmhelpers.contrib.hardware import pci try: import psutil except ImportError: - if six.PY2: - apt_install('python-psutil', fatal=True) - else: - apt_install('python3-psutil', fatal=True) + apt_install('python3-psutil', fatal=True) import psutil CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -150,10 +140,7 @@ def ensure_packages(packages): def context_complete(ctxt): - _missing = [] - for k, v in six.iteritems(ctxt): - if v is None or v == '': - _missing.append(k) + _missing = [k for k, v in ctxt.items() if v is None or v == ''] if _missing: log('Missing required data: %s' % ' '.join(_missing), level=INFO) @@ -180,7 +167,7 @@ class OSContextGenerator(object): # Fresh start self.complete = False self.missing_data = [] - for k, v in six.iteritems(ctxt): + for k, v in ctxt.items(): if v is None or v == '': if k not in self.missing_data: self.missing_data.append(k) @@ -434,6 +421,9 @@ class IdentityServiceContext(OSContextGenerator): ('password', ctxt.get('admin_password', '')), ('signing_dir', ctxt.get('signing_dir', '')),)) + if ctxt.get('service_type'): + c.update((('service_type', ctxt.get('service_type')),)) + return c def __call__(self): @@ -476,6 +466,9 @@ class IdentityServiceContext(OSContextGenerator): 'internal_protocol': int_protocol, 'api_version': api_version}) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({ 'admin_domain_name': rdata.get('service_domain'), @@ -547,6 +540,9 @@ class IdentityCredentialsContext(IdentityServiceContext): 'api_version': api_version }) + if rdata.get('service_type'): + ctxt['service_type'] = rdata.get('service_type') + if float(api_version) > 2: ctxt.update({'admin_domain_name': rdata.get('domain')}) @@ -1111,10 +1107,14 @@ class ApacheSSLContext(OSContextGenerator): endpoint = resolve_address(net_type) addresses.append((addr, endpoint)) - return sorted(set(addresses)) + # Log the set of addresses to have a trail log and capture if tuples + # change over time in the same unit (LP: #1952414). 
+ sorted_addresses = sorted(set(addresses)) + log('get_network_addresses: {}'.format(sorted_addresses)) + return sorted_addresses def __call__(self): - if isinstance(self.external_ports, six.string_types): + if isinstance(self.external_ports, str): self.external_ports = [self.external_ports] if not self.external_ports or not https(): @@ -1531,9 +1531,9 @@ class SubordinateConfigContext(OSContextGenerator): continue sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): + for k, v in sub_config.items(): if k == 'sections': - for section, config_list in six.iteritems(v): + for section, config_list in v.items(): log("adding section '%s'" % (section), level=DEBUG) if ctxt[k].get(section): @@ -1887,8 +1887,11 @@ class DataPortContext(NeutronPortContext): normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {normalized[port]: bridge for port, bridge in - six.iteritems(portmap) if port in normalized.keys()} + return { + normalized[port]: bridge + for port, bridge in portmap.items() + if port in normalized.keys() + } return None @@ -2291,15 +2294,10 @@ class HostInfoContext(OSContextGenerator): name = name or socket.gethostname() fqdn = '' - if six.PY2: - exc = socket.error - else: - exc = OSError - try: addrs = socket.getaddrinfo( name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) - except exc: + except OSError: pass else: for addr in addrs: @@ -2416,12 +2414,12 @@ class DHCPAgentContext(OSContextGenerator): existing_ovs_use_veth = None # If there is a dhcp_agent.ini file read the current setting if os.path.isfile(DHCP_AGENT_INI): - # config_ini does the right thing and returns None if the setting is - # commented. + # config_ini does the right thing and returns None if the setting + # is commented. existing_ovs_use_veth = ( config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth")) # Convert to Bool if necessary - if isinstance(existing_ovs_use_veth, six.string_types): + if isinstance(existing_ovs_use_veth, str): return bool_from_string(existing_ovs_use_veth) return existing_ovs_use_veth @@ -3126,7 +3124,7 @@ class SRIOVContext(OSContextGenerator): """Determine number of Virtual Functions (VFs) configured for device. :param device: Object describing a PCI Network interface card (NIC)/ - :type device: sriov_netplan_shim.pci.PCINetDevice + :type device: contrib.hardware.pci.PCINetDevice :param sriov_numvfs: Number of VFs requested for blanket configuration. :type sriov_numvfs: int :returns: Number of VFs to configure for device diff --git a/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py new file mode 100755 index 0000000..5f392b3 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/files/check_deferred_restarts.py @@ -0,0 +1,128 @@ +#!/usr/bin/python3 + +# Copyright 2014-2022 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Checks for services with deferred restarts. 
+ +This Nagios check will parse /var/lib/policy-rd.d/ +to find any restarts that are currently deferred. +""" + +import argparse +import glob +import sys +import yaml + + +DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' + + +def get_deferred_events(): + """Return a list of deferred events dicts from policy-rc.d files. + + Events are read from DEFERRED_EVENTS_DIR and are of the form: + { + action: restart, + policy_requestor_name: rabbitmq-server, + policy_requestor_type: charm, + reason: 'Pkg update', + service: rabbitmq-server, + time: 1614328743 + } + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of deferred event dictionaries + :rtype: list + """ + deferred_events_files = glob.glob( + '{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) + + deferred_events = [] + for event_file in deferred_events_files: + with open(event_file, 'r') as f: + event = yaml.safe_load(f) + deferred_events.append(event) + + return deferred_events + + +def get_deferred_restart_services(application=None): + """Returns a list of services with deferred restarts. + + :param str application: Name of the application that blocked the service restart. + If application is None, all services with deferred restarts + are returned. Services which are blocked by a non-charm + requestor are always returned. + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of services with deferred restarts belonging to application. + :rtype: list + """ + + deferred_restart_events = filter( + lambda e: e['action'] == 'restart', get_deferred_events()) + + deferred_restart_services = set() + for restart_event in deferred_restart_events: + if application: + if ( + restart_event['policy_requestor_type'] != 'charm' or + restart_event['policy_requestor_type'] == 'charm' and + restart_event['policy_requestor_name'] == application + ): + deferred_restart_services.add(restart_event['service']) + else: + deferred_restart_services.add(restart_event['service']) + + return list(deferred_restart_services) + + +def main(): + """Check for services with deferred restarts.""" + parser = argparse.ArgumentParser( + description='Check for services with deferred restarts') + parser.add_argument( + '--application', help='Check services belonging to this application only') + + args = parser.parse_args() + + services = set(get_deferred_restart_services(args.application)) + + if len(services) == 0: + print('OK: No deferred service restarts.') + sys.exit(0) + else: + print( + 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) + sys.exit(1) + + +if __name__ == '__main__': + try: + main() + except OSError as e: + print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) + sys.exit(1) + except yaml.YAMLError as e: + print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) + sys.exit(1) + except Exception as e: + print('CRITICAL: An unknown error occurred: {}'.format(str(e))) + sys.exit(1) diff --git a/hooks/charmhelpers/contrib/openstack/keystone.py b/hooks/charmhelpers/contrib/openstack/keystone.py index d7e02cc..5775aa4 100644 --- a/hooks/charmhelpers/contrib/openstack/keystone.py +++ b/hooks/charmhelpers/contrib/openstack/keystone.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # # Copyright 2017 Canonical Ltd # @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and 
# limitations under the License. -import six from charmhelpers.fetch import apt_install from charmhelpers.contrib.openstack.context import IdentityServiceContext from charmhelpers.core.hookenv import ( @@ -117,10 +115,7 @@ class KeystoneManager2(KeystoneManager): from keystoneclient.auth.identity import v2 from keystoneclient import session except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v2_0 import client from keystoneclient.auth.identity import v2 @@ -151,10 +146,7 @@ class KeystoneManager3(KeystoneManager): from keystoneclient import session from keystoneclient.auth.identity import v3 except ImportError: - if six.PY2: - apt_install(["python-keystoneclient"], fatal=True) - else: - apt_install(["python3-keystoneclient"], fatal=True) + apt_install(["python3-keystoneclient"], fatal=True) from keystoneclient.v3 import client from keystoneclient.auth import token_endpoint diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index b41314c..4777246 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,6 @@ # Various utilities for dealing with Neutron and the renaming from Quantum. -import six from subprocess import check_output from charmhelpers.core.hookenv import ( @@ -349,11 +348,4 @@ def parse_vlan_range_mappings(mappings): Returns dict of the form {provider: (start, end)}. """ _mappings = parse_mappings(mappings) - if not _mappings: - return {} - - mappings = {} - for p, r in six.iteritems(_mappings): - mappings[p] = tuple(r.split(':')) - - return mappings + return {p: tuple(r.split(':')) for p, r in _mappings.items()} diff --git a/hooks/charmhelpers/contrib/openstack/policyd.py b/hooks/charmhelpers/contrib/openstack/policyd.py index 6fa06f2..767943c 100644 --- a/hooks/charmhelpers/contrib/openstack/policyd.py +++ b/hooks/charmhelpers/contrib/openstack/policyd.py @@ -15,7 +15,6 @@ import collections import contextlib import os -import six import shutil import yaml import zipfile @@ -204,12 +203,6 @@ class BadPolicyYamlFile(Exception): return self.log_message -if six.PY2: - BadZipFile = zipfile.BadZipfile -else: - BadZipFile = zipfile.BadZipFile - - def is_policyd_override_valid_on_this_release(openstack_release): """Check that the charm is running on at least Ubuntu Xenial, and at least the queens release. @@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): if blacklisted_keys_present: raise BadPolicyYamlFile("blacklisted keys {} present." 
.format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, six.string_types) for k in keys): + if not all(isinstance(k, str) for k in keys): raise BadPolicyYamlFile("keys in yaml aren't all strings?") # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, six.string_types) for v in doc.values()): + if not all(isinstance(v, str) for v in doc.values()): raise BadPolicyYamlFile("values in yaml aren't all strings?") return doc @@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir - for direntry in _scanner(path): + for direntry in os.scandir(path): # see if the path should be kept. if direntry.path in keep_paths: continue @@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group): ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) -@contextlib.contextmanager -def _fallback_scandir(path): - """Fallback os.scandir implementation. - - provide a fallback implementation of os.scandir if this module ever gets - used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for - directory. - - :param path: the path to list the directories for - :type path: str - :returns: Generator that provides _FBDirectory objects - :rtype: ContextManager[_FBDirectory] - """ - for f in os.listdir(path): - yield _FBDirectory(f) - - -class _FBDirectory(object): - """Mock a scandir Directory object with enough to use in - clean_policyd_dir_for - """ - - def __init__(self, path): - self.path = path - - def is_dir(self): - return os.path.isdir(self.path) - - def path_for_policy_file(service, name): """Return the full path for a policy.d file that will be written to the service's policy.d directory. @@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file, _group) # Every thing worked, so we mark up a success. 
completed = True - except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), level=POLICYD_LOG_LEVEL_DEFAULT) except IOError as e: diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 875e139..626ecba 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -82,7 +82,11 @@ backend {{ service }}_{{ frontend }} {% endif -%} {% endif -%} {% for unit, address in frontends[frontend]['backends'].items() -%} + {% if https -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check check-ssl verify none + {% else -%} server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endif -%} {% endfor %} {% endfor -%} {% endfor -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index 530719e..6ed869a 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -22,6 +22,8 @@ Listen {{ ext_port }} ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on RequestHeader set X-Forwarded-Proto "https" + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 {% endfor -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index 530719e..6ed869a 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -22,6 +22,8 @@ Listen {{ ext_port }} ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on RequestHeader set X-Forwarded-Proto "https" + KeepAliveTimeout 75 + MaxKeepAliveRequests 1000 {% endfor -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index 5dcebe7..c9b0152 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -9,4 +9,7 @@ project_name = {{ admin_tenant_name }} username = {{ admin_user }} password = {{ admin_password }} signing_dir = {{ signing_dir }} +{% if service_type -%} +service_type = {{ service_type }} +{% endif -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka index c281868..14c25b4 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka +++ b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -6,6 +6,9 @@ auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3 auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3 project_domain_name = {{ admin_domain_name }} user_domain_name = {{ admin_domain_name }} +{% if service_type -%} +service_type = {{ service_type }} +{% endif -%} {% else -%} auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} diff --git 
a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
index b9ca396..6c4e37e 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
+++ b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -20,6 +20,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -46,6 +48,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ admin_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -72,6 +76,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ public_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
index b9ca396..6c4e37e 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
+++ b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
@@ -20,6 +20,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -46,6 +48,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ admin_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -72,6 +76,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ public_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py
index 050f8af..3b7c6a9 100644
--- a/hooks/charmhelpers/contrib/openstack/templating.py
+++ b/hooks/charmhelpers/contrib/openstack/templating.py
@@ -14,8 +14,6 @@
 
 import os
 
-import six
-
 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import (
     log,
@@ -29,10 +27,7 @@ try:
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 except ImportError:
     apt_update(fatal=True)
-    if six.PY2:
-        apt_install('python-jinja2', fatal=True)
-    else:
-        apt_install('python3-jinja2', fatal=True)
+    apt_install('python3-jinja2', fatal=True)
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 
 
@@ -62,7 +57,7 @@ def get_loader(templates_dir, os_release):
     order by OpenStack release.
     """
     tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
-                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+                 for rel in OPENSTACK_CODENAMES.values()]
 
     if not os.path.isdir(templates_dir):
         log('Templates directory not found @ %s.' % templates_dir,
@@ -225,10 +220,7 @@ class OSConfigRenderer(object):
             # if this code is running, the object is created pre-install hook.
             # jinja2 shouldn't get touched until the module is reloaded on next
             # hook execution, with proper jinja2 bits successfully imported.
- if six.PY2: - apt_install('python-jinja2') - else: - apt_install('python3-jinja2') + apt_install('python3-jinja2') def register(self, config_file, contexts, config_template=None): """ @@ -318,9 +310,7 @@ class OSConfigRenderer(object): log('Config not registered: %s' % config_file, level=ERROR) raise OSConfigException - _out = self.render(config_file) - if six.PY3: - _out = _out.encode('UTF-8') + _out = self.render(config_file).encode('UTF-8') with open(config_file, 'wb') as out: out.write(_out) @@ -331,7 +321,8 @@ class OSConfigRenderer(object): """ Write out all registered config files. """ - [self.write(k) for k in six.iterkeys(self.templates)] + for k in self.templates.keys(): + self.write(k) def set_release(self, openstack_release): """ @@ -347,8 +338,8 @@ class OSConfigRenderer(object): Returns a list of context interfaces that yield a complete context. ''' interfaces = [] - [interfaces.extend(i.complete_contexts()) - for i in six.itervalues(self.templates)] + for i in self.templates.values(): + interfaces.extend(i.complete_contexts()) return interfaces def get_incomplete_context_data(self, interfaces): @@ -360,7 +351,7 @@ class OSConfigRenderer(object): ''' incomplete_context_data = {} - for i in six.itervalues(self.templates): + for i in self.templates.values(): for context in i.contexts: for interface in interfaces: related = False diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 9cc96d6..c8747c1 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -25,7 +25,6 @@ import re import itertools import functools -import six import traceback import uuid import yaml @@ -362,6 +361,8 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel + if src in OPENSTACK_RELEASES: + return src if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] @@ -401,7 +402,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(version_map): + for k, v in version_map.items(): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -411,7 +412,8 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): def get_os_version_codename_swift(codename): '''Determine OpenStack version number of swift from codename.''' - for k, v in six.iteritems(SWIFT_CODENAMES): + # for k, v in six.iteritems(SWIFT_CODENAMES): + for k, v in SWIFT_CODENAMES.items(): if k == codename: return v[-1] e = 'Could not derive swift version for '\ @@ -421,17 +423,17 @@ def get_os_version_codename_swift(codename): def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] if len(codenames) > 1: # If more than one release codename contains this version we determine # the actual codename based on the highest available install source. 
for codename in reversed(codenames): releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in six.iteritems(releases) if codename in v] - ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) - if six.PY3: - ret = ret.decode('UTF-8') + release = [k for k, v in releases.items() if codename in v] + ret = (subprocess + .check_output(['apt-cache', 'policy', 'swift']) + .decode('UTF-8')) if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -441,7 +443,7 @@ def get_swift_codename(version): match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) - for codename, versions in six.iteritems(SWIFT_CODENAMES): + for codename, versions in SWIFT_CODENAMES.items(): for release_version in versions: if release_version.startswith(major_minor_version): return codename @@ -477,9 +479,7 @@ def get_os_codename_package(package, fatal=True): if snap_install_requested(): cmd = ['snap', 'list', package] try: - out = subprocess.check_output(cmd) - if six.PY3: - out = out.decode('UTF-8') + out = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return None lines = out.split('\n') @@ -549,16 +549,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES - for cname, version in six.iteritems(vers_map): + for cname, version in vers_map.items(): if cname == codename: return version[-1] else: vers_map = OPENSTACK_CODENAMES - for version, cname in six.iteritems(vers_map): + for version, cname in vers_map.items(): if cname == codename: return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) def get_installed_os_version(): @@ -821,10 +819,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wt') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] + rc_script.write("#!/bin/bash\n") + for u, p in env_vars.items(): + if u != "script_path": + rc_script.write('export %s=%s\n' % (u, p)) def openstack_upgrade_available(package): @@ -1039,7 +1037,7 @@ def _determine_os_workload_status( state, message, lambda: charm_func(configs)) if state is None: - state, message = _ows_check_services_running(services, ports) + state, message = ows_check_services_running(services, ports) if state is None: state = 'active' @@ -1213,7 +1211,12 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): return state, message +@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) def _ows_check_services_running(services, ports): + return ows_check_services_running(services, ports) + + +def ows_check_services_running(services, ports): """Check that the services that should be running are actually running and that any ports specified are being listened to. @@ -1413,45 +1416,75 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs, - force_upgrade=False): +def do_action_openstack_upgrade(package, upgrade_callback, configs): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets the corresponding action status as a result. - If the charm was installed from source we cannot upgrade it. 
For backwards compatibility a config flag (action-managed-upgrade) must be set for this code to run, otherwise a full service level upgrade will fire on config-changed. - @param package: package name for determining if upgrade available + @param package: package name for determining if openstack upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class - @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package) or force_upgrade: + if openstack_upgrade_available(package): if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') try: upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) + action_set({'outcome': 'success, upgrade completed'}) ret = True except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'outcome': 'upgrade failed, see traceback'}) action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' + action_fail('upgrade callback resulted in an ' 'unexpected error') else: action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + 'False, skipped upgrade'}) else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'no upgrade available'}) + + return ret + + +def do_action_package_upgrade(package, upgrade_callback, configs): + """Perform package upgrade within the current OpenStack release. + + Upgrades packages only if there is not an openstack upgrade available, + and sets the corresponding action status as a result. + + @param package: package name for determining if openstack upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if not openstack_upgrade_available(package): + juju_log('Upgrading packages') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('upgrade callback resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'upgrade skipped because an openstack upgrade ' + 'is available'}) return ret @@ -1849,21 +1882,20 @@ def pausable_restart_on_change(restart_map, stopstart=False, """ def wrap(f): - # py27 compatible nonlocal variable. 
When py3 only, replace with - # nonlocal keyword - __restart_map_cache = {'cache': None} + __restart_map_cache = None @functools.wraps(f) def wrapped_f(*args, **kwargs): + nonlocal __restart_map_cache if is_unit_paused_set(): return f(*args, **kwargs) - if __restart_map_cache['cache'] is None: - __restart_map_cache['cache'] = restart_map() \ + if __restart_map_cache is None: + __restart_map_cache = restart_map() \ if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( (lambda: f(*args, **kwargs)), - __restart_map_cache['cache'], + __restart_map_cache, stopstart, restart_functions, can_restart_now_f, @@ -1888,7 +1920,7 @@ def ordered(orderme): raise ValueError('argument must be a dict type') result = OrderedDict() - for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + for k, v in sorted(orderme.items(), key=lambda x: x[0]): if isinstance(v, dict): result[k] = ordered(v) else: diff --git a/hooks/charmhelpers/contrib/python.py b/hooks/charmhelpers/contrib/python.py index 84cba8c..fcded68 100644 --- a/hooks/charmhelpers/contrib/python.py +++ b/hooks/charmhelpers/contrib/python.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import - # deprecated aliases for backwards compatibility from charmhelpers.fetch.python import debug # noqa from charmhelpers.fetch.python import packages # noqa diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index c70aeb2..1b20b8f 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -23,7 +23,6 @@ import collections import errno import hashlib import math -import six import os import shutil @@ -218,7 +217,7 @@ def validator(value, valid_type, valid_range=None): "was given {} of type {}" .format(valid_range, type(valid_range))) # If we're dealing with strings - if isinstance(value, six.string_types): + if isinstance(value, str): assert value in valid_range, ( "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max @@ -434,9 +433,9 @@ class BasePool(object): :type mode: str """ # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) + validator(value=cache_pool, valid_type=str) validator( - value=mode, valid_type=six.string_types, + value=mode, valid_type=str, valid_range=["readonly", "writeback"]) check_call([ @@ -615,7 +614,8 @@ class Pool(BasePool): class ReplicatedPool(BasePool): def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None): + percent_data=None, app_name=None, op=None, + profile_name='replicated_rule'): """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword @@ -632,6 +632,8 @@ class ReplicatedPool(BasePool): to this replicated pool. :type replicas: int :raises: KeyError + :param profile_name: Crush Profile to use + :type profile_name: Optional[str] """ # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. @@ -646,11 +648,20 @@ class ReplicatedPool(BasePool): # we will fail with KeyError if it is not provided. 
self.replicas = op['replicas'] self.pg_num = op.get('pg_num') + self.profile_name = op.get('crush-profile') or profile_name else: self.replicas = replicas or 2 self.pg_num = pg_num + self.profile_name = profile_name or 'replicated_rule' def _create(self): + # Validate if crush profile exists + if self.profile_name is None: + msg = ("Failed to discover crush profile named " + "{}".format(self.profile_name)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + # Do extra validation on pg_num with data from live cluster if self.pg_num: # Since the number of placement groups were specified, ensure @@ -668,12 +679,12 @@ class ReplicatedPool(BasePool): '--pg-num-min={}'.format( min(AUTOSCALER_DEFAULT_PGS, self.pg_num) ), - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] else: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] check_call(cmd) @@ -692,7 +703,7 @@ class ErasurePool(BasePool): def __init__(self, service, name=None, erasure_code_profile=None, percent_data=None, app_name=None, op=None, allow_ec_overwrites=False): - """Initialize ReplicatedPool object. + """Initialize ErasurePool object. Pool information is either initialized from individual keyword arguments or from a individual CephBrokerRq operation Dict. @@ -778,10 +789,11 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: - modules = check_output(cmd) - if six.PY3: - modules = modules.decode('UTF-8') + modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -814,10 +826,10 @@ def get_mon_map(service): ceph command fails. 
""" try: - mon_status = check_output(['ceph', '--id', service, - 'mon_status', '--format=json']) - if six.PY3: - mon_status = mon_status.decode('UTF-8') + octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 + mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' + mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, + '--format=json'])).decode('utf-8') try: return json.loads(mon_status) except ValueError as v: @@ -959,9 +971,7 @@ def get_erasure_profile(service, name): try: out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + name, '--format=json']).decode('utf-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -1164,8 +1174,7 @@ def create_erasure_profile(service, profile_name, 'nvme' ] - validator(erasure_plugin_name, six.string_types, - list(plugin_techniques.keys())) + validator(erasure_plugin_name, str, list(plugin_techniques.keys())) cmd = [ 'ceph', '--id', service, @@ -1176,7 +1185,7 @@ def create_erasure_profile(service, profile_name, ] if erasure_plugin_technique: - validator(erasure_plugin_technique, six.string_types, + validator(erasure_plugin_technique, str, plugin_techniques[erasure_plugin_name]) cmd.append('technique={}'.format(erasure_plugin_technique)) @@ -1189,7 +1198,7 @@ def create_erasure_profile(service, profile_name, failure_domain = 'rack' if failure_domain: - validator(failure_domain, six.string_types, failure_domains) + validator(failure_domain, str, failure_domains) # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain={}'.format(failure_domain)) @@ -1198,7 +1207,7 @@ def create_erasure_profile(service, profile_name, # device class new in luminous if luminous_or_later and device_class: - validator(device_class, six.string_types, device_classes) + validator(device_class, str, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', @@ -1213,7 +1222,7 @@ def create_erasure_profile(service, profile_name, raise ValueError("locality must be provided for lrc plugin") # LRC optional configuration if crush_locality: - validator(crush_locality, six.string_types, failure_domains) + validator(crush_locality, str, failure_domains) cmd.append('crush-locality={}'.format(crush_locality)) if erasure_plugin_name == 'shec': @@ -1241,8 +1250,8 @@ def rename_pool(service, old_name, new_name): :param new_name: Name to rename pool to. :type new_name: str """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) + validator(value=old_name, valid_type=str) + validator(value=new_name, valid_type=str) cmd = [ 'ceph', '--id', service, @@ -1260,7 +1269,7 @@ def erasure_profile_exists(service, name): :returns: True if it exists, False otherwise. :rtype: bool """ - validator(value=name, valid_type=six.string_types) + validator(value=name, valid_type=str) try: check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', @@ -1280,12 +1289,10 @@ def get_cache_mode(service, pool_name): :returns: Current cache mode. 
:rtype: Optional[int] """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) + validator(value=service, valid_type=str) + validator(value=pool_name, valid_type=str) out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + 'osd', 'dump', '--format=json']).decode('utf-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -1299,9 +1306,8 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output( + ['rados', '--id', service, 'lspools']).decode('utf-8') except CalledProcessError: return False @@ -1320,13 +1326,11 @@ def get_osds(service, device_class=None): out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, - '--format=json']) + '--format=json']).decode('utf-8') else: out = check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + '--format=json']).decode('utf-8') return json.loads(out) @@ -1343,9 +1347,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]) - if six.PY3: - out = out.decode('UTF-8') + service, '--pool', pool]).decode('utf-8') except CalledProcessError: return False @@ -1371,7 +1373,7 @@ def update_pool(client, pool, settings): :raises: CalledProcessError """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): + for k, v in settings.items(): check_call(cmd + [k, v]) @@ -1509,9 +1511,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output(['rbd', 'showmapped']).decode('utf-8') except CalledProcessError: return False @@ -1857,7 +1857,7 @@ class CephBrokerRq(object): } def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - **kwargs): + crush_profile=None, **kwargs): """Adds an operation to create a replicated pool. Refer to docstring for ``_partial_build_common_op_create`` for @@ -1871,6 +1871,10 @@ class CephBrokerRq(object): for pool. :type pg_num: int :raises: AssertionError if provided data is of invalid type/range + :param crush_profile: Name of crush profile to use. If not set the + ceph-mon unit handling the broker request will + set its default value. + :type crush_profile: Optional[str] """ if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') @@ -1880,6 +1884,7 @@ class CephBrokerRq(object): 'name': name, 'replicas': replica_count, 'pg_num': pg_num, + 'crush-profile': crush_profile } op.update(self._partial_build_common_op_create(**kwargs)) diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 74bab40..04daea2 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -19,8 +19,6 @@ from subprocess import ( check_output, ) -import six - ################################################## # loopback device helpers. 
@@ -40,9 +38,7 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - output = check_output(cmd) - if six.PY3: - output = output.decode('utf-8') + output = check_output(cmd).decode('utf-8') devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] @@ -57,7 +53,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == file_path: return d @@ -71,7 +67,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == path: return d diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index e94247a..370c3e8 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -17,12 +17,11 @@ # Authors: # Charm Helpers Developers -from __future__ import print_function import copy from distutils.version import LooseVersion from enum import Enum from functools import wraps -from collections import namedtuple +from collections import namedtuple, UserDict import glob import os import json @@ -36,12 +35,6 @@ from subprocess import CalledProcessError from charmhelpers import deprecate -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -112,7 +105,7 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests @@ -132,7 +125,7 @@ def log(message, level=None): def function_log(message): """Write a function progress message""" command = ['function-log'] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing function-log should not cause failures in unit tests @@ -445,12 +438,6 @@ def config(scope=None): """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError try: if _cache_config is None: config_data = json.loads( @@ -459,7 +446,7 @@ def config(scope=None): if scope is not None: return _cache_config.get(scope) return _cache_config - except (exc_json, UnicodeDecodeError) as e: + except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) @@ -491,12 +478,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None): raise +@cached +def _relation_set_accepts_file(): + """Return True if the juju relation-set command accepts a file. + + Cache the result as it won't change during the execution of a hook, and + thus we can make relation_set() more efficient by only checking for the + first relation_set() call. + + :returns: True if relation_set accepts a file. + :rtype: bool + :raises: subprocess.CalledProcessError if the check fails. 
+ """ + return "--file" in subprocess.check_output( + ["relation-set", "--help"], universal_newlines=True) + + def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) if app: relation_cmd_line.append('--app') if relation_id is not None: @@ -508,7 +509,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): # sites pass in things like dicts or numbers. if value is not None: settings[key] = "{}".format(value) - if accepts_file: + if _relation_set_accepts_file(): # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from @@ -1003,14 +1004,8 @@ def cmd_exists(cmd): @cached -@deprecate("moved to function_get()", log=log) def action_get(key=None): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_get`. - - Gets the value of an action parameter, or all key/value param pairs. - """ + """Gets the value of an action parameter, or all key/value param pairs.""" cmd = ['action-get'] if key is not None: cmd.append(key) @@ -1020,8 +1015,12 @@ def action_get(key=None): @cached +@deprecate("moved to action_get()", log=log) def function_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['function-get'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1034,22 +1033,20 @@ def function_get(key=None): return function_data -@deprecate("moved to function_set()", log=log) def action_set(values): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_set`. - - Sets the values to be returned after the action finishes. - """ + """Sets the values to be returned after the action finishes.""" cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +@deprecate("moved to action_set()", log=log) def function_set(values): - """Sets the values to be returned after the function finishes""" + """ + .. deprecated:: + Sets the values to be returned after the function finishes. + """ cmd = ['function-set'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1060,12 +1057,8 @@ def function_set(values): subprocess.check_call(cmd) -@deprecate("moved to function_fail()", log=log) def action_fail(message): """ - .. deprecated:: 0.20.7 - Alias for :func:`function_fail`. - Sets the action status to failed and sets the error message. The results set by action_set are preserved. @@ -1073,10 +1066,14 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +@deprecate("moved to action_fail()", log=log) def function_fail(message): - """Sets the function status to failed and sets the error message. + """ + .. deprecated:: + Sets the function status to failed and sets the error message. - The results set by function_set are preserved.""" + The results set by function_set are preserved. + """ cmd = ['function-fail'] # Fallback for older charms. 
if not cmd_exists('function-fail'): diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 994ec8a..ad2cab4 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -31,7 +31,6 @@ import subprocess import hashlib import functools import itertools -import six from contextlib import contextmanager from collections import OrderedDict, defaultdict @@ -115,6 +114,33 @@ def service_stop(service_name, **kwargs): return service('stop', service_name, **kwargs) +def service_enable(service_name, **kwargs): + """Enable a system service. + + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be restarted. The follow- + ing example restarts the ceph-osd service for instance id=4: + + service_enable('ceph-osd', id=4) + + :param service_name: the name of the service to enable + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + return service('enable', service_name, **kwargs) + + def service_restart(service_name, **kwargs): """Restart a system service. @@ -135,7 +161,7 @@ def service_restart(service_name, **kwargs): :param service_name: the name of the service to restart :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs + parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). 
""" @@ -263,7 +289,7 @@ def service(action, service_name, **kwargs): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -289,7 +315,7 @@ def service_running(service_name, **kwargs): if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output( @@ -564,7 +590,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): + if isinstance(content, str): content = content.encode('UTF-8') target.write(content) return @@ -967,7 +993,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): + if isinstance(nic_type, str): int_types = [nic_type] else: int_types = nic_type @@ -1081,8 +1107,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): try: chown(full, uid, gid) except (IOError, OSError) as e: - # Intended to ignore "file not found". Catching both to be - # compatible with both Python 2.7 and 3.x. + # Intended to ignore "file not found". if e.errno == errno.ENOENT: pass diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py index 9f88029..7c37c65 100644 --- a/hooks/charmhelpers/core/services/base.py +++ b/hooks/charmhelpers/core/services/base.py @@ -17,8 +17,6 @@ import json import inspect from collections import Iterable, OrderedDict -import six - from charmhelpers.core import host from charmhelpers.core import hookenv @@ -171,10 +169,7 @@ class ServiceManager(object): if not units: continue remote_service = units[0].split('/')[0] - if six.PY2: - argspec = inspect.getargspec(provider.provide_data) - else: - argspec = inspect.getfullargspec(provider.provide_data) + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 3e6e30d..5bf62dd 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -179,7 +179,7 @@ class RequiredConfig(dict): self.required_options = args self['config'] = hookenv.config() with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) + self.config = yaml.safe_load(fp).get('options', {}) def __bool__(self): for option in self.required_options: @@ -227,7 +227,7 @@ class StoredContext(dict): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) + data = yaml.safe_load(file_stream) if not data: raise OSError("%s is empty" % file_name) return data diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py index 28c6b3f..3136687 100644 --- a/hooks/charmhelpers/core/strutils.py +++ b/hooks/charmhelpers/core/strutils.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and 
# limitations under the License. -import six import re TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} @@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY Returns True if value translates to True otherwise False. """ - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as boolean" % (value) raise ValueError(msg) @@ -61,8 +60,8 @@ def bytes_from_string(value): 'P': 5, 'PB': 5, } - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 9014015..cb0213d 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. + Note: Using this requires python3-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 9497ee0..1283f25 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -20,11 +20,7 @@ from charmhelpers.core.hookenv import ( log, ) -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse +from urllib.parse import urlparse, urlunparse # The order of this list is very important. 
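The templating change above makes render() assume python3-jinja2 outright, installing it via apt only if the import fails. For orientation, a typical call site looks like the sketch below; the template name, target path and context keys are illustrative, and render() resolves source against the charm's templates/ directory by default:

    from charmhelpers.core.templating import render

    render(
        source='cinder-backup.conf.j2',    # looked up under templates/
        target='/etc/cinder/cinder-backup.conf',
        context={'backup_driver': 'cinder.backup.drivers.ceph'},
        perms=0o640,
    )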
Handlers should be listed in from @@ -134,14 +130,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, six.string_types): + if isinstance(sources, str): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] if len(sources) != len(keys): diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index d25587a..2cb2e88 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -26,26 +26,15 @@ from charmhelpers.payload.archive import ( ) from charmhelpers.core.host import mkdir, check_hash -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs +from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from urllib.parse import urlparse, urlunparse, parse_qs +from urllib.error import URLError def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: @@ -54,7 +43,6 @@ def splituser(host): def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' _passwdprog = re.compile('^([^:]*):(.*)$', re.S) match = _passwdprog.match(user) if match: @@ -150,10 +138,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available + algorithms = hashlib.algorithms_available if key in algorithms: if len(value) != 1: raise TypeError( diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py index a91dcff..f849201 100644 --- a/hooks/charmhelpers/fetch/centos.py +++ b/hooks/charmhelpers/fetch/centos.py @@ -15,7 +15,6 @@ import subprocess import os import time -import six import yum from tempfile import NamedTemporaryFile @@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False): if options is not None: cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -71,7 +70,7 @@ def update(fatal=False): def purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -83,7 +82,7 @@ def yum_search(packages): """Search for a package.""" output = {} cmd = ['yum', 'search'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) diff --git a/hooks/charmhelpers/fetch/python/debug.py b/hooks/charmhelpers/fetch/python/debug.py index 757135e..dd5cca8 100644 --- 
a/hooks/charmhelpers/fetch/python/debug.py +++ b/hooks/charmhelpers/fetch/python/debug.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import atexit import sys diff --git a/hooks/charmhelpers/fetch/python/packages.py b/hooks/charmhelpers/fetch/python/packages.py index 6004835..93f1fa3 100644 --- a/hooks/charmhelpers/fetch/python/packages.py +++ b/hooks/charmhelpers/fetch/python/packages.py @@ -16,7 +16,6 @@ # limitations under the License. import os -import six import subprocess import sys @@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - if six.PY2: - apt_install('python-pip') - else: - apt_install('python3-pip') + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -140,12 +136,8 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - if six.PY2: - apt_install('python-virtualenv') - extra_flags = [] - else: - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py index cf8328f..e6f8a0a 100644 --- a/hooks/charmhelpers/fetch/ubuntu.py +++ b/hooks/charmhelpers/fetch/ubuntu.py @@ -13,10 +13,8 @@ # limitations under the License. from collections import OrderedDict -import os import platform import re -import six import subprocess import sys import time @@ -361,7 +359,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -413,7 +411,7 @@ def apt_purge(packages, fatal=False): :raises: subprocess.CalledProcessError """ cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -440,7 +438,7 @@ def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -485,10 +483,7 @@ def import_key(key): if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): log("Writing provided PGP key in the binary format", level=DEBUG) - if six.PY3: - key_bytes = key.encode('utf-8') - else: - key_bytes = key + key_bytes = key.encode('utf-8') key_name = _get_keyid_by_gpg_key(key_bytes) key_gpg = _dearmor_gpg_key(key_bytes) _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) @@ -528,9 +523,8 @@ def _get_keyid_by_gpg_key(key_material): stderr=subprocess.PIPE, stdin=subprocess.PIPE) out, err = ps.communicate(input=key_material) - if six.PY3: - out = out.decode('utf-8') - err = err.decode('utf-8') + out = out.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' 
in err: raise GPGKeyError('Invalid GPG key material provided') # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) @@ -588,8 +582,7 @@ def _dearmor_gpg_key(key_asc): stdin=subprocess.PIPE) out, err = ps.communicate(input=key_asc) # no need to decode output as it is binary (invalid utf-8), only error - if six.PY3: - err = err.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material. Check your network setup' ' (MTU, routing, DNS) and/or proxy server settings' @@ -693,7 +686,7 @@ def add_source(source, key=None, fail_invalid=False): ]) if source is None: source = '' - for r, fn in six.iteritems(_mapping): + for r, fn in _mapping.items(): m = re.match(r, source) if m: if key: @@ -726,7 +719,7 @@ def _add_proposed(): """ release = get_distrib_codename() arch = platform.machine() - if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + if arch not in ARCH_TO_PROPOSED_POCKET.keys(): raise SourceConfigError("Arch {} not supported for (distro-)proposed" .format(arch)) with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -913,9 +906,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -957,9 +949,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False): else: kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) @@ -989,7 +980,7 @@ def get_installed_version(package): Version object """ cache = apt_cache() - dpkg_result = cache._dpkg_list([package]).get(package, {}) + dpkg_result = cache.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') diff --git a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py index 436e177..6da355f 100644 --- a/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/hooks/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -40,6 +40,9 @@ import os import subprocess import sys +from charmhelpers import deprecate +from charmhelpers.core.hookenv import log + class _container(dict): """Simple container for attributes.""" @@ -79,7 +82,7 @@ class Cache(object): apt_result = self._apt_cache_show([package])[package] apt_result['name'] = apt_result.pop('package') pkg = Package(apt_result) - dpkg_result = self._dpkg_list([package]).get(package, {}) + dpkg_result = self.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') if installed_version: @@ -88,9 +91,29 @@ class Cache(object): pkg.architecture = dpkg_result.get('architecture') return pkg + @deprecate("use dpkg_list() instead.", "2022-05", log=log) def _dpkg_list(self, packages): + return self.dpkg_list(packages) + + def dpkg_list(self, packages): """Get data from system dpkg database for package. 
+ Note that this method is also useful for querying package names + containing wildcards, for example + + apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) + + may return + + { + 'nvidia-vgpu-ubuntu-470': { + 'name': 'nvidia-vgpu-ubuntu-470', + 'version': '470.68', + 'architecture': 'amd64', + 'description': 'NVIDIA vGPU driver - version 470.68' + } + } + :param packages: Packages to get data from :type packages: List[str] :returns: Structured data about installed packages, keys like diff --git a/metadata.yaml b/metadata.yaml index 5086cc6..d733b74 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -9,7 +9,7 @@ tags: - miscellaneous series: - focal -- impish +- jammy subordinate: true provides: backup-backend: diff --git a/osci.yaml b/osci.yaml index 381367f..20916cc 100644 --- a/osci.yaml +++ b/osci.yaml @@ -1,7 +1,7 @@ - project: templates: - charm-unit-jobs-py38 - - charm-unit-jobs-py39 + - charm-unit-jobs-py310 - charm-xena-functional-jobs - charm-yoga-functional-jobs vars: diff --git a/requirements.txt b/requirements.txt index ead6e89..196ff38 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,16 @@ pbr==5.6.0 simplejson>=2.2.0 netifaces>=0.10.4 +# Build requirements +cffi==1.14.6; python_version < '3.6' # cffi 1.15.0 drops support for py35. + +# NOTE: newer versions of cryptography require a Rust compiler to build, +# see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +# +cryptography<3.4 + # Strange import error with newer netaddr: netaddr>0.7.16,<0.8.0 diff --git a/tests/bundles/focal-xena.yaml b/tests/bundles/focal-xena.yaml index 6e8b0c8..c80055b 100644 --- a/tests/bundles/focal-xena.yaml +++ b/tests/bundles/focal-xena.yaml @@ -114,6 +114,8 @@ applications: glance: charm: ch:glance num_units: 1 + options: + openstack-origin: *openstack-origin to: - '12' channel: yoga/edge @@ -121,6 +123,8 @@ applications: nova-compute: charm: ch:nova-compute num_units: 1 + options: + openstack-origin: *openstack-origin to: - '13' channel: yoga/edge diff --git a/tests/bundles/focal-yoga.yaml b/tests/bundles/focal-yoga.yaml index d50e2fe..13c65f0 100644 --- a/tests/bundles/focal-yoga.yaml +++ b/tests/bundles/focal-yoga.yaml @@ -114,6 +114,8 @@ applications: glance: charm: ch:glance num_units: 1 + options: + openstack-origin: *openstack-origin to: - '12' channel: yoga/edge @@ -121,6 +123,8 @@ applications: nova-compute: charm: ch:nova-compute num_units: 1 + options: + openstack-origin: *openstack-origin to: - '13' channel: yoga/edge diff --git a/tests/bundles/impish-xena.yaml b/tests/bundles/impish-xena.yaml index bb6c1b0..7691d95 100644 --- a/tests/bundles/impish-xena.yaml +++ b/tests/bundles/impish-xena.yaml @@ -114,6 +114,8 @@ applications: glance: charm: ch:glance num_units: 1 + options: + source: *openstack-origin to: - '12' channel: yoga/edge @@ -121,6 +123,8 @@ applications: nova-compute: charm: ch:nova-compute num_units: 1 + options: + source: *openstack-origin to: - '13' channel: yoga/edge diff --git a/tests/bundles/jammy-yoga.yaml b/tests/bundles/jammy-yoga.yaml index 5b977d1..58874e1 100644 --- a/tests/bundles/jammy-yoga.yaml +++ b/tests/bundles/jammy-yoga.yaml @@ -114,6 +114,8 @@ applications: glance: charm: ch:glance num_units: 1 + options: + openstack-origin: *openstack-origin to: - '12' channel: yoga/edge @@ -121,6 +123,8 @@ applications: nova-compute: charm: ch:nova-compute num_units: 1 + options: + openstack-origin: *openstack-origin to: - '13' channel: yoga/edge diff --git 
a/tox.ini b/tox.ini index 81fd249..c41077d 100644 --- a/tox.ini +++ b/tox.ini @@ -51,8 +51,8 @@ commands = charmcraft -v build {toxinidir}/rename.sh -[testenv:py35] -basepython = python3.5 +[testenv:py3] +basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt @@ -60,26 +60,25 @@ deps = -r{toxinidir}/requirements.txt basepython = python3.6 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt - -[testenv:py37] -basepython = python3.7 -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} [testenv:py38] basepython = python3.8 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} [testenv:py39] basepython = python3.9 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} -[testenv:py3] -basepython = python3 +[testenv:py310] +basepython = python3.10 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt +commands = stestr run --slowest {posargs} [testenv:pep8] basepython = python3
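Looking back at the hookenv change, moving the '--file' detection into the @cached _relation_set_accepts_file() helper means the 'relation-set --help' subprocess runs at most once per hook execution, however many relations are updated. A usage-side sketch; the relation name matches this charm's metadata.yaml, while the settings keys and values are purely illustrative:

    from charmhelpers.core.hookenv import relation_ids, relation_set

    # Each call reuses the cached capability probe, so only the first one
    # pays for the 'relation-set --help' check.
    for rid in relation_ids('backup-backend'):
        relation_set(relation_id=rid, relation_settings={
            'backend-name': 'ceph',    # illustrative keys/values
            'stateless': 'True',
        })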
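Similarly, ubuntu_apt_pkg.Cache now exposes dpkg_list() publicly, keeping _dpkg_list() as a deprecated shim that logs a warning when called. A short usage sketch, following the wildcard example from the new docstring; package names are illustrative and apt_cache() comes from charmhelpers.fetch.ubuntu:

    from charmhelpers.fetch.ubuntu import apt_cache

    cache = apt_cache()

    # Exact package name; .get() keeps this None-safe when not installed.
    ceph = cache.dpkg_list(['ceph-common']).get('ceph-common', {})
    installed_version = ceph.get('version')

    # Wildcard query, as shown in the docstring above.
    for name, pkg in cache.dpkg_list(['nvidia-vgpu-ubuntu-*']).items():
        print(name, pkg['version'], pkg['architecture'])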