diff --git a/.zuul.yaml b/.zuul.yaml index fd189e2..7dd3db9 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,3 +1,3 @@ - project: templates: - - openstack-python3-ussuri-jobs + - openstack-python3-charm-yoga-jobs diff --git a/charmcraft.yaml b/charmcraft.yaml index 11d5f7c..68b9a01 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -22,7 +22,15 @@ parts: - README.md bases: - - name: ubuntu - channel: "20.04" - architectures: - - amd64 + - build-on: + - name: ubuntu + channel: "20.04" + architectures: + - amd64 + run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "22.04" + architectures: [amd64, s390x, ppc64el, arm64] \ No newline at end of file diff --git a/charmhelpers/__init__.py b/charmhelpers/__init__.py index 1f57ed2..ddf3045 100644 --- a/charmhelpers/__init__.py +++ b/charmhelpers/__init__.py @@ -14,30 +14,15 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. -from __future__ import print_function -from __future__ import absolute_import - import functools import inspect import subprocess -import sys -try: - import six # NOQA:F401 -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # NOQA:F401 try: import yaml # NOQA:F401 except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # NOQA:F401 diff --git a/charmhelpers/cli/__init__.py b/charmhelpers/cli/__init__.py index 74ea729..2b0c4b7 100644 --- a/charmhelpers/cli/__init__.py +++ b/charmhelpers/cli/__init__.py @@ -16,9 +16,6 @@ import inspect import argparse import sys -import six -from six.moves import zip - 
import charmhelpers.core.unitdata @@ -149,10 +146,7 @@ class CommandLine(object): def run(self): "Run cli, processing arguments and executing subcommands." arguments = self.argument_parser.parse_args() - if six.PY2: - argspec = inspect.getargspec(arguments.func) - else: - argspec = inspect.getfullargspec(arguments.func) + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -177,10 +171,7 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - if six.PY2: - argspec = inspect.getargspec(func) - else: - argspec = inspect.getfullargspec(func) + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py index 8d1753c..bad7a53 100644 --- a/charmhelpers/contrib/charmsupport/nrpe.py +++ b/charmhelpers/contrib/charmsupport/nrpe.py @@ -28,6 +28,7 @@ import subprocess import yaml from charmhelpers.core.hookenv import ( + application_name, config, hook_name, local_unit, @@ -174,7 +175,8 @@ define service {{ if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: - command += " " + " ".join(parts[1:]) + safe_args = [shlex.quote(arg) for arg in parts[1:]] + command += " " + " ".join(safe_args) return command log('Check command not found: {}'.format(parts[0])) return '' @@ -520,3 +522,39 @@ def remove_deprecated_check(nrpe, deprecated_services): for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) nrpe.remove_check(shortname=dep_svc) + + +def add_deferred_restarts_check(nrpe): + """ + Add NRPE check for services with deferred restarts. 
+ + :param NRPE nrpe: NRPE object to add check to + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Adding deferred restarts nrpe check: {}'.format(shortname)) + nrpe.add_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) + + +def remove_deferred_restarts_check(nrpe): + """ + Remove NRPE check for services with deferred service restarts. + + :param NRPE nrpe: NRPE object to remove check from + """ + unit_name = local_unit().replace('/', '-') + shortname = unit_name + '_deferred_restarts' + check_cmd = 'check_deferred_restarts.py --application {}'.format( + application_name()) + + log('Removing deferred restarts nrpe check: {}'.format(shortname)) + nrpe.remove_check( + shortname=shortname, + description='Check deferred service restarts {}'.format(unit_name), + check_cmd=check_cmd) diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py index f0b629a..146beba 100644 --- a/charmhelpers/contrib/hahelpers/cluster.py +++ b/charmhelpers/contrib/hahelpers/cluster.py @@ -32,8 +32,6 @@ import time from socket import gethostname as get_unit_hostname -import six - from charmhelpers.core.hookenv import ( log, relation_ids, @@ -125,16 +123,16 @@ def is_crm_dc(): """ cmd = ['crm', 'status'] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError as ex: raise CRMDCNotFound(str(ex)) current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + # Current DC: juju-lytrusty-machine-2 (168108163) + # - partition 
with quorum current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True @@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False): return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") + status = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('utf-8') except subprocess.CalledProcessError: status = None diff --git a/charmhelpers/contrib/hardening/apache/checks/config.py b/charmhelpers/contrib/hardening/apache/checks/config.py index 341da9e..e81a5f0 100644 --- a/charmhelpers/contrib/hardening/apache/checks/config.py +++ b/charmhelpers/contrib/hardening/apache/checks/config.py @@ -14,7 +14,6 @@ import os import re -import six import subprocess @@ -95,9 +94,7 @@ class ApacheConfContext(object): settings = utils.get_settings('apache') ctxt = settings['hardening'] - out = subprocess.check_output(['apache2', '-v']) - if six.PY3: - out = out.decode('utf-8') + out = subprocess.check_output(['apache2', '-v']).decode('utf-8') ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', out).group(1) ctxt['apache_icondir'] = '/usr/share/apache2/icons/' diff --git a/charmhelpers/contrib/hardening/audits/apache.py b/charmhelpers/contrib/hardening/audits/apache.py index c153762..31db8f6 100644 --- a/charmhelpers/contrib/hardening/audits/apache.py +++ b/charmhelpers/contrib/hardening/audits/apache.py @@ -15,8 +15,6 @@ import re import subprocess -import six - from charmhelpers.core.hookenv import ( log, INFO, @@ -35,7 +33,7 @@ class DisabledModuleAudit(BaseAudit): def __init__(self, modules): if modules is None: self.modules = [] - elif isinstance(modules, six.string_types): + elif isinstance(modules, str): self.modules = [modules] else: self.modules = modules @@ -68,9 +66,7 @@ class DisabledModuleAudit(BaseAudit): @staticmethod def _get_loaded_modules(): 
"""Returns the modules which are enabled in Apache.""" - output = subprocess.check_output(['apache2ctl', '-M']) - if six.PY3: - output = output.decode('utf-8') + output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8') modules = [] for line in output.splitlines(): # Each line of the enabled module output looks like: diff --git a/charmhelpers/contrib/hardening/audits/apt.py b/charmhelpers/contrib/hardening/audits/apt.py index cad7bf7..1b22925 100644 --- a/charmhelpers/contrib/hardening/audits/apt.py +++ b/charmhelpers/contrib/hardening/audits/apt.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import # required for external apt import -from six import string_types - from charmhelpers.fetch import ( apt_cache, apt_purge @@ -51,7 +48,7 @@ class RestrictedPackages(BaseAudit): def __init__(self, pkgs, **kwargs): super(RestrictedPackages, self).__init__(**kwargs) - if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'): + if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'): self.pkgs = pkgs.split() else: self.pkgs = pkgs diff --git a/charmhelpers/contrib/hardening/audits/file.py b/charmhelpers/contrib/hardening/audits/file.py index 257c635..84cc249 100644 --- a/charmhelpers/contrib/hardening/audits/file.py +++ b/charmhelpers/contrib/hardening/audits/file.py @@ -23,7 +23,6 @@ from subprocess import ( check_call, ) from traceback import format_exc -from six import string_types from stat import ( S_ISGID, S_ISUID @@ -63,7 +62,7 @@ class BaseFileAudit(BaseAudit): """ super(BaseFileAudit, self).__init__(*args, **kwargs) self.always_comply = always_comply - if isinstance(paths, string_types) or not hasattr(paths, '__iter__'): + if isinstance(paths, str) or not hasattr(paths, '__iter__'): self.paths = [paths] else: self.paths = paths diff --git a/charmhelpers/contrib/hardening/harden.py b/charmhelpers/contrib/hardening/harden.py index 
63f21b9..45ad076 100644 --- a/charmhelpers/contrib/hardening/harden.py +++ b/charmhelpers/contrib/hardening/harden.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - from collections import OrderedDict from charmhelpers.core.hookenv import ( @@ -53,18 +51,17 @@ def harden(overrides=None): overrides = [] def _harden_inner1(f): - # As this has to be py2.7 compat, we can't use nonlocal. Use a trick - # to capture the dictionary that can then be updated. - _logged = {'done': False} + _logged = False def _harden_inner2(*args, **kwargs): # knock out hardening via a config var; normally it won't get # disabled. + nonlocal _logged if _DISABLE_HARDENING_FOR_UNIT_TEST: return f(*args, **kwargs) - if not _logged['done']: + if not _logged: log("Hardening function '%s'" % (f.__name__), level=DEBUG) - _logged['done'] = True + _logged = True RUN_CATALOG = OrderedDict([('os', run_os_checks), ('ssh', run_ssh_checks), ('mysql', run_mysql_checks), @@ -74,7 +71,7 @@ def harden(overrides=None): if enabled: modules_to_run = [] # modules will always be performed in the following order - for module, func in six.iteritems(RUN_CATALOG): + for module, func in RUN_CATALOG.items(): if module in enabled: enabled.remove(module) modules_to_run.append(func) diff --git a/charmhelpers/contrib/hardening/host/checks/login.py b/charmhelpers/contrib/hardening/host/checks/login.py index fe2bc6e..fd500c8 100644 --- a/charmhelpers/contrib/hardening/host/checks/login.py +++ b/charmhelpers/contrib/hardening/host/checks/login.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from six import string_types - from charmhelpers.contrib.hardening.audits.file import TemplatedFile from charmhelpers.contrib.hardening.host import TEMPLATES_DIR from charmhelpers.contrib.hardening import utils @@ -41,7 +39,7 @@ class LoginContext(object): # a string assume it to be octal and turn it into an octal # string. umask = settings['environment']['umask'] - if not isinstance(umask, string_types): + if not isinstance(umask, str): umask = '%s' % oct(umask) ctxt = { diff --git a/charmhelpers/contrib/hardening/host/checks/sysctl.py b/charmhelpers/contrib/hardening/host/checks/sysctl.py index f1ea581..8a57d83 100644 --- a/charmhelpers/contrib/hardening/host/checks/sysctl.py +++ b/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -15,7 +15,6 @@ import os import platform import re -import six import subprocess from charmhelpers.core.hookenv import ( @@ -183,9 +182,9 @@ class SysCtlHardeningContext(object): ctxt['sysctl'][key] = d[2] or None - # Translate for python3 - return {'sysctl_settings': - [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]} + return { + 'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()] + } class SysctlConf(TemplatedFile): diff --git a/charmhelpers/contrib/hardening/mysql/checks/config.py b/charmhelpers/contrib/hardening/mysql/checks/config.py index a79f33b..8bf9f36 100644 --- a/charmhelpers/contrib/hardening/mysql/checks/config.py +++ b/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six import subprocess from charmhelpers.core.hookenv import ( @@ -82,6 +81,6 @@ class MySQLConfContext(object): """ def __call__(self): settings = utils.get_settings('mysql') - # Translate for python3 - return {'mysql_settings': - [(k, v) for k, v in six.iteritems(settings['security'])]} + return { + 'mysql_settings': [(k, v) for k, v in settings['security'].items()] + } diff --git a/charmhelpers/contrib/hardening/templating.py b/charmhelpers/contrib/hardening/templating.py index 5b6765f..4dee546 100644 --- a/charmhelpers/contrib/hardening/templating.py +++ b/charmhelpers/contrib/hardening/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import six from charmhelpers.core.hookenv import ( log, @@ -27,10 +26,7 @@ except ImportError: from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_update apt_update(fatal=True) - if six.PY2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment diff --git a/charmhelpers/contrib/hardening/utils.py b/charmhelpers/contrib/hardening/utils.py index 56afa4b..f93851a 100644 --- a/charmhelpers/contrib/hardening/utils.py +++ b/charmhelpers/contrib/hardening/utils.py @@ -16,7 +16,6 @@ import glob import grp import os import pwd -import six import yaml from charmhelpers.core.hookenv import ( @@ -91,7 +90,7 @@ def _apply_overrides(settings, overrides, schema): :returns: dictionary of modules config with user overrides applied. 
""" if overrides: - for k, v in six.iteritems(overrides): + for k, v in overrides.items(): if k in schema: if schema[k] is None: settings[k] = v diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py index b356d64..de56584 100644 --- a/charmhelpers/contrib/network/ip.py +++ b/charmhelpers/contrib/network/ip.py @@ -15,7 +15,6 @@ import glob import re import subprocess -import six import socket from functools import partial @@ -39,20 +38,14 @@ try: import netifaces except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netifaces', fatal=True) - else: - apt_install('python3-netifaces', fatal=True) + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - if six.PY2: - apt_install('python-netaddr', fatal=True) - else: - apt_install('python3-netaddr', fatal=True) + apt_install('python3-netaddr', fatal=True) import netaddr @@ -462,15 +455,12 @@ def ns_query(address): try: import dns.resolver except ImportError: - if six.PY2: - apt_install('python-dnspython', fatal=True) - else: - apt_install('python3-dnspython', fatal=True) + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, six.string_types): + elif isinstance(address, str): rtype = 'A' else: return None @@ -513,10 +503,7 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - if six.PY2: - apt_install("python-dnspython", fatal=True) - else: - apt_install("python3-dnspython", fatal=True) + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/charmhelpers/contrib/openstack/files/check_deferred_restarts.py new file mode 100755 index 0000000..5f392b3 --- /dev/null +++ b/charmhelpers/contrib/openstack/files/check_deferred_restarts.py @@ 
-0,0 +1,128 @@ +#!/usr/bin/python3 + +# Copyright 2014-2022 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Checks for services with deferred restarts. + +This Nagios check will parse /var/lib/policy-rc.d/ +to find any restarts that are currently deferred. +""" + +import argparse +import glob +import sys +import yaml + + +DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' + + +def get_deferred_events(): + """Return a list of deferred events dicts from policy-rc.d files. + + Events are read from DEFERRED_EVENTS_DIR and are of the form: + { + action: restart, + policy_requestor_name: rabbitmq-server, + policy_requestor_type: charm, + reason: 'Pkg update', + service: rabbitmq-server, + time: 1614328743 + } + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of deferred event dictionaries + :rtype: list + """ + deferred_events_files = glob.glob( + '{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) + + deferred_events = [] + for event_file in deferred_events_files: + with open(event_file, 'r') as f: + event = yaml.safe_load(f) + deferred_events.append(event) + + return deferred_events + + +def get_deferred_restart_services(application=None): + """Returns a list of services with deferred restarts. + + :param str application: Name of the application that blocked the service restart.
+ If application is None, all services with deferred restarts + are returned. Services which are blocked by a non-charm + requestor are always returned. + + :raises OSError: Raised in case of a system error while reading a policy file + :raises yaml.YAMLError: Raised if parsing a policy file fails + + :returns: List of services with deferred restarts belonging to application. + :rtype: list + """ + + deferred_restart_events = filter( + lambda e: e['action'] == 'restart', get_deferred_events()) + + deferred_restart_services = set() + for restart_event in deferred_restart_events: + if application: + if ( + restart_event['policy_requestor_type'] != 'charm' or + restart_event['policy_requestor_type'] == 'charm' and + restart_event['policy_requestor_name'] == application + ): + deferred_restart_services.add(restart_event['service']) + else: + deferred_restart_services.add(restart_event['service']) + + return list(deferred_restart_services) + + +def main(): + """Check for services with deferred restarts.""" + parser = argparse.ArgumentParser( + description='Check for services with deferred restarts') + parser.add_argument( + '--application', help='Check services belonging to this application only') + + args = parser.parse_args() + + services = set(get_deferred_restart_services(args.application)) + + if len(services) == 0: + print('OK: No deferred service restarts.') + sys.exit(0) + else: + print( + 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) + sys.exit(1) + + +if __name__ == '__main__': + try: + main() + except OSError as e: + print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) + sys.exit(1) + except yaml.YAMLError as e: + print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) + sys.exit(1) + except Exception as e: + print('CRITICAL: An unknown error occurred: {}'.format(str(e))) + sys.exit(1) diff --git a/charmhelpers/contrib/openstack/policyd.py b/charmhelpers/contrib/openstack/policyd.py 
index 6fa06f2..767943c 100644 --- a/charmhelpers/contrib/openstack/policyd.py +++ b/charmhelpers/contrib/openstack/policyd.py @@ -15,7 +15,6 @@ import collections import contextlib import os -import six import shutil import yaml import zipfile @@ -204,12 +203,6 @@ class BadPolicyYamlFile(Exception): return self.log_message -if six.PY2: - BadZipFile = zipfile.BadZipfile -else: - BadZipFile = zipfile.BadZipFile - - def is_policyd_override_valid_on_this_release(openstack_release): """Check that the charm is running on at least Ubuntu Xenial, and at least the queens release. @@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): if blacklisted_keys_present: raise BadPolicyYamlFile("blacklisted keys {} present." .format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, six.string_types) for k in keys): + if not all(isinstance(k, str) for k in keys): raise BadPolicyYamlFile("keys in yaml aren't all strings?") # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, six.string_types) for v in doc.values()): + if not all(isinstance(v, str) for v in doc.values()): raise BadPolicyYamlFile("values in yaml aren't all strings?") return doc @@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir - for direntry in _scanner(path): + for direntry in os.scandir(path): # see if the path should be kept. if direntry.path in keep_paths: continue @@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group): ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) -@contextlib.contextmanager -def _fallback_scandir(path): - """Fallback os.scandir implementation. 
- - provide a fallback implementation of os.scandir if this module ever gets - used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, - and then mocks the is_dir() function using os.path.isdir() to check for - directory. - - :param path: the path to list the directories for - :type path: str - :returns: Generator that provides _FBDirectory objects - :rtype: ContextManager[_FBDirectory] - """ - for f in os.listdir(path): - yield _FBDirectory(f) - - -class _FBDirectory(object): - """Mock a scandir Directory object with enough to use in - clean_policyd_dir_for - """ - - def __init__(self, path): - self.path = path - - def is_dir(self): - return os.path.isdir(self.path) - - def path_for_policy_file(service, name): """Return the full path for a policy.d file that will be written to the service's policy.d directory. @@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file, _group) # Every thing worked, so we mark up a success. completed = True - except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), level=POLICYD_LOG_LEVEL_DEFAULT) except IOError as e: diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index 9cc96d6..c8747c1 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -25,7 +25,6 @@ import re import itertools import functools -import six import traceback import uuid import yaml @@ -362,6 +361,8 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel + if src in OPENSTACK_RELEASES: + return src if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] @@ -401,7 +402,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from 
codename.''' - for k, v in six.iteritems(version_map): + for k, v in version_map.items(): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -411,7 +412,7 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): def get_os_version_codename_swift(codename): '''Determine OpenStack version number of swift from codename.''' - for k, v in six.iteritems(SWIFT_CODENAMES): + for k, v in SWIFT_CODENAMES.items(): if k == codename: return v[-1] e = 'Could not derive swift version for '\ @@ -421,17 +422,17 @@ def get_os_version_codename_swift(codename): def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] if len(codenames) > 1: # If more than one release codename contains this version we determine # the actual codename based on the highest available install source.
for codename in reversed(codenames): releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in six.iteritems(releases) if codename in v] - ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) - if six.PY3: - ret = ret.decode('UTF-8') + release = [k for k, v in releases.items() if codename in v] + ret = (subprocess + .check_output(['apt-cache', 'policy', 'swift']) + .decode('UTF-8')) if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -441,7 +443,7 @@ def get_swift_codename(version): match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) - for codename, versions in six.iteritems(SWIFT_CODENAMES): + for codename, versions in SWIFT_CODENAMES.items(): for release_version in versions: if release_version.startswith(major_minor_version): return codename @@ -477,9 +479,7 @@ def get_os_codename_package(package, fatal=True): if snap_install_requested(): cmd = ['snap', 'list', package] try: - out = subprocess.check_output(cmd) - if six.PY3: - out = out.decode('UTF-8') + out = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return None lines = out.split('\n') @@ -549,16 +549,14 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES - for cname, version in six.iteritems(vers_map): + for cname, version in vers_map.items(): if cname == codename: return version[-1] else: vers_map = OPENSTACK_CODENAMES - for version, cname in six.iteritems(vers_map): + for version, cname in vers_map.items(): if cname == codename: return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) def get_installed_os_version(): @@ -821,10 +819,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wt') as rc_script: - rc_script.write( - "#!/bin/bash\n") - 
[rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] + rc_script.write("#!/bin/bash\n") + for u, p in env_vars.items(): + if u != "script_path": + rc_script.write('export %s=%s\n' % (u, p)) def openstack_upgrade_available(package): @@ -1039,7 +1037,7 @@ def _determine_os_workload_status( state, message, lambda: charm_func(configs)) if state is None: - state, message = _ows_check_services_running(services, ports) + state, message = ows_check_services_running(services, ports) if state is None: state = 'active' @@ -1213,7 +1211,12 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): return state, message +@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) def _ows_check_services_running(services, ports): + return ows_check_services_running(services, ports) + + +def ows_check_services_running(services, ports): """Check that the services that should be running are actually running and that any ports specified are being listened to. @@ -1413,45 +1416,75 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs, - force_upgrade=False): +def do_action_openstack_upgrade(package, upgrade_callback, configs): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets the corresponding action status as a result. - If the charm was installed from source we cannot upgrade it. For backwards compatibility a config flag (action-managed-upgrade) must be set for this code to run, otherwise a full service level upgrade will fire on config-changed. 
- @param package: package name for determining if upgrade available + @param package: package name for determining if openstack upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class - @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package) or force_upgrade: + if openstack_upgrade_available(package): if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') try: upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed.'}) + action_set({'outcome': 'success, upgrade completed'}) ret = True except Exception: - action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'outcome': 'upgrade failed, see traceback'}) action_set({'traceback': traceback.format_exc()}) - action_fail('do_openstack_upgrade resulted in an ' + action_fail('upgrade callback resulted in an ' 'unexpected error') else: action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade.'}) + 'False, skipped upgrade'}) else: - action_set({'outcome': 'no upgrade available.'}) + action_set({'outcome': 'no upgrade available'}) + + return ret + + +def do_action_package_upgrade(package, upgrade_callback, configs): + """Perform package upgrade within the current OpenStack release. + + Upgrades packages only if there is not an openstack upgrade available, + and sets the corresponding action status as a result. 
+ + @param package: package name for determining if openstack upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if not openstack_upgrade_available(package): + juju_log('Upgrading packages') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed'}) + ret = True + except Exception: + action_set({'outcome': 'upgrade failed, see traceback'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('upgrade callback resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'upgrade skipped because an openstack upgrade ' + 'is available'}) return ret @@ -1849,21 +1882,20 @@ def pausable_restart_on_change(restart_map, stopstart=False, """ def wrap(f): - # py27 compatible nonlocal variable. When py3 only, replace with - # nonlocal keyword - __restart_map_cache = {'cache': None} + __restart_map_cache = None @functools.wraps(f) def wrapped_f(*args, **kwargs): + nonlocal __restart_map_cache if is_unit_paused_set(): return f(*args, **kwargs) - if __restart_map_cache['cache'] is None: - __restart_map_cache['cache'] = restart_map() \ + if __restart_map_cache is None: + __restart_map_cache = restart_map() \ if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( (lambda: f(*args, **kwargs)), - __restart_map_cache['cache'], + __restart_map_cache, stopstart, restart_functions, can_restart_now_f, @@ -1888,7 +1920,7 @@ def ordered(orderme): raise ValueError('argument must be a dict type') result = OrderedDict() - for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + for k, v in sorted(orderme.items(), key=lambda x: x[0]): if isinstance(v, dict): result[k] = ordered(v) else: diff --git a/charmhelpers/contrib/python.py 
b/charmhelpers/contrib/python.py index 84cba8c..fcded68 100644 --- a/charmhelpers/contrib/python.py +++ b/charmhelpers/contrib/python.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import - # deprecated aliases for backwards compatibility from charmhelpers.fetch.python import debug # noqa from charmhelpers.fetch.python import packages # noqa diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py index c70aeb2..1b20b8f 100644 --- a/charmhelpers/contrib/storage/linux/ceph.py +++ b/charmhelpers/contrib/storage/linux/ceph.py @@ -23,7 +23,6 @@ import collections import errno import hashlib import math -import six import os import shutil @@ -218,7 +217,7 @@ def validator(value, valid_type, valid_range=None): "was given {} of type {}" .format(valid_range, type(valid_range))) # If we're dealing with strings - if isinstance(value, six.string_types): + if isinstance(value, str): assert value in valid_range, ( "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max @@ -434,9 +433,9 @@ class BasePool(object): :type mode: str """ # Check the input types and values - validator(value=cache_pool, valid_type=six.string_types) + validator(value=cache_pool, valid_type=str) validator( - value=mode, valid_type=six.string_types, + value=mode, valid_type=str, valid_range=["readonly", "writeback"]) check_call([ @@ -615,7 +614,8 @@ class Pool(BasePool): class ReplicatedPool(BasePool): def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None): + percent_data=None, app_name=None, op=None, + profile_name='replicated_rule'): """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword @@ -632,6 +632,8 @@ class ReplicatedPool(BasePool): to this replicated pool. 
:type replicas: int :raises: KeyError + :param profile_name: Crush Profile to use + :type profile_name: Optional[str] """ # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. @@ -646,11 +648,20 @@ class ReplicatedPool(BasePool): # we will fail with KeyError if it is not provided. self.replicas = op['replicas'] self.pg_num = op.get('pg_num') + self.profile_name = op.get('crush-profile') or profile_name else: self.replicas = replicas or 2 self.pg_num = pg_num + self.profile_name = profile_name or 'replicated_rule' def _create(self): + # Validate if crush profile exists + if self.profile_name is None: + msg = ("Failed to discover crush profile named " + "{}".format(self.profile_name)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + # Do extra validation on pg_num with data from live cluster if self.pg_num: # Since the number of placement groups were specified, ensure @@ -668,12 +679,12 @@ class ReplicatedPool(BasePool): '--pg-num-min={}'.format( min(AUTOSCALER_DEFAULT_PGS, self.pg_num) ), - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] else: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num) + self.name, str(self.pg_num), self.profile_name ] check_call(cmd) @@ -692,7 +703,7 @@ class ErasurePool(BasePool): def __init__(self, service, name=None, erasure_code_profile=None, percent_data=None, app_name=None, op=None, allow_ec_overwrites=False): - """Initialize ReplicatedPool object. + """Initialize ErasurePool object. Pool information is either initialized from individual keyword arguments or from a individual CephBrokerRq operation Dict. 
@@ -778,10 +789,11 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] + quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 + if quincy_or_later: + cmd.append('--format=json') try: - modules = check_output(cmd) - if six.PY3: - modules = modules.decode('UTF-8') + modules = check_output(cmd).decode('utf-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -814,10 +826,10 @@ def get_mon_map(service): ceph command fails. """ try: - mon_status = check_output(['ceph', '--id', service, - 'mon_status', '--format=json']) - if six.PY3: - mon_status = mon_status.decode('UTF-8') + octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 + mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' + mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, + '--format=json'])).decode('utf-8') try: return json.loads(mon_status) except ValueError as v: @@ -959,9 +971,7 @@ def get_erasure_profile(service, name): try: out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + name, '--format=json']).decode('utf-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -1164,8 +1174,7 @@ def create_erasure_profile(service, profile_name, 'nvme' ] - validator(erasure_plugin_name, six.string_types, - list(plugin_techniques.keys())) + validator(erasure_plugin_name, str, list(plugin_techniques.keys())) cmd = [ 'ceph', '--id', service, @@ -1176,7 +1185,7 @@ def create_erasure_profile(service, profile_name, ] if erasure_plugin_technique: - validator(erasure_plugin_technique, six.string_types, + validator(erasure_plugin_technique, str, plugin_techniques[erasure_plugin_name]) cmd.append('technique={}'.format(erasure_plugin_technique)) @@ -1189,7 +1198,7 @@ def create_erasure_profile(service, profile_name, failure_domain = 'rack' if 
failure_domain: - validator(failure_domain, six.string_types, failure_domains) + validator(failure_domain, str, failure_domains) # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain={}'.format(failure_domain)) @@ -1198,7 +1207,7 @@ def create_erasure_profile(service, profile_name, # device class new in luminous if luminous_or_later and device_class: - validator(device_class, six.string_types, device_classes) + validator(device_class, str, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', @@ -1213,7 +1222,7 @@ def create_erasure_profile(service, profile_name, raise ValueError("locality must be provided for lrc plugin") # LRC optional configuration if crush_locality: - validator(crush_locality, six.string_types, failure_domains) + validator(crush_locality, str, failure_domains) cmd.append('crush-locality={}'.format(crush_locality)) if erasure_plugin_name == 'shec': @@ -1241,8 +1250,8 @@ def rename_pool(service, old_name, new_name): :param new_name: Name to rename pool to. :type new_name: str """ - validator(value=old_name, valid_type=six.string_types) - validator(value=new_name, valid_type=six.string_types) + validator(value=old_name, valid_type=str) + validator(value=new_name, valid_type=str) cmd = [ 'ceph', '--id', service, @@ -1260,7 +1269,7 @@ def erasure_profile_exists(service, name): :returns: True if it exists, False otherwise. :rtype: bool """ - validator(value=name, valid_type=six.string_types) + validator(value=name, valid_type=str) try: check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', @@ -1280,12 +1289,10 @@ def get_cache_mode(service, pool_name): :returns: Current cache mode. 
:rtype: Optional[int] """ - validator(value=service, valid_type=six.string_types) - validator(value=pool_name, valid_type=six.string_types) + validator(value=service, valid_type=str) + validator(value=pool_name, valid_type=str) out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + 'osd', 'dump', '--format=json']).decode('utf-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -1299,9 +1306,8 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output( + ['rados', '--id', service, 'lspools']).decode('utf-8') except CalledProcessError: return False @@ -1320,13 +1326,11 @@ def get_osds(service, device_class=None): out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, - '--format=json']) + '--format=json']).decode('utf-8') else: out = check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']) - if six.PY3: - out = out.decode('UTF-8') + '--format=json']).decode('utf-8') return json.loads(out) @@ -1343,9 +1347,7 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]) - if six.PY3: - out = out.decode('UTF-8') + service, '--pool', pool]).decode('utf-8') except CalledProcessError: return False @@ -1371,7 +1373,7 @@ def update_pool(client, pool, settings): :raises: CalledProcessError """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in six.iteritems(settings): + for k, v in settings.items(): check_call(cmd + [k, v]) @@ -1509,9 +1511,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 
'showmapped']) - if six.PY3: - out = out.decode('UTF-8') + out = check_output(['rbd', 'showmapped']).decode('utf-8') except CalledProcessError: return False @@ -1857,7 +1857,7 @@ class CephBrokerRq(object): } def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - **kwargs): + crush_profile=None, **kwargs): """Adds an operation to create a replicated pool. Refer to docstring for ``_partial_build_common_op_create`` for @@ -1871,6 +1871,10 @@ class CephBrokerRq(object): for pool. :type pg_num: int :raises: AssertionError if provided data is of invalid type/range + :param crush_profile: Name of crush profile to use. If not set the + ceph-mon unit handling the broker request will + set its default value. + :type crush_profile: Optional[str] """ if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') @@ -1880,6 +1884,7 @@ class CephBrokerRq(object): 'name': name, 'replicas': replica_count, 'pg_num': pg_num, + 'crush-profile': crush_profile } op.update(self._partial_build_common_op_create(**kwargs)) diff --git a/charmhelpers/contrib/storage/linux/loopback.py b/charmhelpers/contrib/storage/linux/loopback.py index 74bab40..04daea2 100644 --- a/charmhelpers/contrib/storage/linux/loopback.py +++ b/charmhelpers/contrib/storage/linux/loopback.py @@ -19,8 +19,6 @@ from subprocess import ( check_output, ) -import six - ################################################## # loopback device helpers. 
@@ -40,9 +38,7 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - output = check_output(cmd) - if six.PY3: - output = output.decode('utf-8') + output = check_output(cmd).decode('utf-8') devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] @@ -57,7 +53,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == file_path: return d @@ -71,7 +67,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in six.iteritems(loopback_devices()): + for d, f in loopback_devices().items(): if f == path: return d diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py index e94247a..370c3e8 100644 --- a/charmhelpers/core/hookenv.py +++ b/charmhelpers/core/hookenv.py @@ -17,12 +17,11 @@ # Authors: # Charm Helpers Developers -from __future__ import print_function import copy from distutils.version import LooseVersion from enum import Enum from functools import wraps -from collections import namedtuple +from collections import namedtuple, UserDict import glob import os import json @@ -36,12 +35,6 @@ from subprocess import CalledProcessError from charmhelpers import deprecate -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -112,7 +105,7 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests @@ -132,7 +125,7 @@ def log(message, level=None): def function_log(message): 
"""Write a function progress message""" command = ['function-log'] - if not isinstance(message, six.string_types): + if not isinstance(message, str): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing function-log should not cause failures in unit tests @@ -445,12 +438,6 @@ def config(scope=None): """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] - try: - # JSON Decode Exception for Python3.5+ - exc_json = json.decoder.JSONDecodeError - except AttributeError: - # JSON Decode Exception for Python2.7 through Python3.4 - exc_json = ValueError try: if _cache_config is None: config_data = json.loads( @@ -459,7 +446,7 @@ def config(scope=None): if scope is not None: return _cache_config.get(scope) return _cache_config - except (exc_json, UnicodeDecodeError) as e: + except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) @@ -491,12 +478,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None): raise +@cached +def _relation_set_accepts_file(): + """Return True if the juju relation-set command accepts a file. + + Cache the result as it won't change during the execution of a hook, and + thus we can make relation_set() more efficient by only checking for the + first relation_set() call. + + :returns: True if relation_set accepts a file. + :rtype: bool + :raises: subprocess.CalledProcessError if the check fails. 
+ """ + return "--file" in subprocess.check_output( + ["relation-set", "--help"], universal_newlines=True) + + def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) if app: relation_cmd_line.append('--app') if relation_id is not None: @@ -508,7 +509,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): # sites pass in things like dicts or numbers. if value is not None: settings[key] = "{}".format(value) - if accepts_file: + if _relation_set_accepts_file(): # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from @@ -1003,14 +1004,8 @@ def cmd_exists(cmd): @cached -@deprecate("moved to function_get()", log=log) def action_get(key=None): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_get`. - - Gets the value of an action parameter, or all key/value param pairs. - """ + """Gets the value of an action parameter, or all key/value param pairs.""" cmd = ['action-get'] if key is not None: cmd.append(key) @@ -1020,8 +1015,12 @@ def action_get(key=None): @cached +@deprecate("moved to action_get()", log=log) def function_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['function-get'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1034,22 +1033,20 @@ def function_get(key=None): return function_data -@deprecate("moved to function_set()", log=log) def action_set(values): - """ - .. deprecated:: 0.20.7 - Alias for :func:`function_set`. 
- - Sets the values to be returned after the action finishes. - """ + """Sets the values to be returned after the action finishes.""" cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) +@deprecate("moved to action_set()", log=log) def function_set(values): - """Sets the values to be returned after the function finishes""" + """ + .. deprecated:: + Sets the values to be returned after the function finishes. + """ cmd = ['function-set'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1060,12 +1057,8 @@ def function_set(values): subprocess.check_call(cmd) -@deprecate("moved to function_fail()", log=log) def action_fail(message): """ - .. deprecated:: 0.20.7 - Alias for :func:`function_fail`. - Sets the action status to failed and sets the error message. The results set by action_set are preserved. @@ -1073,10 +1066,14 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +@deprecate("moved to action_fail()", log=log) def function_fail(message): - """Sets the function status to failed and sets the error message. + """ + .. deprecated:: + Sets the function status to failed and sets the error message. - The results set by function_set are preserved.""" + The results set by function_set are preserved. + """ cmd = ['function-fail'] # Fallback for older charms. if not cmd_exists('function-fail'): diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py index 994ec8a..ad2cab4 100644 --- a/charmhelpers/core/host.py +++ b/charmhelpers/core/host.py @@ -31,7 +31,6 @@ import subprocess import hashlib import functools import itertools -import six from contextlib import contextmanager from collections import OrderedDict, defaultdict @@ -115,6 +114,33 @@ def service_stop(service_name, **kwargs): return service('stop', service_name, **kwargs) +def service_enable(service_name, **kwargs): + """Enable a system service. 
+ + The specified service name is managed via the system level init system. + Some init systems (e.g. upstart) require that additional arguments be + provided in order to directly control service instances whereas other init + systems allow for addressing instances of a service directly by name (e.g. + systemd). + + The kwargs allow for the additional parameters to be passed to underlying + init systems for those systems which require/allow for them. For example, + the ceph-osd upstart script requires the id parameter to be passed along + in order to identify which running daemon should be enabled. The follow- + ing example enables the ceph-osd service for instance id=4: + + service_enable('ceph-osd', id=4) + + :param service_name: the name of the service to enable + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for init systems not allowing additional + parameters via the commandline (systemd). + """ + return service('enable', service_name, **kwargs) + + def service_restart(service_name, **kwargs): """Restart a system service. @@ -135,7 +161,7 @@ :param service_name: the name of the service to restart :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs + parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). 
""" @@ -263,7 +289,7 @@ def service(action, service_name, **kwargs): cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -289,7 +315,7 @@ def service_running(service_name, **kwargs): if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] - for key, value in six.iteritems(kwargs): + for key, value in kwargs.items(): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output( @@ -564,7 +590,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - if six.PY3 and isinstance(content, six.string_types): + if isinstance(content, str): content = content.encode('UTF-8') target.write(content) return @@ -967,7 +993,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): + if isinstance(nic_type, str): int_types = [nic_type] else: int_types = nic_type @@ -1081,8 +1107,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): try: chown(full, uid, gid) except (IOError, OSError) as e: - # Intended to ignore "file not found". Catching both to be - # compatible with both Python 2.7 and 3.x. + # Intended to ignore "file not found". 
if e.errno == errno.ENOENT: pass diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py index 9f88029..7c37c65 100644 --- a/charmhelpers/core/services/base.py +++ b/charmhelpers/core/services/base.py @@ -17,8 +17,6 @@ import json import inspect from collections import Iterable, OrderedDict -import six - from charmhelpers.core import host from charmhelpers.core import hookenv @@ -171,10 +169,7 @@ class ServiceManager(object): if not units: continue remote_service = units[0].split('/')[0] - if six.PY2: - argspec = inspect.getargspec(provider.provide_data) - else: - argspec = inspect.getfullargspec(provider.provide_data) + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/charmhelpers/core/services/helpers.py b/charmhelpers/core/services/helpers.py index 3e6e30d..5bf62dd 100644 --- a/charmhelpers/core/services/helpers.py +++ b/charmhelpers/core/services/helpers.py @@ -179,7 +179,7 @@ class RequiredConfig(dict): self.required_options = args self['config'] = hookenv.config() with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) + self.config = yaml.safe_load(fp).get('options', {}) def __bool__(self): for option in self.required_options: @@ -227,7 +227,7 @@ class StoredContext(dict): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) + data = yaml.safe_load(file_stream) if not data: raise OSError("%s is empty" % file_name) return data diff --git a/charmhelpers/core/strutils.py b/charmhelpers/core/strutils.py index 28c6b3f..3136687 100644 --- a/charmhelpers/core/strutils.py +++ b/charmhelpers/core/strutils.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six import re TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} @@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY Returns True if value translates to True otherwise False. """ - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as boolean" % (value) raise ValueError(msg) @@ -61,8 +60,8 @@ def bytes_from_string(value): 'P': 5, 'PB': 5, } - if isinstance(value, six.string_types): - value = six.text_type(value) + if isinstance(value, str): + value = str(value) else: msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) diff --git a/charmhelpers/core/templating.py b/charmhelpers/core/templating.py index 9014015..cb0213d 100644 --- a/charmhelpers/core/templating.py +++ b/charmhelpers/core/templating.py @@ -13,7 +13,6 @@ # limitations under the License. import os -import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python-jinja2 or python3-jinja2; if it is not - installed, calling this will attempt to use charmhelpers.fetch.apt_install - to install it. + Note: Using this requires python3-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. 
""" try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - if sys.version_info.major == 2: - apt_install('python-jinja2', fatal=True) - else: - apt_install('python3-jinja2', fatal=True) + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/charmhelpers/fetch/__init__.py b/charmhelpers/fetch/__init__.py index 9497ee0..1283f25 100644 --- a/charmhelpers/fetch/__init__.py +++ b/charmhelpers/fetch/__init__.py @@ -20,11 +20,7 @@ from charmhelpers.core.hookenv import ( log, ) -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse +from urllib.parse import urlparse, urlunparse # The order of this list is very important. Handlers should be listed in from @@ -134,14 +130,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, six.string_types): + if isinstance(sources, str): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] if len(sources) != len(keys): diff --git a/charmhelpers/fetch/archiveurl.py b/charmhelpers/fetch/archiveurl.py index d25587a..2cb2e88 100644 --- a/charmhelpers/fetch/archiveurl.py +++ b/charmhelpers/fetch/archiveurl.py @@ -26,26 +26,15 @@ from charmhelpers.payload.archive import ( ) from charmhelpers.core.host import mkdir, check_hash -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError 
-else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs +from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from urllib.parse import urlparse, urlunparse, parse_qs +from urllib.error import URLError def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: @@ -54,7 +43,6 @@ def splituser(host): def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' _passwdprog = re.compile('^([^:]*):(.*)$', re.S) match = _passwdprog.match(user) if match: @@ -150,10 +138,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available + algorithms = hashlib.algorithms_available if key in algorithms: if len(value) != 1: raise TypeError( diff --git a/charmhelpers/fetch/centos.py b/charmhelpers/fetch/centos.py index a91dcff..f849201 100644 --- a/charmhelpers/fetch/centos.py +++ b/charmhelpers/fetch/centos.py @@ -15,7 +15,6 @@ import subprocess import os import time -import six import yum from tempfile import NamedTemporaryFile @@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False): if options is not None: cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -71,7 +70,7 @@ def update(fatal=False): def purge(packages, fatal=False): """Purge one or more packages.""" cmd = ['yum', '--assumeyes', 'remove'] - if isinstance(packages, six.string_types): + if 
isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -83,7 +82,7 @@ def yum_search(packages): """Search for a package.""" output = {} cmd = ['yum', 'search'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) diff --git a/charmhelpers/fetch/python/debug.py b/charmhelpers/fetch/python/debug.py index 757135e..dd5cca8 100644 --- a/charmhelpers/fetch/python/debug.py +++ b/charmhelpers/fetch/python/debug.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import atexit import sys diff --git a/charmhelpers/fetch/python/packages.py b/charmhelpers/fetch/python/packages.py index 6004835..93f1fa3 100644 --- a/charmhelpers/fetch/python/packages.py +++ b/charmhelpers/fetch/python/packages.py @@ -16,7 +16,6 @@ # limitations under the License. import os -import six import subprocess import sys @@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs): from pip import main as _pip_execute except ImportError: apt_update() - if six.PY2: - apt_install('python-pip') - else: - apt_install('python3-pip') + apt_install('python3-pip') from pip import main as _pip_execute _pip_execute(*args, **kwargs) finally: @@ -140,12 +136,8 @@ def pip_list(): def pip_create_virtualenv(path=None): """Create an isolated Python environment.""" - if six.PY2: - apt_install('python-virtualenv') - extra_flags = [] - else: - apt_install(['python3-virtualenv', 'virtualenv']) - extra_flags = ['--python=python3'] + apt_install(['python3-virtualenv', 'virtualenv']) + extra_flags = ['--python=python3'] if path: venv_path = path diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py index cf8328f..e6f8a0a 100644 --- a/charmhelpers/fetch/ubuntu.py +++ b/charmhelpers/fetch/ubuntu.py @@ -13,10 +13,8 @@ # limitations under the License. 
from collections import OrderedDict -import os import platform import re -import six import subprocess import sys import time @@ -361,7 +359,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -413,7 +411,7 @@ def apt_purge(packages, fatal=False): :raises: subprocess.CalledProcessError """ cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -440,7 +438,7 @@ def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): + if isinstance(packages, str): cmd.append(packages) else: cmd.extend(packages) @@ -485,10 +483,7 @@ def import_key(key): if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and '-----END PGP PUBLIC KEY BLOCK-----' in key): log("Writing provided PGP key in the binary format", level=DEBUG) - if six.PY3: - key_bytes = key.encode('utf-8') - else: - key_bytes = key + key_bytes = key.encode('utf-8') key_name = _get_keyid_by_gpg_key(key_bytes) key_gpg = _dearmor_gpg_key(key_bytes) _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) @@ -528,9 +523,8 @@ def _get_keyid_by_gpg_key(key_material): stderr=subprocess.PIPE, stdin=subprocess.PIPE) out, err = ps.communicate(input=key_material) - if six.PY3: - out = out.decode('utf-8') - err = err.decode('utf-8') + out = out.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' 
in err: raise GPGKeyError('Invalid GPG key material provided') # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) @@ -588,8 +582,7 @@ def _dearmor_gpg_key(key_asc): stdin=subprocess.PIPE) out, err = ps.communicate(input=key_asc) # no need to decode output as it is binary (invalid utf-8), only error - if six.PY3: - err = err.decode('utf-8') + err = err.decode('utf-8') if 'gpg: no valid OpenPGP data found.' in err: raise GPGKeyError('Invalid GPG key material. Check your network setup' ' (MTU, routing, DNS) and/or proxy server settings' @@ -693,7 +686,7 @@ def add_source(source, key=None, fail_invalid=False): ]) if source is None: source = '' - for r, fn in six.iteritems(_mapping): + for r, fn in _mapping.items(): m = re.match(r, source) if m: if key: @@ -726,7 +719,7 @@ def _add_proposed(): """ release = get_distrib_codename() arch = platform.machine() - if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + if arch not in ARCH_TO_PROPOSED_POCKET.keys(): raise SourceConfigError("Arch {} not supported for (distro-)proposed" .format(arch)) with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: @@ -913,9 +906,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL if not retry_message: retry_message = "Failed executing '{}'".format(" ".join(cmd)) @@ -957,9 +949,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False): else: kwargs = {} if quiet: - devnull = os.devnull if six.PY2 else subprocess.DEVNULL - kwargs['stdout'] = devnull - kwargs['stderr'] = devnull + kwargs['stdout'] = subprocess.DEVNULL + kwargs['stderr'] = subprocess.DEVNULL subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) @@ -989,7 +980,7 @@ def get_installed_version(package): Version object """ cache = apt_cache() - dpkg_result = 
cache._dpkg_list([package]).get(package, {}) + dpkg_result = cache.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') diff --git a/charmhelpers/fetch/ubuntu_apt_pkg.py b/charmhelpers/fetch/ubuntu_apt_pkg.py index 436e177..6da355f 100644 --- a/charmhelpers/fetch/ubuntu_apt_pkg.py +++ b/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -40,6 +40,9 @@ import os import subprocess import sys +from charmhelpers import deprecate +from charmhelpers.core.hookenv import log + class _container(dict): """Simple container for attributes.""" @@ -79,7 +82,7 @@ class Cache(object): apt_result = self._apt_cache_show([package])[package] apt_result['name'] = apt_result.pop('package') pkg = Package(apt_result) - dpkg_result = self._dpkg_list([package]).get(package, {}) + dpkg_result = self.dpkg_list([package]).get(package, {}) current_ver = None installed_version = dpkg_result.get('version') if installed_version: @@ -88,9 +91,29 @@ class Cache(object): pkg.architecture = dpkg_result.get('architecture') return pkg + @deprecate("use dpkg_list() instead.", "2022-05", log=log) def _dpkg_list(self, packages): + return self.dpkg_list(packages) + + def dpkg_list(self, packages): """Get data from system dpkg database for package. 
+ Note that this method is also useful for querying package names + containing wildcards, for example + + apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*']) + + may return + + { + 'nvidia-vgpu-ubuntu-470': { + 'name': 'nvidia-vgpu-ubuntu-470', + 'version': '470.68', + 'architecture': 'amd64', + 'description': 'NVIDIA vGPU driver - version 470.68' + } + } + :param packages: Packages to get data from :type packages: List[str] :returns: Structured data about installed packages, keys like diff --git a/lib/charms_ceph/utils.py b/lib/charms_ceph/utils.py index 025ab86..a22462e 100644 --- a/lib/charms_ceph/utils.py +++ b/lib/charms_ceph/utils.py @@ -1,4 +1,4 @@ -# Copyright 2017 Canonical Ltd +# Copyright 2017-2021 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -293,7 +293,7 @@ def get_link_speed(network_interface): def persist_settings(settings_dict): # Write all settings to /etc/hdparm.conf - """ This will persist the hard drive settings to the /etc/hdparm.conf file + """This will persist the hard drive settings to the /etc/hdparm.conf file The settings_dict should be in the form of {"uuid": {"key":"value"}} @@ -552,7 +552,7 @@ def get_osd_weight(osd_id): :returns: Float :raises: ValueError if the monmap fails to parse. - :raises: CalledProcessError if our ceph command fails. + :raises: CalledProcessError if our Ceph command fails. """ try: tree = str(subprocess @@ -560,7 +560,7 @@ def get_osd_weight(osd_id): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['nodes']: return None for device in json_tree['nodes']: @@ -619,12 +619,12 @@ def _flatten_roots(nodes, lookup_type='host'): def get_osd_tree(service): - """Returns the current osd map in JSON. + """Returns the current OSD map in JSON. :returns: List. 
:rtype: List[CrushLocation] :raises: ValueError if the monmap fails to parse. - Also raises CalledProcessError if our ceph command fails + Also raises CalledProcessError if our Ceph command fails """ try: tree = str(subprocess @@ -666,12 +666,12 @@ def _get_child_dirs(path): def _get_osd_num_from_dirname(dirname): """Parses the dirname and returns the OSD id. - Parses a string in the form of 'ceph-{osd#}' and returns the osd number + Parses a string in the form of 'ceph-{osd#}' and returns the OSD number from the directory name. :param dirname: the directory name to return the OSD number from - :return int: the osd number the directory name corresponds to - :raises ValueError: if the osd number cannot be parsed from the provided + :return int: the OSD number the directory name corresponds to + :raises ValueError: if the OSD number cannot be parsed from the provided directory name. """ match = re.search(r'ceph-(?P\d+)', dirname) @@ -686,7 +686,7 @@ def get_local_osd_ids(): to split the ID off of the directory name and return it in a list. - :returns: list. A list of osd identifiers + :returns: list. A list of OSD identifiers :raises: OSError if something goes wrong with listing the directory. 
""" osd_ids = [] @@ -875,12 +875,12 @@ DISK_FORMATS = [ ] CEPH_PARTITIONS = [ - '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation - '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal - '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data - '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data - '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal - '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation + '89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation + '45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal + '4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data + '4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data + '45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal + '89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation ] @@ -984,7 +984,7 @@ def is_osd_disk(dev): def start_osds(devices): - # Scan for ceph block devices + # Scan for Ceph block devices rescan_osd_devices() if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and cmp_pkgrevno('ceph', '14.2.0') < 0): @@ -1229,12 +1229,6 @@ def get_named_key(name, caps=None, pool_list=None): 'get', key_name, ]).decode('UTF-8')).strip() - # NOTE(jamespage); - # Apply any changes to key capabilities, dealing with - # upgrades which requires new caps for operation. - upgrade_key_caps(key_name, - caps or _default_caps, - pool_list) return parse_key(output) except subprocess.CalledProcessError: # Couldn't get the key, time to create it! 
@@ -1270,7 +1264,7 @@ def get_named_key(name, caps=None, pool_list=None): def upgrade_key_caps(key, caps, pool_list=None): - """ Upgrade key to have capabilities caps """ + """Upgrade key to have capabilities caps""" if not is_leader(): # Not the MON leader OR not clustered return @@ -1304,11 +1298,11 @@ def use_bluestore(): def bootstrap_monitor_cluster(secret): - """Bootstrap local ceph mon into the ceph cluster + """Bootstrap local Ceph mon into the Ceph cluster :param secret: cephx secret to use for monitor authentication :type secret: str - :raises: Exception if ceph mon cannot be bootstrapped + :raises: Exception if Ceph mon cannot be bootstrapped """ hostname = socket.gethostname() path = '/var/lib/ceph/mon/ceph-{}'.format(hostname) @@ -1351,11 +1345,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker): :type: secret: str :param hostname: hostname of the local unit :type hostname: str - :param path: full path to ceph mon directory + :param path: full path to Ceph mon directory :type path: str - :param done: full path to 'done' marker for ceph mon + :param done: full path to 'done' marker for Ceph mon :type done: str - :param init_marker: full path to 'init' marker for ceph mon + :param init_marker: full path to 'init' marker for Ceph mon :type init_marker: str """ subprocess.check_call(['ceph-authtool', keyring, @@ -1415,13 +1409,13 @@ def create_keyrings(): owner=ceph_user(), group=ceph_user(), perms=0o400) else: - # NOTE(jamespage): Later ceph releases require explicit + # NOTE(jamespage): Later Ceph releases require explicit # call to ceph-create-keys to setup the # admin keys for the cluster; this command # will wait for quorum in the cluster before # returning. # NOTE(fnordahl): Explicitly run `ceph-create-keys` for older - # ceph releases too. This improves bootstrap + # Ceph releases too. This improves bootstrap # resilience as the charm will wait for # presence of peer units before attempting # to bootstrap. 
Note that charms deploying @@ -1503,9 +1497,9 @@ def find_least_used_utility_device(utility_devices, lvs=False): def get_devices(name): - """ Merge config and juju storage based devices + """Merge config and Juju storage based devices - :name: THe name of the device type, eg: wal, osd, journal + :name: The name of the device type, e.g.: wal, osd, journal :returns: Set(device names), which are strings """ if config(name): @@ -1520,11 +1514,11 @@ def get_devices(name): def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, - bluestore=False, key_manager=CEPH_KEY_MANAGER): + bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None): if dev.startswith('/dev'): osdize_dev(dev, osd_format, osd_journal, ignore_errors, encrypt, - bluestore, key_manager) + bluestore, key_manager, osd_id) else: if cmp_pkgrevno('ceph', '14.0.0') >= 0: log("Directory backed OSDs can not be created on Nautilus", @@ -1534,7 +1528,8 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False, def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, - encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER): + encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER, + osd_id=None): """ Prepare a block device for use as a Ceph OSD @@ -1547,7 +1542,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, :param: ignore_errors: Don't fail in the event of any errors during processing :param: encrypt: Encrypt block devices using 'key_manager' - :param: bluestore: Use bluestore native ceph block device format + :param: bluestore: Use bluestore native Ceph block device format :param: key_manager: Key management approach for encryption keys :raises subprocess.CalledProcessError: in the event that any supporting subprocess operation failed @@ -1599,7 +1594,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False, osd_journal, encrypt, bluestore, - key_manager) + key_manager, + osd_id) else: cmd = _ceph_disk(dev, 
osd_format, @@ -1683,7 +1679,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False): def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, - key_manager=CEPH_KEY_MANAGER): + key_manager=CEPH_KEY_MANAGER, osd_id=None): """ Prepare and activate a device for usage as a Ceph OSD using ceph-volume. @@ -1695,6 +1691,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, :param: encrypt: Use block device encryption :param: bluestore: Use bluestore storage for OSD :param: key_manager: dm-crypt Key Manager to use + :param: osd_id: The OSD-id to recycle, or None to create a new one :raises subprocess.CalledProcessError: in the event that any supporting LVM operation failed. :returns: list. 'ceph-volume' command and required parameters for @@ -1716,6 +1713,9 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False, if encrypt and key_manager == CEPH_KEY_MANAGER: cmd.append('--dmcrypt') + if osd_id is not None: + cmd.extend(['--osd-id', str(osd_id)]) + # On-disk journal volume creation if not osd_journal and not bluestore: journal_lv_type = 'journal' @@ -1840,7 +1840,7 @@ def get_conf(variable): Get the value of the given configuration variable from the cluster. - :param variable: ceph configuration variable + :param variable: Ceph configuration variable :returns: str. configured value for provided variable """ @@ -1860,7 +1860,7 @@ def calculate_volume_size(lv_type): :raises KeyError: if invalid lv_type is supplied :returns: int. 
Configured size in megabytes for volume type """ - # lv_type -> ceph configuration option + # lv_type -> Ceph configuration option _config_map = { 'db': 'bluestore_block_db_size', 'wal': 'bluestore_block_wal_size', @@ -1874,7 +1874,7 @@ def calculate_volume_size(lv_type): 'journal': 1024, } - # conversion of ceph config units to MB + # conversion of Ceph config units to MB _units = { 'db': 1048576, # Bytes -> MB 'wal': 1048576, # Bytes -> MB @@ -1907,7 +1907,7 @@ def _luks_uuid(dev): def _initialize_disk(dev, dev_uuid, encrypt=False, key_manager=CEPH_KEY_MANAGER): """ - Initialize a raw block device consuming 100% of the avaliable + Initialize a raw block device consuming 100% of the available disk space. Function assumes that block device has already been wiped. @@ -2004,7 +2004,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid, def osdize_dir(path, encrypt=False, bluestore=False): - """Ask ceph-disk to prepare a directory to become an osd. + """Ask ceph-disk to prepare a directory to become an OSD. :param path: str. The directory to osdize :param encrypt: bool. Should the OSD directory be encrypted at rest @@ -2074,11 +2074,11 @@ def get_running_osds(): def get_cephfs(service): """List the Ceph Filesystems that exist. - :param service: The service name to run the ceph command under - :returns: list. Returns a list of the ceph filesystems + :param service: The service name to run the Ceph command under + :returns: list. Returns a list of the Ceph filesystems """ if get_version() < 0.86: - # This command wasn't introduced until 0.86 ceph + # This command wasn't introduced until 0.86 Ceph return [] try: output = str(subprocess @@ -2157,7 +2157,7 @@ def roll_monitor_cluster(new_version, upgrade_key): sys.exit(1) log('monitor_list: {}'.format(monitor_list)) - # A sorted list of osd unit names + # A sorted list of OSD unit names mon_sorted_list = sorted(monitor_list) # Install packages immediately but defer restarts to when it's our time. 
@@ -2192,6 +2192,20 @@ def roll_monitor_cluster(new_version, upgrade_key): wait_for_all_monitors_to_upgrade(new_version=new_version, upgrade_key=upgrade_key) bootstrap_manager() + + # NOTE(jmcvaughn): + # Nautilus and later binaries use msgr2 by default, but existing + # clusters that have been upgraded from pre-Nautilus will not + # automatically have msgr2 enabled. Without this, Ceph will show + # a warning only (with no impact to operations), but newly added units + # will not be able to join the cluster. Therefore, we ensure it is + # enabled on upgrade for all versions including and after Nautilus + # (to cater for previous charm versions that will not have done this). + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 + if nautilus_or_later: + wait_for_all_monitors_to_upgrade(new_version=new_version, + upgrade_key=upgrade_key) + enable_msgr2() except ValueError: log("Failed to find {} in list {}.".format( my_name, mon_sorted_list)) @@ -2204,7 +2218,7 @@ def noop(): def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): - """Upgrade the current ceph monitor to the new version + """Upgrade the current Ceph monitor to the new version :param new_version: String version to upgrade to. 
""" @@ -2212,18 +2226,19 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): kick_function = noop current_version = get_version() status_set("maintenance", "Upgrading monitor") - log("Current ceph version is {}".format(current_version)) + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) # Needed to determine if whether to stop/start ceph-mgr luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0 - + # Needed to differentiate between systemd unit names + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0 kick_function() try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph source failed with message: {}".format( + log("Adding the Ceph source failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2246,7 +2261,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): try: if systemd(): - service_stop('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_stop(systemd_unit) log("restarting ceph-mgr.target maybe: {}" .format(luminous_or_later)) if luminous_or_later: @@ -2277,7 +2296,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): perms=0o755) if systemd(): - service_restart('ceph-mon') + if nautilus_or_later: + systemd_unit = 'ceph-mon@{}'.format(socket.gethostname()) + else: + systemd_unit = 'ceph-mon' + service_restart(systemd_unit) log("starting ceph-mgr.target maybe: {}".format(luminous_or_later)) if luminous_or_later: # due to BUG: #1849874 we have to force a restart to get it to @@ -2294,7 +2317,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True): def lock_and_roll(upgrade_key, service, my_name, version): - """Create a lock on the ceph monitor 
cluster and upgrade. + """Create a lock on the Ceph monitor cluster and upgrade. :param upgrade_key: str. The cephx key to use :param service: str. The cephx id to use @@ -2443,7 +2466,7 @@ class WatchDog(object): allow for other delays. There is a compatibility mode where if the otherside never kicks, then it - simply waits for the compatability timer. + simply waits for the compatibility timer. """ class WatchDogDeadException(Exception): @@ -2578,11 +2601,11 @@ class WatchDog(object): def get_upgrade_position(osd_sorted_list, match_name): - """Return the upgrade position for the given osd. + """Return the upgrade position for the given OSD. - :param osd_sorted_list: Osds sorted + :param osd_sorted_list: OSDs sorted :type osd_sorted_list: [str] - :param match_name: The osd name to match + :param match_name: The OSD name to match :type match_name: str :returns: The position of the name :rtype: int @@ -2591,20 +2614,20 @@ def get_upgrade_position(osd_sorted_list, match_name): for index, item in enumerate(osd_sorted_list): if item.name == match_name: return index - raise ValueError("osd name '{}' not found in get_upgrade_position list" + raise ValueError("OSD name '{}' not found in get_upgrade_position list" .format(match_name)) # Edge cases: # 1. Previous node dies on upgrade, can we retry? -# 2. This assumes that the osd failure domain is not set to osd. +# 2. This assumes that the OSD failure domain is not set to OSD. # It rolls an entire server at a time. def roll_osd_cluster(new_version, upgrade_key): """This is tricky to get right so here's what we're going to do. There's 2 possible cases: Either I'm first in line or not. If I'm not first in line I'll wait a random time between 5-30 seconds - and test to see if the previous osd is upgraded yet. + and test to see if the previous OSD is upgraded yet. TODO: If you're not in the same failure domain it's safe to upgrade 1. 
Examine all pools and adopt the most strict failure domain policy @@ -2620,7 +2643,7 @@ def roll_osd_cluster(new_version, upgrade_key): log('roll_osd_cluster called with {}'.format(new_version)) my_name = socket.gethostname() osd_tree = get_osd_tree(service=upgrade_key) - # A sorted list of osd unit names + # A sorted list of OSD unit names osd_sorted_list = sorted(osd_tree) log("osd_sorted_list: {}".format(osd_sorted_list)) @@ -2655,7 +2678,7 @@ def roll_osd_cluster(new_version, upgrade_key): def upgrade_osd(new_version, kick_function=None): - """Upgrades the current osd + """Upgrades the current OSD :param new_version: str. The new version to upgrade to """ @@ -2663,15 +2686,15 @@ def upgrade_osd(new_version, kick_function=None): kick_function = noop current_version = get_version() - status_set("maintenance", "Upgrading osd") - log("Current ceph version is {}".format(current_version)) + status_set("maintenance", "Upgrading OSD") + log("Current Ceph version is {}".format(current_version)) log("Upgrading to: {}".format(new_version)) try: add_source(config('source'), config('key')) apt_update(fatal=True) except subprocess.CalledProcessError as err: - log("Adding the ceph sources failed with message: {}".format( + log("Adding the Ceph sources failed with message: {}".format( err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2685,7 +2708,7 @@ def upgrade_osd(new_version, kick_function=None): kick_function() # If the upgrade does not need an ownership update of any of the - # directories in the osd service directory, then simply restart + # directories in the OSD service directory, then simply restart # all of the OSDs at the same time as this will be the fastest # way to update the code on the node. if not dirs_need_ownership_update('osd'): @@ -2700,7 +2723,7 @@ def upgrade_osd(new_version, kick_function=None): # Need to change the ownership of all directories which are not OSD # directories as well. 
# TODO - this should probably be moved to the general upgrade function - # and done before mon/osd. + # and done before mon/OSD. update_owner(CEPH_BASE_DIR, recurse_dirs=False) non_osd_dirs = filter(lambda x: not x == 'osd', os.listdir(CEPH_BASE_DIR)) @@ -2721,12 +2744,12 @@ def upgrade_osd(new_version, kick_function=None): _upgrade_single_osd(osd_num, osd_dir) except ValueError as ex: # Directory could not be parsed - junk directory? - log('Could not parse osd directory %s: %s' % (osd_dir, ex), + log('Could not parse OSD directory %s: %s' % (osd_dir, ex), WARNING) continue except (subprocess.CalledProcessError, IOError) as err: - log("Stopping ceph and upgrading packages failed " + log("Stopping Ceph and upgrading packages failed " "with message: {}".format(err)) status_set("blocked", "Upgrade to {} failed".format(new_version)) sys.exit(1) @@ -2753,7 +2776,7 @@ def _upgrade_single_osd(osd_num, osd_dir): def stop_osd(osd_num): """Stops the specified OSD number. - :param osd_num: the osd number to stop + :param osd_num: the OSD number to stop """ if systemd(): service_stop('ceph-osd@{}'.format(osd_num)) @@ -2764,7 +2787,7 @@ def stop_osd(osd_num): def start_osd(osd_num): """Starts the specified OSD number. - :param osd_num: the osd number to start. + :param osd_num: the OSD number to start. """ if systemd(): service_start('ceph-osd@{}'.format(osd_num)) @@ -2775,12 +2798,12 @@ def start_osd(osd_num): def disable_osd(osd_num): """Disables the specified OSD number. - Ensures that the specified osd will not be automatically started at the + Ensures that the specified OSD will not be automatically started at the next reboot of the system. Due to differences between init systems, - this method cannot make any guarantees that the specified osd cannot be + this method cannot make any guarantees that the specified OSD cannot be started manually. - :param osd_num: the osd id which should be disabled. + :param osd_num: the OSD id which should be disabled. 
:raises CalledProcessError: if an error occurs invoking the systemd cmd to disable the OSD :raises IOError, OSError: if the attempt to read/remove the ready file in @@ -2820,7 +2843,7 @@ def enable_osd(osd_num): :param osd_num: the osd id which should be enabled. :raises CalledProcessError: if the call to the systemd command issued fails when enabling the service - :raises IOError: if the attempt to write the ready file in an usptart + :raises IOError: if the attempt to write the ready file in an upstart enabled system fails """ if systemd(): @@ -2828,7 +2851,7 @@ def enable_osd(osd_num): subprocess.check_call(cmd) else: # When running on upstart, the OSDs are started via the ceph-osd-all - # upstart script which will only start the osd if it has a 'ready' + # upstart script which will only start the OSD if it has a 'ready' # file. Make sure that file exists. ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num), 'ready') @@ -2881,7 +2904,7 @@ def get_osd_state(osd_num, osd_goal_state=None): If osd_goal_state is not None, loop until the current OSD state matches the OSD goal state. - :param osd_num: the osd id to get state for + :param osd_num: the OSD id to get state for :param osd_goal_state: (Optional) string indicating state to wait for Defaults to None :returns: Returns a str, the OSD state. @@ -2942,7 +2965,7 @@ def maintain_osd_state(osd_num): Ensures the state of an OSD is the same at the end of a block nested in a with statement as it was at the beginning of the block. 
- :param osd_num: the osd id to maintain state for + :param osd_num: the OSD id to maintain state for """ osd_state = get_osd_state(osd_num) try: @@ -2969,9 +2992,9 @@ def maintain_all_osd_states(): def list_pools(client='admin'): """This will list the current pools that Ceph has - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Returns a list of available pools. :rtype: list :raises: subprocess.CalledProcessError if the subprocess fails to run. @@ -2996,9 +3019,9 @@ def get_pool_param(pool, param, client='admin'): :type pool: str :param param: Name of variable to get :type param: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Value of variable on pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3020,9 +3043,9 @@ def get_pool_erasure_profile(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Erasure code profile of pool or None :rtype: str or None :raises: subprocess.CalledProcessError @@ -3041,9 +3064,9 @@ def get_pool_quota(pool, client='admin'): :param pool: Name of pool to get variable from :type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with quota variables :rtype: dict :raises: subprocess.CalledProcessError @@ -3066,9 +3089,9 @@ def get_pool_applications(pool='', client='admin'): :param pool: (Optional) Name of pool to get applications for Defaults to get for all pools 
:type pool: str - :param client: (Optional) client id for ceph key to use + :param client: (Optional) client id for Ceph key to use Defaults to ``admin`` - :type cilent: str + :type client: str :returns: Dictionary with pool name as key :rtype: dict :raises: subprocess.CalledProcessError @@ -3131,7 +3154,7 @@ def dirs_need_ownership_update(service): necessary due to the upgrade from Hammer to Jewel where the daemon user changes from root: to ceph:. - :param service: the name of the service folder to check (e.g. osd, mon) + :param service: the name of the service folder to check (e.g. OSD, mon) :returns: boolean. True if the directories need a change of ownership, False otherwise. :raises IOError: if an error occurs reading the file stats from one of @@ -3161,7 +3184,7 @@ def dirs_need_ownership_update(service): return False -# A dict of valid ceph upgrade paths. Mapping is old -> new +# A dict of valid Ceph upgrade paths. Mapping is old -> new UPGRADE_PATHS = collections.OrderedDict([ ('firefly', 'hammer'), ('hammer', 'jewel'), @@ -3173,7 +3196,7 @@ UPGRADE_PATHS = collections.OrderedDict([ ('pacific', 'quincy'), ]) -# Map UCA codenames to ceph codenames +# Map UCA codenames to Ceph codenames UCA_CODENAME_MAP = { 'icehouse': 'firefly', 'juno': 'firefly', @@ -3196,24 +3219,24 @@ UCA_CODENAME_MAP = { def pretty_print_upgrade_paths(): - """Pretty print supported upgrade paths for ceph""" + """Pretty print supported upgrade paths for Ceph""" return ["{} -> {}".format(key, value) for key, value in UPGRADE_PATHS.items()] def resolve_ceph_version(source): - """Resolves a version of ceph based on source configuration + """Resolves a version of Ceph based on source configuration based on Ubuntu Cloud Archive pockets. 
@param: source: source configuration option of charm - :returns: ceph release codename or None if not resolvable + :returns: Ceph release codename or None if not resolvable """ os_release = get_os_codename_install_source(source) return UCA_CODENAME_MAP.get(os_release) def get_ceph_pg_stat(): - """Returns the result of ceph pg stat. + """Returns the result of 'ceph pg stat'. :returns: dict """ @@ -3248,7 +3271,7 @@ def get_ceph_health(): .decode('UTF-8')) try: json_tree = json.loads(tree) - # Make sure children are present in the json + # Make sure children are present in the JSON if not json_tree['overall_status']: return None @@ -3265,7 +3288,7 @@ def get_ceph_health(): def reweight_osd(osd_num, new_weight): """Changes the crush weight of an OSD to the value specified. - :param osd_num: the osd id which should be changed + :param osd_num: the OSD id which should be changed :param new_weight: the new weight for the OSD :returns: bool. True if output looks right, else false. :raises CalledProcessError: if an error occurs invoking the systemd cmd @@ -3292,7 +3315,7 @@ def reweight_osd(osd_num, new_weight): def determine_packages(): """Determines packages for installation. 
- :returns: list of ceph packages + :returns: list of Ceph packages """ packages = PACKAGES.copy() if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan': @@ -3338,6 +3361,16 @@ def bootstrap_manager(): service_restart(unit) +def enable_msgr2(): + """ + Enables msgr2 + + :raises: subprocess.CalledProcessError if the command fails + """ + cmd = ['ceph', 'mon', 'enable-msgr2'] + subprocess.check_call(cmd) + + def osd_noout(enable): """Sets or unsets 'noout' @@ -3361,12 +3394,12 @@ def osd_noout(enable): class OSDConfigSetError(Exception): - """Error occured applying OSD settings.""" + """Error occurred applying OSD settings.""" pass def apply_osd_settings(settings): - """Applies the provided osd settings + """Applies the provided OSD settings Apply the provided settings to all local OSD unless settings are already present. Settings stop being applied on encountering an error. @@ -3391,7 +3424,7 @@ def apply_osd_settings(settings): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error retrieving osd setting: {}".format(out['error']), + log("Error retrieving OSD setting: {}".format(out['error']), level=ERROR) return False current_settings[key] = out[cli_key] @@ -3408,7 +3441,7 @@ def apply_osd_settings(settings): out = json.loads( subprocess.check_output(cmd.split()).decode('UTF-8')) if 'error' in out: - log("Error applying osd setting: {}".format(out['error']), + log("Error applying OSD setting: {}".format(out['error']), level=ERROR) raise OSDConfigSetError return True @@ -3478,7 +3511,7 @@ mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard') def ceph_config_set(name, value, who): - """Set a ceph config option + """Set a Ceph config option :param name: key to set :type name: str @@ -3496,7 +3529,7 @@ mgr_config_set = functools.partial(ceph_config_set, who='mgr') def ceph_config_get(name, who): - """Retrieve the value of a ceph config option + """Retrieve the value of a Ceph config 
option :param name: key to lookup :type name: str diff --git a/metadata.yaml b/metadata.yaml index 34e02da..3ffef3f 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -11,7 +11,7 @@ tags: - misc series: - focal -- impish +- jammy extra-bindings: public: cluster: diff --git a/tests/bundles/impish-xena-ec.yaml b/tests/bundles/impish-xena-ec.yaml deleted file mode 100644 index ff4096f..0000000 --- a/tests/bundles/impish-xena-ec.yaml +++ /dev/null @@ -1,228 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: impish - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - '16': - '17': - '18': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 6 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - - '16' - - '17' - - '18' - channel: quincy/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - to: - - '10' - channel: quincy/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - 
openstack-origin: *openstack-origin - block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: lrc - ec-profile-locality: 3 - channel: yoga/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: yoga/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: jerasure - to: - - '14' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - pool-type: erasure-coded - ec-profile-k: 4 - ec-profile-m: 2 - ec-profile-plugin: isa - libvirt-image-backend: rbd - to: - - '15' - channel: yoga/edge - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 
'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'glance:ceph' - - 'ceph-proxy:client' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' - - - - 'nova-compute:ceph' - - 'ceph-proxy:client' diff --git a/tests/bundles/impish-xena.yaml b/tests/bundles/impish-xena.yaml deleted file mode 100644 index 0710d61..0000000 --- a/tests/bundles/impish-xena.yaml +++ /dev/null @@ -1,199 +0,0 @@ -variables: - openstack-origin: &openstack-origin distro - -series: impish - -comment: -- 'machines section to decide order of deployment. database sooner = faster' -machines: - '0': - constraints: mem=3072M - '1': - constraints: mem=3072M - '2': - constraints: mem=3072M - '3': - '4': - '5': - '6': - '7': - '8': - '9': - '10': - '11': - '12': - '13': - '14': - '15': - -applications: - - cinder-mysql-router: - charm: ch:mysql-router - channel: latest/edge - glance-mysql-router: - charm: ch:mysql-router - channel: latest/edge - keystone-mysql-router: - charm: ch:mysql-router - channel: latest/edge - - mysql-innodb-cluster: - charm: ch:mysql-innodb-cluster - num_units: 3 - options: - source: *openstack-origin - to: - - '0' - - '1' - - '2' - channel: latest/edge - - ceph-mon: - charm: ch:ceph-mon - num_units: 3 - options: - expected-osd-count: 3 - source: *openstack-origin - to: - - '3' - - '4' - - '5' - channel: quincy/edge - - ceph-osd: - charm: ch:ceph-osd - num_units: 3 - storage: - osd-devices: 10G - options: - source: *openstack-origin - to: - - '6' - - '7' - - '8' - channel: quincy/edge - - ceph-proxy: - charm: ../../ceph-proxy.charm - num_units: 1 - options: - source: *openstack-origin - to: - - '9' - - ceph-radosgw: - charm: ch:ceph-radosgw - num_units: 1 - options: - source: *openstack-origin - to: - - '10' - channel: quincy/edge - - cinder: - charm: ch:cinder - num_units: 1 - options: - openstack-origin: *openstack-origin - 
block-device: "" - ephemeral-unmount: "" - glance-api-version: 2 - overwrite: "false" - constraints: mem=2048 - to: - - '11' - channel: yoga/edge - - cinder-ceph: - charm: ch:cinder-ceph - options: - restrict-ceph-pools: True - channel: yoga/edge - - keystone: - charm: ch:keystone - num_units: 1 - options: - openstack-origin: *openstack-origin - admin-password: openstack - constraints: mem=1024 - to: - - '12' - channel: yoga/edge - - rabbitmq-server: - charm: ch:rabbitmq-server - num_units: 1 - constraints: mem=1024 - options: - source: *openstack-origin - to: - - '13' - channel: latest/edge - - glance: - charm: ch:glance - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '14' - channel: yoga/edge - - nova-compute: - charm: ch:nova-compute - num_units: 1 - options: - openstack-origin: *openstack-origin - to: - - '15' - channel: yoga/edge - - -relations: - - - - 'ceph-osd:mon' - - 'ceph-mon:osd' - - - - 'ceph-proxy:radosgw' - - 'ceph-radosgw:mon' - - - - 'cinder:amqp' - - 'rabbitmq-server:amqp' - - - - 'cinder:shared-db' - - 'cinder-mysql-router:shared-db' - - - 'cinder-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'keystone:shared-db' - - 'keystone-mysql-router:shared-db' - - - 'keystone-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'cinder:identity-service' - - 'keystone:identity-service' - - - - 'cinder-ceph:storage-backend' - - 'cinder:storage-backend' - - - - 'cinder-ceph:ceph' - - 'ceph-proxy:client' - - - - 'glance:image-service' - - 'nova-compute:image-service' - - - - 'glance:identity-service' - - 'keystone:identity-service' - - - - 'glance:shared-db' - - 'glance-mysql-router:shared-db' - - - 'glance-mysql-router:db-router' - - 'mysql-innodb-cluster:db-router' - - - - 'nova-compute:ceph-access' - - 'cinder-ceph:ceph-access' - - - - 'nova-compute:amqp' - - 'rabbitmq-server:amqp' diff --git a/tests/tests.yaml b/tests/tests.yaml index 691fcc4..fba0805 100644 --- a/tests/tests.yaml +++ 
b/tests/tests.yaml @@ -14,8 +14,6 @@ tests: gate_bundles: - focal-xena - erasure-coded: focal-xena-ec - - impish-xena - - erasure-coded: impish-xena-ec dev_bundles: - focal-yoga @@ -48,7 +46,5 @@ target_deploy_status: tests_options: force_deploy: - - impish-xena - - impish-xena-ec - jammy-yoga - jammy-yoga-ec