diff --git a/.zuul.yaml b/.zuul.yaml index 23bf5f6..7ffc71c 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-charm-zed-jobs + - openstack-python3-charm-yoga-jobs - openstack-cover-jobs diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index bdbe8d5..0000000 --- a/bindep.txt +++ /dev/null @@ -1,3 +0,0 @@ -libffi-dev [platform:dpkg] -libxml2-dev [platform:dpkg] -libxslt1-dev [platform:dpkg] diff --git a/charmcraft.yaml b/charmcraft.yaml index a89bb84..ea1d266 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -23,10 +23,13 @@ parts: bases: - build-on: - name: ubuntu - channel: "22.04" + channel: "20.04" architectures: - amd64 run-on: + - name: ubuntu + channel: "20.04" + architectures: [amd64, s390x, ppc64el, arm64] - name: ubuntu channel: "22.04" architectures: [amd64, s390x, ppc64el, arm64] diff --git a/charmhelpers/__init__.py b/charmhelpers/__init__.py index ddf3045..1f57ed2 100644 --- a/charmhelpers/__init__.py +++ b/charmhelpers/__init__.py @@ -14,15 +14,30 @@ # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. +from __future__ import print_function +from __future__ import absolute_import + import functools import inspect import subprocess +import sys +try: + import six # NOQA:F401 +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # NOQA:F401 try: import yaml # NOQA:F401 except ImportError: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) import yaml # NOQA:F401 diff --git a/charmhelpers/cli/__init__.py b/charmhelpers/cli/__init__.py index 2b0c4b7..74ea729 100644 --- a/charmhelpers/cli/__init__.py +++ b/charmhelpers/cli/__init__.py @@ -16,6 +16,9 @@ import inspect import argparse import sys +import six +from six.moves import zip + import charmhelpers.core.unitdata @@ -146,7 +149,10 @@ class CommandLine(object): def run(self): "Run cli, processing arguments and executing subcommands." 
arguments = self.argument_parser.parse_args() - argspec = inspect.getfullargspec(arguments.func) + if six.PY2: + argspec = inspect.getargspec(arguments.func) + else: + argspec = inspect.getfullargspec(arguments.func) vargs = [] for arg in argspec.args: vargs.append(getattr(arguments, arg)) @@ -171,7 +177,10 @@ def describe_arguments(func): Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" - argspec = inspect.getfullargspec(func) + if six.PY2: + argspec = inspect.getargspec(func) + else: + argspec = inspect.getfullargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py index bad7a53..8d1753c 100644 --- a/charmhelpers/contrib/charmsupport/nrpe.py +++ b/charmhelpers/contrib/charmsupport/nrpe.py @@ -28,7 +28,6 @@ import subprocess import yaml from charmhelpers.core.hookenv import ( - application_name, config, hook_name, local_unit, @@ -175,8 +174,7 @@ define service {{ if os.path.exists(os.path.join(path, parts[0])): command = os.path.join(path, parts[0]) if len(parts) > 1: - safe_args = [shlex.quote(arg) for arg in parts[1:]] - command += " " + " ".join(safe_args) + command += " " + " ".join(parts[1:]) return command log('Check command not found: {}'.format(parts[0])) return '' @@ -522,39 +520,3 @@ def remove_deprecated_check(nrpe, deprecated_services): for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) nrpe.remove_check(shortname=dep_svc) - - -def add_deferred_restarts_check(nrpe): - """ - Add NRPE check for services with deferred restarts. - - :param NRPE nrpe: NRPE object to add check to - """ - unit_name = local_unit().replace('/', '-') - shortname = unit_name + '_deferred_restarts' - check_cmd = 'check_deferred_restarts.py --application {}'.format( - application_name()) - - log('Adding deferred restarts nrpe check: {}'.format(shortname)) - nrpe.add_check( - shortname=shortname, - description='Check deferred service restarts {}'.format(unit_name), - check_cmd=check_cmd) - - -def remove_deferred_restarts_check(nrpe): - """ - Remove NRPE check for services with deferred service restarts. 
- - :param NRPE nrpe: NRPE object to remove check from - """ - unit_name = local_unit().replace('/', '-') - shortname = unit_name + '_deferred_restarts' - check_cmd = 'check_deferred_restarts.py --application {}'.format( - application_name()) - - log('Removing deferred restarts nrpe check: {}'.format(shortname)) - nrpe.remove_check( - shortname=shortname, - description='Check deferred service restarts {}'.format(unit_name), - check_cmd=check_cmd) diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py index 146beba..f0b629a 100644 --- a/charmhelpers/contrib/hahelpers/cluster.py +++ b/charmhelpers/contrib/hahelpers/cluster.py @@ -32,6 +32,8 @@ import time from socket import gethostname as get_unit_hostname +import six + from charmhelpers.core.hookenv import ( log, relation_ids, @@ -123,16 +125,16 @@ def is_crm_dc(): """ cmd = ['crm', 'status'] try: - status = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('utf-8') + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") except subprocess.CalledProcessError as ex: raise CRMDCNotFound(str(ex)) current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): - # Current DC: juju-lytrusty-machine-2 (168108163) - # - partition with quorum + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True @@ -156,8 +158,9 @@ def is_crm_leader(resource, retry=False): return is_crm_dc() cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output( - cmd, stderr=subprocess.STDOUT).decode('utf-8') + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") except subprocess.CalledProcessError: status = None diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py index f8edf37..b356d64 100644 --- a/charmhelpers/contrib/network/ip.py +++ b/charmhelpers/contrib/network/ip.py @@ -15,6 +15,7 @@ import glob import re import subprocess +import six import socket from functools import partial @@ -38,14 +39,20 @@ try: import netifaces except ImportError: apt_update(fatal=True) - apt_install('python3-netifaces', fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: apt_update(fatal=True) - apt_install('python3-netaddr', fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) import netaddr @@ -455,19 +462,22 @@ def ns_query(address): try: import dns.resolver except ImportError: - apt_install('python3-dnspython', fatal=True) + if six.PY2: + apt_install('python-dnspython', fatal=True) + else: + apt_install('python3-dnspython', fatal=True) import dns.resolver if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, str): + elif isinstance(address, six.string_types): rtype = 'A' else: return None try: answers = dns.resolver.query(address, rtype) - except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): + except dns.resolver.NXDOMAIN: return None if answers: @@ -503,7 +513,10 @@ def get_hostname(address, fqdn=True): try: import dns.reversename except ImportError: - apt_install("python3-dnspython", fatal=True) + if six.PY2: + 
apt_install("python-dnspython", fatal=True) + else: + apt_install("python3-dnspython", fatal=True) import dns.reversename rev = dns.reversename.from_address(address) diff --git a/charmhelpers/contrib/openstack/files/check_deferred_restarts.py b/charmhelpers/contrib/openstack/files/check_deferred_restarts.py deleted file mode 100755 index 5f392b3..0000000 --- a/charmhelpers/contrib/openstack/files/check_deferred_restarts.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/python3 - -# Copyright 2014-2022 Canonical Limited. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Checks for services with deferred restarts. - -This Nagios check will parse /var/lib/policy-rd.d/ -to find any restarts that are currently deferred. -""" - -import argparse -import glob -import sys -import yaml - - -DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d' - - -def get_deferred_events(): - """Return a list of deferred events dicts from policy-rc.d files. - - Events are read from DEFERRED_EVENTS_DIR and are of the form: - { - action: restart, - policy_requestor_name: rabbitmq-server, - policy_requestor_type: charm, - reason: 'Pkg update', - service: rabbitmq-server, - time: 1614328743 - } - - :raises OSError: Raised in case of a system error while reading a policy file - :raises yaml.YAMLError: Raised if parsing a policy file fails - - :returns: List of deferred event dictionaries - :rtype: list - """ - deferred_events_files = glob.glob( - '{}/*.deferred'.format(DEFERRED_EVENTS_DIR)) - - deferred_events = [] - for event_file in deferred_events_files: - with open(event_file, 'r') as f: - event = yaml.safe_load(f) - deferred_events.append(event) - - return deferred_events - - -def get_deferred_restart_services(application=None): - """Returns a list of services with deferred restarts. - - :param str application: Name of the application that blocked the service restart. - If application is None, all services with deferred restarts - are returned. Services which are blocked by a non-charm - requestor are always returned. - - :raises OSError: Raised in case of a system error while reading a policy file - :raises yaml.YAMLError: Raised if parsing a policy file fails - - :returns: List of services with deferred restarts belonging to application. 
- :rtype: list - """ - - deferred_restart_events = filter( - lambda e: e['action'] == 'restart', get_deferred_events()) - - deferred_restart_services = set() - for restart_event in deferred_restart_events: - if application: - if ( - restart_event['policy_requestor_type'] != 'charm' or - restart_event['policy_requestor_type'] == 'charm' and - restart_event['policy_requestor_name'] == application - ): - deferred_restart_services.add(restart_event['service']) - else: - deferred_restart_services.add(restart_event['service']) - - return list(deferred_restart_services) - - -def main(): - """Check for services with deferred restarts.""" - parser = argparse.ArgumentParser( - description='Check for services with deferred restarts') - parser.add_argument( - '--application', help='Check services belonging to this application only') - - args = parser.parse_args() - - services = set(get_deferred_restart_services(args.application)) - - if len(services) == 0: - print('OK: No deferred service restarts.') - sys.exit(0) - else: - print( - 'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services))) - sys.exit(1) - - -if __name__ == '__main__': - try: - main() - except OSError as e: - print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror)) - sys.exit(1) - except yaml.YAMLError as e: - print('CRITICAL: Failed to parse a policy file: {}'.format(str(e))) - sys.exit(1) - except Exception as e: - print('CRITICAL: An unknown error occurred: {}'.format(str(e))) - sys.exit(1) diff --git a/charmhelpers/contrib/openstack/policyd.py b/charmhelpers/contrib/openstack/policyd.py index 767943c..6fa06f2 100644 --- a/charmhelpers/contrib/openstack/policyd.py +++ b/charmhelpers/contrib/openstack/policyd.py @@ -15,6 +15,7 @@ import collections import contextlib import os +import six import shutil import yaml import zipfile @@ -203,6 +204,12 @@ class BadPolicyYamlFile(Exception): return self.log_message +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + def is_policyd_override_valid_on_this_release(openstack_release): """Check that the charm is running on at least Ubuntu Xenial, and at least the queens release. @@ -480,10 +487,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): if blacklisted_keys_present: raise BadPolicyYamlFile("blacklisted keys {} present." .format(", ".join(blacklisted_keys_present))) - if not all(isinstance(k, str) for k in keys): + if not all(isinstance(k, six.string_types) for k in keys): raise BadPolicyYamlFile("keys in yaml aren't all strings?") # check that the dictionary looks like a mapping of str to str - if not all(isinstance(v, str) for v in doc.values()): + if not all(isinstance(v, six.string_types) for v in doc.values()): raise BadPolicyYamlFile("values in yaml aren't all strings?") return doc @@ -523,7 +530,8 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) - for direntry in os.scandir(path): + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir + for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: continue @@ -550,6 +558,36 @@ def maybe_create_directory_for(path, user, group): ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) +@contextlib.contextmanager +def _fallback_scandir(path): + """Fallback os.scandir implementation. 
+ + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] + """ + for f in os.listdir(path): + yield _FBDirectory(f) + + +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + def path_for_policy_file(service, name): """Return the full path for a policy.d file that will be written to the service's policy.d directory. @@ -730,7 +768,7 @@ def process_policy_resource_file(resource_file, _group) # Every thing worked, so we mark up a success. completed = True - except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: + except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: hookenv.log("Processing {} failed: {}".format(resource_file, str(e)), level=POLICYD_LOG_LEVEL_DEFAULT) except IOError as e: diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index 1fa2814..9cc96d6 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -25,6 +25,7 @@ import re import itertools import functools +import six import traceback import uuid import yaml @@ -158,7 +159,6 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2021.1', 'wallaby'), ('2021.2', 'xena'), ('2022.1', 'yoga'), - ('2022.2', 'zed'), ]) # The ugly duckling - must list releases oldest to newest @@ -362,8 +362,6 @@ def get_os_codename_install_source(src): rel = '' if src is None: return rel - if src in OPENSTACK_RELEASES: - return src if src in ['distro', 'distro-proposed', 'proposed']: try: rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] @@ -401,23 +399,19 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES, - raise_exception=False): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in version_map.items(): + for k, v in six.iteritems(version_map): if v == codename: return k e = 'Could not derive OpenStack version for '\ 'codename: %s' % codename - if raise_exception: - raise ValueError(str(e)) error_out(e) def get_os_version_codename_swift(codename): '''Determine OpenStack version number of swift from codename.''' - # for k, v in six.iteritems(SWIFT_CODENAMES): - for k, v in SWIFT_CODENAMES.items(): + for k, v in six.iteritems(SWIFT_CODENAMES): if k == codename: return v[-1] e = 'Could not derive swift version for '\ @@ -427,17 +421,17 @@ def get_os_version_codename_swift(codename): def get_swift_codename(version): '''Determine OpenStack codename that corresponds to swift version.''' - codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v] + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] if len(codenames) > 1: # If more than one release codename contains this version we determine # the actual codename based on the highest available install source. 
for codename in reversed(codenames): releases = UBUNTU_OPENSTACK_RELEASE - release = [k for k, v in releases.items() if codename in v] - ret = (subprocess - .check_output(['apt-cache', 'policy', 'swift']) - .decode('UTF-8')) + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') if codename in ret or release[0] in ret: return codename elif len(codenames) == 1: @@ -447,7 +441,7 @@ def get_swift_codename(version): match = re.match(r'^(\d+)\.(\d+)', version) if match: major_minor_version = match.group(0) - for codename, versions in SWIFT_CODENAMES.items(): + for codename, versions in six.iteritems(SWIFT_CODENAMES): for release_version in versions: if release_version.startswith(major_minor_version): return codename @@ -483,7 +477,9 @@ def get_os_codename_package(package, fatal=True): if snap_install_requested(): cmd = ['snap', 'list', package] try: - out = subprocess.check_output(cmd).decode('UTF-8') + out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') except subprocess.CalledProcessError: return None lines = out.split('\n') @@ -553,14 +549,16 @@ def get_os_version_package(pkg, fatal=True): if 'swift' in pkg: vers_map = SWIFT_CODENAMES - for cname, version in vers_map.items(): + for cname, version in six.iteritems(vers_map): if cname == codename: return version[-1] else: vers_map = OPENSTACK_CODENAMES - for version, cname in vers_map.items(): + for version, cname in six.iteritems(vers_map): if cname == codename: return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) def get_installed_os_version(): @@ -823,10 +821,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): if not os.path.exists(os.path.dirname(juju_rc_path)): os.mkdir(os.path.dirname(juju_rc_path)) with open(juju_rc_path, 'wt') as rc_script: - rc_script.write("#!/bin/bash\n") - for u, p in env_vars.items(): - if u != "script_path": - rc_script.write('export %s=%s\n' % (u, p)) + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in six.iteritems(env_vars) if u != "script_path"] def openstack_upgrade_available(package): @@ -1041,7 +1039,7 @@ def _determine_os_workload_status( state, message, lambda: charm_func(configs)) if state is None: - state, message = ows_check_services_running(services, ports) + state, message = _ows_check_services_running(services, ports) if state is None: state = 'active' @@ -1215,12 +1213,7 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): return state, message -@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log) def _ows_check_services_running(services, ports): - return ows_check_services_running(services, ports) - - -def ows_check_services_running(services, ports): """Check that the services that should be running are actually running and that any ports specified are being listened to. @@ -1420,75 +1413,45 @@ def incomplete_relation_data(configs, required_interfaces): for i in incomplete_relations} -def do_action_openstack_upgrade(package, upgrade_callback, configs): +def do_action_openstack_upgrade(package, upgrade_callback, configs, + force_upgrade=False): """Perform action-managed OpenStack upgrade. Upgrades packages to the configured openstack-origin version and sets the corresponding action status as a result. + If the charm was installed from source we cannot upgrade it. 
For backwards compatibility a config flag (action-managed-upgrade) must be set for this code to run, otherwise a full service level upgrade will fire on config-changed. - @param package: package name for determining if openstack upgrade available + @param package: package name for determining if upgrade available @param upgrade_callback: function callback to charm's upgrade function @param configs: templating object derived from OSConfigRenderer class + @param force_upgrade: perform dist-upgrade regardless of new openstack @return: True if upgrade successful; False if upgrade failed or skipped """ ret = False - if openstack_upgrade_available(package): + if openstack_upgrade_available(package) or force_upgrade: if config('action-managed-upgrade'): juju_log('Upgrading OpenStack release') try: upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed'}) + action_set({'outcome': 'success, upgrade completed.'}) ret = True except Exception: - action_set({'outcome': 'upgrade failed, see traceback'}) + action_set({'outcome': 'upgrade failed, see traceback.'}) action_set({'traceback': traceback.format_exc()}) - action_fail('upgrade callback resulted in an ' + action_fail('do_openstack_upgrade resulted in an ' 'unexpected error') else: action_set({'outcome': 'action-managed-upgrade config is ' - 'False, skipped upgrade'}) + 'False, skipped upgrade.'}) else: - action_set({'outcome': 'no upgrade available'}) - - return ret - - -def do_action_package_upgrade(package, upgrade_callback, configs): - """Perform package upgrade within the current OpenStack release. - - Upgrades packages only if there is not an openstack upgrade available, - and sets the corresponding action status as a result. - - @param package: package name for determining if openstack upgrade available - @param upgrade_callback: function callback to charm's upgrade function - @param configs: templating object derived from OSConfigRenderer class - - @return: True if upgrade successful; False if upgrade failed or skipped - """ - ret = False - - if not openstack_upgrade_available(package): - juju_log('Upgrading packages') - - try: - upgrade_callback(configs=configs) - action_set({'outcome': 'success, upgrade completed'}) - ret = True - except Exception: - action_set({'outcome': 'upgrade failed, see traceback'}) - action_set({'traceback': traceback.format_exc()}) - action_fail('upgrade callback resulted in an ' - 'unexpected error') - else: - action_set({'outcome': 'upgrade skipped because an openstack upgrade ' - 'is available'}) + action_set({'outcome': 'no upgrade available.'}) return ret @@ -1886,20 +1849,21 @@ def pausable_restart_on_change(restart_map, stopstart=False, """ def wrap(f): - __restart_map_cache = None + # py27 compatible nonlocal variable. 
When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} @functools.wraps(f) def wrapped_f(*args, **kwargs): - nonlocal __restart_map_cache if is_unit_paused_set(): return f(*args, **kwargs) - if __restart_map_cache is None: - __restart_map_cache = restart_map() \ + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ if callable(restart_map) else restart_map # otherwise, normal restart_on_change functionality return restart_on_change_helper( (lambda: f(*args, **kwargs)), - __restart_map_cache, + __restart_map_cache['cache'], stopstart, restart_functions, can_restart_now_f, @@ -1924,7 +1888,7 @@ def ordered(orderme): raise ValueError('argument must be a dict type') result = OrderedDict() - for k, v in sorted(orderme.items(), key=lambda x: x[0]): + for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): if isinstance(v, dict): result[k] = ordered(v) else: diff --git a/charmhelpers/contrib/python.py b/charmhelpers/contrib/python.py index fcded68..84cba8c 100644 --- a/charmhelpers/contrib/python.py +++ b/charmhelpers/contrib/python.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import + # deprecated aliases for backwards compatibility from charmhelpers.fetch.python import debug # noqa from charmhelpers.fetch.python import packages # noqa diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py index 1b20b8f..c70aeb2 100644 --- a/charmhelpers/contrib/storage/linux/ceph.py +++ b/charmhelpers/contrib/storage/linux/ceph.py @@ -23,6 +23,7 @@ import collections import errno import hashlib import math +import six import os import shutil @@ -217,7 +218,7 @@ def validator(value, valid_type, valid_range=None): "was given {} of type {}" .format(valid_range, type(valid_range))) # If we're dealing with strings - if isinstance(value, str): + if isinstance(value, six.string_types): assert value in valid_range, ( "{} is not in the list {}".format(value, valid_range)) # Integer, float should have a min and max @@ -433,9 +434,9 @@ class BasePool(object): :type mode: str """ # Check the input types and values - validator(value=cache_pool, valid_type=str) + validator(value=cache_pool, valid_type=six.string_types) validator( - value=mode, valid_type=str, + value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) check_call([ @@ -614,8 +615,7 @@ class Pool(BasePool): class ReplicatedPool(BasePool): def __init__(self, service, name=None, pg_num=None, replicas=None, - percent_data=None, app_name=None, op=None, - profile_name='replicated_rule'): + percent_data=None, app_name=None, op=None): """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword @@ -632,8 +632,6 @@ class ReplicatedPool(BasePool): to this replicated pool. :type replicas: int :raises: KeyError - :param profile_name: Crush Profile to use - :type profile_name: Optional[str] """ # NOTE: Do not perform initialization steps that require live data from # a running cluster here. The *Pool classes may be used for validation. @@ -648,20 +646,11 @@ class ReplicatedPool(BasePool): # we will fail with KeyError if it is not provided. 
self.replicas = op['replicas'] self.pg_num = op.get('pg_num') - self.profile_name = op.get('crush-profile') or profile_name else: self.replicas = replicas or 2 self.pg_num = pg_num - self.profile_name = profile_name or 'replicated_rule' def _create(self): - # Validate if crush profile exists - if self.profile_name is None: - msg = ("Failed to discover crush profile named " - "{}".format(self.profile_name)) - log(msg, level=ERROR) - raise PoolCreationError(msg) - # Do extra validation on pg_num with data from live cluster if self.pg_num: # Since the number of placement groups were specified, ensure @@ -679,12 +668,12 @@ class ReplicatedPool(BasePool): '--pg-num-min={}'.format( min(AUTOSCALER_DEFAULT_PGS, self.pg_num) ), - self.name, str(self.pg_num), self.profile_name + self.name, str(self.pg_num) ] else: cmd = [ 'ceph', '--id', self.service, 'osd', 'pool', 'create', - self.name, str(self.pg_num), self.profile_name + self.name, str(self.pg_num) ] check_call(cmd) @@ -703,7 +692,7 @@ class ErasurePool(BasePool): def __init__(self, service, name=None, erasure_code_profile=None, percent_data=None, app_name=None, op=None, allow_ec_overwrites=False): - """Initialize ErasurePool object. + """Initialize ReplicatedPool object. Pool information is either initialized from individual keyword arguments or from a individual CephBrokerRq operation Dict. @@ -789,11 +778,10 @@ def enabled_manager_modules(): :rtype: List[str] """ cmd = ['ceph', 'mgr', 'module', 'ls'] - quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0 - if quincy_or_later: - cmd.append('--format=json') try: - modules = check_output(cmd).decode('utf-8') + modules = check_output(cmd) + if six.PY3: + modules = modules.decode('UTF-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -826,10 +814,10 @@ def get_mon_map(service): ceph command fails. 
""" try: - octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0 - mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status' - mon_status = (check_output(['ceph', '--id', service, mon_status_cmd, - '--format=json'])).decode('utf-8') + mon_status = check_output(['ceph', '--id', service, + 'mon_status', '--format=json']) + if six.PY3: + mon_status = mon_status.decode('UTF-8') try: return json.loads(mon_status) except ValueError as v: @@ -971,7 +959,9 @@ def get_erasure_profile(service, name): try: out = check_output(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', - name, '--format=json']).decode('utf-8') + name, '--format=json']) + if six.PY3: + out = out.decode('UTF-8') return json.loads(out) except (CalledProcessError, OSError, ValueError): return None @@ -1174,7 +1164,8 @@ def create_erasure_profile(service, profile_name, 'nvme' ] - validator(erasure_plugin_name, str, list(plugin_techniques.keys())) + validator(erasure_plugin_name, six.string_types, + list(plugin_techniques.keys())) cmd = [ 'ceph', '--id', service, @@ -1185,7 +1176,7 @@ def create_erasure_profile(service, profile_name, ] if erasure_plugin_technique: - validator(erasure_plugin_technique, str, + validator(erasure_plugin_technique, six.string_types, plugin_techniques[erasure_plugin_name]) cmd.append('technique={}'.format(erasure_plugin_technique)) @@ -1198,7 +1189,7 @@ def create_erasure_profile(service, profile_name, failure_domain = 'rack' if failure_domain: - validator(failure_domain, str, failure_domains) + validator(failure_domain, six.string_types, failure_domains) # failure_domain changed in luminous if luminous_or_later: cmd.append('crush-failure-domain={}'.format(failure_domain)) @@ -1207,7 +1198,7 @@ def create_erasure_profile(service, profile_name, # device class new in luminous if luminous_or_later and device_class: - validator(device_class, str, device_classes) + validator(device_class, six.string_types, device_classes) cmd.append('crush-device-class={}'.format(device_class)) else: log('Skipping device class configuration (ceph < 12.0.0)', @@ -1222,7 +1213,7 @@ def create_erasure_profile(service, profile_name, raise ValueError("locality must be provided for lrc plugin") # LRC optional configuration if crush_locality: - validator(crush_locality, str, failure_domains) + validator(crush_locality, six.string_types, failure_domains) cmd.append('crush-locality={}'.format(crush_locality)) if erasure_plugin_name == 'shec': @@ -1250,8 +1241,8 @@ def rename_pool(service, old_name, new_name): :param new_name: Name to rename pool to. :type new_name: str """ - validator(value=old_name, valid_type=str) - validator(value=new_name, valid_type=str) + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) cmd = [ 'ceph', '--id', service, @@ -1269,7 +1260,7 @@ def erasure_profile_exists(service, name): :returns: True if it exists, False otherwise. :rtype: bool """ - validator(value=name, valid_type=str) + validator(value=name, valid_type=six.string_types) try: check_call(['ceph', '--id', service, 'osd', 'erasure-code-profile', 'get', @@ -1289,10 +1280,12 @@ def get_cache_mode(service, pool_name): :returns: Current cache mode. 
:rtype: Optional[int] """ - validator(value=service, valid_type=str) - validator(value=pool_name, valid_type=str) + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) out = check_output(['ceph', '--id', service, - 'osd', 'dump', '--format=json']).decode('utf-8') + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') try: osd_json = json.loads(out) for pool in osd_json['pools']: @@ -1306,8 +1299,9 @@ def get_cache_mode(service, pool_name): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output( - ['rados', '--id', service, 'lspools']).decode('utf-8') + out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1326,11 +1320,13 @@ def get_osds(service, device_class=None): out = check_output(['ceph', '--id', service, 'osd', 'crush', 'class', 'ls-osd', device_class, - '--format=json']).decode('utf-8') + '--format=json']) else: out = check_output(['ceph', '--id', service, 'osd', 'ls', - '--format=json']).decode('utf-8') + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') return json.loads(out) @@ -1347,7 +1343,9 @@ def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('utf-8') + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1373,7 +1371,7 @@ def update_pool(client, pool, settings): :raises: CalledProcessError """ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] - for k, v in settings.items(): + for k, v in six.iteritems(settings): check_call(cmd + [k, v]) @@ -1511,7 +1509,9 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']).decode('utf-8') + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') except CalledProcessError: return False @@ -1857,7 +1857,7 @@ class CephBrokerRq(object): } def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, - crush_profile=None, **kwargs): + **kwargs): """Adds an operation to create a replicated pool. Refer to docstring for ``_partial_build_common_op_create`` for @@ -1871,10 +1871,6 @@ class CephBrokerRq(object): for pool. :type pg_num: int :raises: AssertionError if provided data is of invalid type/range - :param crush_profile: Name of crush profile to use. If not set the - ceph-mon unit handling the broker request will - set its default value. - :type crush_profile: Optional[str] """ if pg_num and kwargs.get('weight'): raise ValueError('pg_num and weight are mutually exclusive') @@ -1884,7 +1880,6 @@ class CephBrokerRq(object): 'name': name, 'replicas': replica_count, 'pg_num': pg_num, - 'crush-profile': crush_profile } op.update(self._partial_build_common_op_create(**kwargs)) diff --git a/charmhelpers/contrib/storage/linux/loopback.py b/charmhelpers/contrib/storage/linux/loopback.py index 04daea2..74bab40 100644 --- a/charmhelpers/contrib/storage/linux/loopback.py +++ b/charmhelpers/contrib/storage/linux/loopback.py @@ -19,6 +19,8 @@ from subprocess import ( check_output, ) +import six + ################################################## # loopback device helpers. 
@@ -38,7 +40,9 @@ def loopback_devices(): ''' loopbacks = {} cmd = ['losetup', '-a'] - output = check_output(cmd).decode('utf-8') + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] for dev, _, f in devs: loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] @@ -53,7 +57,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in loopback_devices().items(): + for d, f in six.iteritems(loopback_devices()): if f == file_path: return d @@ -67,7 +71,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in loopback_devices().items(): + for d, f in six.iteritems(loopback_devices()): if f == path: return d diff --git a/charmhelpers/contrib/templating/contexts.py b/charmhelpers/contrib/templating/contexts.py index a491fdb..c1adf94 100644 --- a/charmhelpers/contrib/templating/contexts.py +++ b/charmhelpers/contrib/templating/contexts.py @@ -20,6 +20,8 @@ import os import yaml +import six + import charmhelpers.core.hookenv @@ -91,8 +93,7 @@ def juju_state_to_yaml(yaml_path, namespace_separator=':', By default, hyphens are allowed in keys as this is supported by yaml, but for tools like ansible, hyphens are not valid [1]. - [1] http://www.ansibleworks.com/docs/playbooks_variables.html - #what-makes-a-valid-variable-name + [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name """ config = charmhelpers.core.hookenv.config() @@ -100,17 +101,16 @@ def juju_state_to_yaml(yaml_path, namespace_separator=':', # file resources etc. config['charm_dir'] = charm_dir config['local_unit'] = charmhelpers.core.hookenv.local_unit() - config['unit_private_address'] = ( - charmhelpers.core.hookenv.unit_private_ip()) + config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip() config['unit_public_address'] = charmhelpers.core.hookenv.unit_get( 'public-address' ) # Don't use non-standard tags for unicode which will not # work when salt uses yaml.load_safe. - yaml.add_representer(str, + yaml.add_representer(six.text_type, lambda dumper, value: dumper.represent_scalar( - 'tag:yaml.org,2002:str', value)) + six.u('tag:yaml.org,2002:str'), value)) yaml_dir = os.path.dirname(yaml_path) if not os.path.exists(yaml_dir): diff --git a/charmhelpers/contrib/templating/jinja.py b/charmhelpers/contrib/templating/jinja.py index de6a647..c6ad9d0 100644 --- a/charmhelpers/contrib/templating/jinja.py +++ b/charmhelpers/contrib/templating/jinja.py @@ -13,14 +13,18 @@ # limitations under the License. """ -Templating using the python3-jinja2 package. +Templating using the python-jinja2 package. 
""" +import six from charmhelpers.fetch import apt_install, apt_update try: import jinja2 except ImportError: apt_update(fatal=True) - apt_install(["python3-jinja2"], fatal=True) + if six.PY3: + apt_install(["python3-jinja2"], fatal=True) + else: + apt_install(["python-jinja2"], fatal=True) import jinja2 diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py index 370c3e8..e94247a 100644 --- a/charmhelpers/core/hookenv.py +++ b/charmhelpers/core/hookenv.py @@ -17,11 +17,12 @@ # Authors: # Charm Helpers Developers +from __future__ import print_function import copy from distutils.version import LooseVersion from enum import Enum from functools import wraps -from collections import namedtuple, UserDict +from collections import namedtuple import glob import os import json @@ -35,6 +36,12 @@ from subprocess import CalledProcessError from charmhelpers import deprecate +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -105,7 +112,7 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] - if not isinstance(message, str): + if not isinstance(message, six.string_types): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing juju-log should not cause failures in unit tests @@ -125,7 +132,7 @@ def log(message, level=None): def function_log(message): """Write a function progress message""" command = ['function-log'] - if not isinstance(message, str): + if not isinstance(message, six.string_types): message = repr(message) command += [message[:SH_MAX_ARG]] # Missing function-log should not cause failures in unit tests @@ -438,6 +445,12 @@ def config(scope=None): """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] + try: + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError try: if _cache_config is None: config_data = json.loads( @@ -446,7 +459,7 @@ def config(scope=None): if scope is not None: return _cache_config.get(scope) return _cache_config - except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e: + except (exc_json, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) @@ -478,26 +491,12 @@ def relation_get(attribute=None, unit=None, rid=None, app=None): raise -@cached -def _relation_set_accepts_file(): - """Return True if the juju relation-set command accepts a file. - - Cache the result as it won't change during the execution of a hook, and - thus we can make relation_set() more efficient by only checking for the - first relation_set() call. - - :returns: True if relation_set accepts a file. - :rtype: bool - :raises: subprocess.CalledProcessError if the check fails. 
- """ - return "--file" in subprocess.check_output( - ["relation-set", "--help"], universal_newlines=True) - - def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): """Set relation information for the current unit""" relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] + accepts_file = "--file" in subprocess.check_output( + relation_cmd_line + ["--help"], universal_newlines=True) if app: relation_cmd_line.append('--app') if relation_id is not None: @@ -509,7 +508,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): # sites pass in things like dicts or numbers. if value is not None: settings[key] = "{}".format(value) - if _relation_set_accepts_file(): + if accepts_file: # --file was introduced in Juju 1.23.2. Use it by default if # available, since otherwise we'll break if the relation data is # too big. Ideally we should tell relation-set to read the data from @@ -1004,8 +1003,14 @@ def cmd_exists(cmd): @cached +@deprecate("moved to function_get()", log=log) def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs.""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_get`. + + Gets the value of an action parameter, or all key/value param pairs. + """ cmd = ['action-get'] if key is not None: cmd.append(key) @@ -1015,12 +1020,8 @@ def action_get(key=None): @cached -@deprecate("moved to action_get()", log=log) def function_get(key=None): - """ - .. deprecated:: - Gets the value of an action parameter, or all key/value param pairs. - """ + """Gets the value of an action parameter, or all key/value param pairs""" cmd = ['function-get'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1033,20 +1034,22 @@ def function_get(key=None): return function_data +@deprecate("moved to function_set()", log=log) def action_set(values): - """Sets the values to be returned after the action finishes.""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) -@deprecate("moved to action_set()", log=log) def function_set(values): - """ - .. deprecated:: - Sets the values to be returned after the function finishes. - """ + """Sets the values to be returned after the function finishes""" cmd = ['function-set'] # Fallback for older charms. if not cmd_exists('function-get'): @@ -1057,8 +1060,12 @@ def function_set(values): subprocess.check_call(cmd) +@deprecate("moved to function_fail()", log=log) def action_fail(message): """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + Sets the action status to failed and sets the error message. The results set by action_set are preserved. @@ -1066,14 +1073,10 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) -@deprecate("moved to action_fail()", log=log) def function_fail(message): - """ - .. deprecated:: - Sets the function status to failed and sets the error message. + """Sets the function status to failed and sets the error message. - The results set by function_set are preserved. - """ + The results set by function_set are preserved.""" cmd = ['function-fail'] # Fallback for older charms. 
if not cmd_exists('function-fail'): diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py index ef6c8ec..994ec8a 100644 --- a/charmhelpers/core/host.py +++ b/charmhelpers/core/host.py @@ -31,6 +31,7 @@ import subprocess import hashlib import functools import itertools +import six from contextlib import contextmanager from collections import OrderedDict, defaultdict @@ -114,33 +115,6 @@ def service_stop(service_name, **kwargs): return service('stop', service_name, **kwargs) -def service_enable(service_name, **kwargs): - """Enable a system service. - - The specified service name is managed via the system level init system. - Some init systems (e.g. upstart) require that additional arguments be - provided in order to directly control service instances whereas other init - systems allow for addressing instances of a service directly by name (e.g. - systemd). - - The kwargs allow for the additional parameters to be passed to underlying - init systems for those systems which require/allow for them. For example, - the ceph-osd upstart script requires the id parameter to be passed along - in order to identify which running daemon should be restarted. The follow- - ing example restarts the ceph-osd service for instance id=4: - - service_enable('ceph-osd', id=4) - - :param service_name: the name of the service to enable - :param **kwargs: additional parameters to pass to the init system when - managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs - are ignored for init systems not allowing additional - parameters via the commandline (systemd). - """ - return service('enable', service_name, **kwargs) - - def service_restart(service_name, **kwargs): """Restart a system service. @@ -161,7 +135,7 @@ def service_restart(service_name, **kwargs): :param service_name: the name of the service to restart :param **kwargs: additional parameters to pass to the init system when managing services. These will be passed as key=value - parameters to the init system's commandline. kwargs + parameters to the init system's commandline. kwargs are ignored for init systems not allowing additional parameters via the commandline (systemd). """ @@ -277,7 +251,7 @@ def service_resume(service_name, init_dir="/etc/init", return started -def service(action, service_name=None, **kwargs): +def service(action, service_name, **kwargs): """Control a system service. :param action: the action to take on the service @@ -286,12 +260,10 @@ def service(action, service_name=None, **kwargs): the form of key=value. 
""" if init_is_systemd(service_name=service_name): - cmd = ['systemctl', action] - if service_name is not None: - cmd.append(service_name) + cmd = ['systemctl', action, service_name] else: cmd = ['service', service_name, action] - for key, value in kwargs.items(): + for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) return subprocess.call(cmd) == 0 @@ -317,7 +289,7 @@ def service_running(service_name, **kwargs): if os.path.exists(_UPSTART_CONF.format(service_name)): try: cmd = ['status', service_name] - for key, value in kwargs.items(): + for key, value in six.iteritems(kwargs): parameter = '%s=%s' % (key, value) cmd.append(parameter) output = subprocess.check_output( @@ -592,7 +564,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444): with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) - if isinstance(content, str): + if six.PY3 and isinstance(content, six.string_types): content = content.encode('UTF-8') target.write(content) return @@ -995,7 +967,7 @@ def get_bond_master(interface): def list_nics(nic_type=None): """Return a list of nics of given type(s)""" - if isinstance(nic_type, str): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type @@ -1109,7 +1081,8 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False): try: chown(full, uid, gid) except (IOError, OSError) as e: - # Intended to ignore "file not found". + # Intended to ignore "file not found". Catching both to be + # compatible with both Python 2.7 and 3.x. if e.errno == errno.ENOENT: pass diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py index cc2d89f..0906c5c 100644 --- a/charmhelpers/core/host_factory/ubuntu.py +++ b/charmhelpers/core/host_factory/ubuntu.py @@ -30,7 +30,6 @@ UBUNTU_RELEASES = ( 'hirsute', 'impish', 'jammy', - 'kinetic', ) diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py index 8d217b5..9f88029 100644 --- a/charmhelpers/core/services/base.py +++ b/charmhelpers/core/services/base.py @@ -15,8 +15,9 @@ import os import json import inspect -from collections import OrderedDict -from collections.abc import Iterable +from collections import Iterable, OrderedDict + +import six from charmhelpers.core import host from charmhelpers.core import hookenv @@ -170,7 +171,10 @@ class ServiceManager(object): if not units: continue remote_service = units[0].split('/')[0] - argspec = inspect.getfullargspec(provider.provide_data) + if six.PY2: + argspec = inspect.getargspec(provider.provide_data) + else: + argspec = inspect.getfullargspec(provider.provide_data) if len(argspec.args) > 1: data = provider.provide_data(remote_service, service_ready) else: diff --git a/charmhelpers/core/services/helpers.py b/charmhelpers/core/services/helpers.py index 5bf62dd..3e6e30d 100644 --- a/charmhelpers/core/services/helpers.py +++ b/charmhelpers/core/services/helpers.py @@ -179,7 +179,7 @@ class RequiredConfig(dict): self.required_options = args self['config'] = hookenv.config() with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.safe_load(fp).get('options', {}) + self.config = yaml.load(fp).get('options', {}) def __bool__(self): for option in self.required_options: @@ -227,7 +227,7 @@ class StoredContext(dict): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'r') as file_stream: - data = 
yaml.safe_load(file_stream) + data = yaml.load(file_stream) if not data: raise OSError("%s is empty" % file_name) return data diff --git a/charmhelpers/core/strutils.py b/charmhelpers/core/strutils.py index 3136687..28c6b3f 100644 --- a/charmhelpers/core/strutils.py +++ b/charmhelpers/core/strutils.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six import re TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} @@ -26,8 +27,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY Returns True if value translates to True otherwise False. """ - if isinstance(value, str): - value = str(value) + if isinstance(value, six.string_types): + value = six.text_type(value) else: msg = "Unable to interpret non-string value '%s' as boolean" % (value) raise ValueError(msg) @@ -60,8 +61,8 @@ def bytes_from_string(value): 'P': 5, 'PB': 5, } - if isinstance(value, str): - value = str(value) + if isinstance(value, six.string_types): + value = six.text_type(value) else: msg = "Unable to interpret non-string value '%s' as bytes" % (value) raise ValueError(msg) diff --git a/charmhelpers/core/templating.py b/charmhelpers/core/templating.py index cb0213d..9014015 100644 --- a/charmhelpers/core/templating.py +++ b/charmhelpers/core/templating.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import sys from charmhelpers.core import host from charmhelpers.core import hookenv @@ -42,8 +43,9 @@ def render(source, target, context, owner='root', group='root', The rendered template will be written to the file as well as being returned as a string. - Note: Using this requires python3-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. + Note: Using this requires python-jinja2 or python3-jinja2; if it is not + installed, calling this will attempt to use charmhelpers.fetch.apt_install + to install it. """ try: from jinja2 import FileSystemLoader, Environment, exceptions @@ -55,7 +57,10 @@ def render(source, target, context, owner='root', group='root', 'charmhelpers.fetch to install it', level=hookenv.ERROR) raise - apt_install('python3-jinja2', fatal=True) + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) from jinja2 import FileSystemLoader, Environment, exceptions if template_loader: diff --git a/charmhelpers/fetch/__init__.py b/charmhelpers/fetch/__init__.py index 1283f25..9497ee0 100644 --- a/charmhelpers/fetch/__init__.py +++ b/charmhelpers/fetch/__init__.py @@ -20,7 +20,11 @@ from charmhelpers.core.hookenv import ( log, ) -from urllib.parse import urlparse, urlunparse +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse # The order of this list is very important. 
Handlers should be listed in from @@ -130,14 +134,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, str): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, str): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): diff --git a/charmhelpers/fetch/archiveurl.py b/charmhelpers/fetch/archiveurl.py index 0e35c90..d25587a 100644 --- a/charmhelpers/fetch/archiveurl.py +++ b/charmhelpers/fetch/archiveurl.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import os import hashlib import re @@ -25,21 +24,28 @@ from charmhelpers.payload.archive import ( get_archive_handler, extract, ) -from charmhelpers.core.hookenv import ( - env_proxy_settings, -) from charmhelpers.core.host import mkdir, check_hash -from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ProxyHandler -) -from urllib.parse import urlparse, urlunparse, parse_qs -from urllib.error import URLError +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: @@ -48,6 +54,7 @@ def splituser(host): def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' _passwdprog = re.compile('^([^:]*):(.*)$', re.S) match = _passwdprog.match(user) if match: @@ -55,20 +62,6 @@ def splitpasswd(user): return user, None -@contextlib.contextmanager -def proxy_env(): - """ - Creates a context which temporarily modifies the proxy settings in os.environ. - """ - restore = {**os.environ} # Copy the current os.environ - juju_proxies = env_proxy_settings() or {} - os.environ.update(**juju_proxies) # Insert or Update the os.environ - yield os.environ - for key in juju_proxies: - del os.environ[key] # remove any keys which were added or updated - os.environ.update(**restore) # restore any original values - - class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
@@ -99,7 +92,6 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         # propagate all exceptions
         # URLError, OSError, etc
         proto, netloc, path, params, query, fragment = urlparse(source)
-        handlers = []
         if proto in ('http', 'https'):
             auth, barehost = splituser(netloc)
             if auth is not None:
@@ -109,13 +101,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
                 # Realm is set to None in add_password to force the username and password
                 # to be used whatever the realm
                 passman.add_password(None, source, username, password)
-                handlers.append(HTTPBasicAuthHandler(passman))
-
-        with proxy_env():
-            handlers.append(ProxyHandler())
-            opener = build_opener(*handlers)
-            install_opener(opener)
-            response = urlopen(source)
+                authhandler = HTTPBasicAuthHandler(passman)
+                opener = build_opener(authhandler)
+                install_opener(opener)
+        response = urlopen(source)
         try:
             with open(dest, 'wb') as dest_file:
                 dest_file.write(response.read())
@@ -161,7 +150,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
             raise UnhandledSource(e.strerror)
         options = parse_qs(url_parts.fragment)
         for key, value in options.items():
-            algorithms = hashlib.algorithms_available
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
             if key in algorithms:
                 if len(value) != 1:
                     raise TypeError(
diff --git a/charmhelpers/fetch/centos.py b/charmhelpers/fetch/centos.py
index f849201..a91dcff 100644
--- a/charmhelpers/fetch/centos.py
+++ b/charmhelpers/fetch/centos.py
@@ -15,6 +15,7 @@
 import subprocess
 import os
 import time
+import six
 import yum
 
 from tempfile import NamedTemporaryFile
@@ -41,7 +42,7 @@ def install(packages, options=None, fatal=False):
     if options is not None:
         cmd.extend(options)
     cmd.append('install')
-    if isinstance(packages, str):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -70,7 +71,7 @@ def update(fatal=False):
 def purge(packages, fatal=False):
     """Purge one or more packages."""
     cmd = ['yum', '--assumeyes', 'remove']
-    if isinstance(packages, str):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -82,7 +83,7 @@ def yum_search(packages):
     """Search for a package."""
     output = {}
     cmd = ['yum', 'search']
-    if isinstance(packages, str):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
diff --git a/charmhelpers/fetch/python/debug.py b/charmhelpers/fetch/python/debug.py
index dd5cca8..757135e 100644
--- a/charmhelpers/fetch/python/debug.py
+++ b/charmhelpers/fetch/python/debug.py
@@ -15,6 +15,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
+
 import atexit
 import sys
diff --git a/charmhelpers/fetch/python/packages.py b/charmhelpers/fetch/python/packages.py
index 93f1fa3..6004835 100644
--- a/charmhelpers/fetch/python/packages.py
+++ b/charmhelpers/fetch/python/packages.py
@@ -16,6 +16,7 @@
 # limitations under the License.
 
 import os
+import six
 import subprocess
 import sys
 
@@ -39,7 +40,10 @@ def pip_execute(*args, **kwargs):
             from pip import main as _pip_execute
         except ImportError:
             apt_update()
-            apt_install('python3-pip')
+            if six.PY2:
+                apt_install('python-pip')
+            else:
+                apt_install('python3-pip')
             from pip import main as _pip_execute
         _pip_execute(*args, **kwargs)
     finally:
@@ -136,8 +140,12 @@ def pip_list():
 
 def pip_create_virtualenv(path=None):
     """Create an isolated Python environment."""
-    apt_install(['python3-virtualenv', 'virtualenv'])
-    extra_flags = ['--python=python3']
+    if six.PY2:
+        apt_install('python-virtualenv')
+        extra_flags = []
+    else:
+        apt_install(['python3-virtualenv', 'virtualenv'])
+        extra_flags = ['--python=python3']
 
     if path:
         venv_path = path
diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py
index 93b9276..cf8328f 100644
--- a/charmhelpers/fetch/ubuntu.py
+++ b/charmhelpers/fetch/ubuntu.py
@@ -13,8 +13,10 @@
 # limitations under the License.
 
 from collections import OrderedDict
+import os
 import platform
 import re
+import six
 import subprocess
 import sys
 import time
@@ -222,14 +224,6 @@ CLOUD_ARCHIVE_POCKETS = {
     'yoga/proposed': 'focal-proposed/yoga',
     'focal-yoga/proposed': 'focal-proposed/yoga',
     'focal-proposed/yoga': 'focal-proposed/yoga',
-    # Zed
-    'zed': 'jammy-updates/zed',
-    'jammy-zed': 'jammy-updates/zed',
-    'jammy-zed/updates': 'jammy-updates/zed',
-    'jammy-updates/zed': 'jammy-updates/zed',
-    'zed/proposed': 'jammy-proposed/zed',
-    'jammy-zed/proposed': 'jammy-proposed/zed',
-    'jammy-proposed/zed': 'jammy-proposed/zed',
 }
 
 
@@ -256,7 +250,6 @@ OPENSTACK_RELEASES = (
     'wallaby',
     'xena',
     'yoga',
-    'zed',
 )
 
 
@@ -283,7 +276,6 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('hirsute', 'wallaby'),
     ('impish', 'xena'),
     ('jammy', 'yoga'),
-    ('kinetic', 'zed'),
 ])
 
 
@@ -369,7 +361,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
     cmd = ['apt-get', '--assume-yes']
     cmd.extend(options)
     cmd.append('install')
-    if isinstance(packages, str):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -421,7 +413,7 @@ def apt_purge(packages, fatal=False):
     :raises: subprocess.CalledProcessError
     """
     cmd = ['apt-get', '--assume-yes', 'purge']
-    if isinstance(packages, str):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -448,7 +440,7 @@ def apt_mark(packages, mark, fatal=False):
     """Flag one or more packages using apt-mark."""
     log("Marking {} as {}".format(packages, mark))
     cmd = ['apt-mark', mark]
-    if isinstance(packages, str):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -493,7 +485,10 @@ def import_key(key):
     if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
            '-----END PGP PUBLIC KEY BLOCK-----' in key):
         log("Writing provided PGP key in the binary format", level=DEBUG)
-        key_bytes = key.encode('utf-8')
+        if six.PY3:
+            key_bytes = key.encode('utf-8')
+        else:
+            key_bytes = key
         key_name = _get_keyid_by_gpg_key(key_bytes)
         key_gpg = _dearmor_gpg_key(key_bytes)
         _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
@@ -533,8 +528,9 @@ def _get_keyid_by_gpg_key(key_material):
                           stderr=subprocess.PIPE,
                           stdin=subprocess.PIPE)
     out, err = ps.communicate(input=key_material)
-    out = out.decode('utf-8')
-    err = err.decode('utf-8')
+    if six.PY3:
+        out = out.decode('utf-8')
+        err = err.decode('utf-8')
     if 'gpg: no valid OpenPGP data found.' in err:
         raise GPGKeyError('Invalid GPG key material provided')
     # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
@@ -592,7 +588,8 @@ def _dearmor_gpg_key(key_asc):
                           stdin=subprocess.PIPE)
     out, err = ps.communicate(input=key_asc)
     # no need to decode output as it is binary (invalid utf-8), only error
-    err = err.decode('utf-8')
+    if six.PY3:
+        err = err.decode('utf-8')
     if 'gpg: no valid OpenPGP data found.' in err:
         raise GPGKeyError('Invalid GPG key material. Check your network setup'
                           ' (MTU, routing, DNS) and/or proxy server settings'
@@ -696,7 +693,7 @@ def add_source(source, key=None, fail_invalid=False):
     ])
     if source is None:
         source = ''
-    for r, fn in _mapping.items():
+    for r, fn in six.iteritems(_mapping):
         m = re.match(r, source)
         if m:
             if key:
@@ -729,7 +726,7 @@ def _add_proposed():
     """
     release = get_distrib_codename()
     arch = platform.machine()
-    if arch not in ARCH_TO_PROPOSED_POCKET.keys():
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
         raise SourceConfigError("Arch {} not supported for (distro-)proposed"
                                 .format(arch))
     with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
@@ -916,8 +913,9 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
 
     kwargs = {}
     if quiet:
-        kwargs['stdout'] = subprocess.DEVNULL
-        kwargs['stderr'] = subprocess.DEVNULL
+        devnull = os.devnull if six.PY2 else subprocess.DEVNULL
+        kwargs['stdout'] = devnull
+        kwargs['stderr'] = devnull
 
     if not retry_message:
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -959,8 +957,9 @@ def _run_apt_command(cmd, fatal=False, quiet=False):
     else:
         kwargs = {}
         if quiet:
-            kwargs['stdout'] = subprocess.DEVNULL
-            kwargs['stderr'] = subprocess.DEVNULL
+            devnull = os.devnull if six.PY2 else subprocess.DEVNULL
+            kwargs['stdout'] = devnull
+            kwargs['stderr'] = devnull
         subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs)
 
 
@@ -990,7 +989,7 @@ def get_installed_version(package):
         Version object
     """
     cache = apt_cache()
-    dpkg_result = cache.dpkg_list([package]).get(package, {})
+    dpkg_result = cache._dpkg_list([package]).get(package, {})
     current_ver = None
     installed_version = dpkg_result.get('version')
 
diff --git a/charmhelpers/fetch/ubuntu_apt_pkg.py b/charmhelpers/fetch/ubuntu_apt_pkg.py
index 6da355f..436e177 100644
--- a/charmhelpers/fetch/ubuntu_apt_pkg.py
+++ b/charmhelpers/fetch/ubuntu_apt_pkg.py
@@ -40,9 +40,6 @@
 import os
 import subprocess
 import sys
 
-from charmhelpers import deprecate
-from charmhelpers.core.hookenv import log
-
 class _container(dict):
     """Simple container for attributes."""
@@ -82,7 +79,7 @@ class Cache(object):
         apt_result = self._apt_cache_show([package])[package]
         apt_result['name'] = apt_result.pop('package')
         pkg = Package(apt_result)
-        dpkg_result = self.dpkg_list([package]).get(package, {})
+        dpkg_result = self._dpkg_list([package]).get(package, {})
         current_ver = None
         installed_version = dpkg_result.get('version')
         if installed_version:
@@ -91,29 +88,9 @@ class Cache(object):
         pkg.architecture = dpkg_result.get('architecture')
         return pkg
 
-    @deprecate("use dpkg_list() instead.", "2022-05", log=log)
     def _dpkg_list(self, packages):
-        return self.dpkg_list(packages)
-
-    def dpkg_list(self, packages):
         """Get data from system dpkg database for package.
 
-        Note that this method is also useful for querying package names
-        containing wildcards, for example
-
-            apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*'])
-
-        may return
-
-            {
-                'nvidia-vgpu-ubuntu-470': {
-                    'name': 'nvidia-vgpu-ubuntu-470',
-                    'version': '470.68',
-                    'architecture': 'amd64',
-                    'description': 'NVIDIA vGPU driver - version 470.68'
-                }
-            }
-
         :param packages: Packages to get data from
         :type packages: List[str]
         :returns: Structured data about installed packages, keys like
diff --git a/metadata.yaml b/metadata.yaml
index bb795b7..d31618f 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -7,6 +7,7 @@ description: |
 tags:
 - misc
 series:
+- focal
 - jammy
 requires:
   juju-info:
diff --git a/osci.yaml b/osci.yaml
index e9641dd..221eb97 100644
--- a/osci.yaml
+++ b/osci.yaml
@@ -1,9 +1,9 @@
 - project:
     templates:
+      - charm-unit-jobs-py38
       - charm-unit-jobs-py310
-      - charm-zed-functional-jobs
+      - charm-yoga-functional-jobs
     vars:
       needs_charm_build: true
      charm_build_name: hacluster
       build_type: charmcraft
-      charmcraft_channel: 2.0/stable
diff --git a/requirements.txt b/requirements.txt
index 3b1cb7b..ead6e89 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,19 +11,14 @@ pbr==5.6.0
 simplejson>=2.2.0
 netifaces>=0.10.4
 
-# NOTE: newer versions of cryptography require a Rust compiler to build,
-# see
-# * https://github.com/openstack-charmers/zaza/issues/421
-# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html
-#
-cryptography<3.4
-
 # Strange import error with newer netaddr:
 netaddr>0.7.16,<0.8.0
 
 Jinja2>=2.6  # BSD License (3 clause)
 six>=1.9.0
 
-dnspython
+# dnspython 2.0.0 dropped py3.5 support
+dnspython<2.0.0; python_version < '3.6'
+dnspython; python_version >= '3.6'
 
 psutil>=1.1.1,<2.0.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 4ef87dc..823463d 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -8,6 +8,7 @@
 # all of its own requirements and if it doesn't, fix it there.
 #
 pyparsing<3.0.0  # aodhclient is pinned in zaza and needs pyparsing < 3.0.0, but cffi also needs it, so pin here.
+cffi==1.14.6; python_version < '3.6'  # cffi 1.15.0 drops support for py35.
 setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 
 requests>=2.18.4
@@ -18,12 +19,26 @@ stestr>=2.2.0
 
 # https://github.com/mtreinish/stestr/issues/145
 cliff<3.0.0
 
+# Dependencies of stestr. Newer versions use keywords that didn't exist in
+# python 3.5 yet (e.g. "ModuleNotFoundError")
+importlib-metadata<3.0.0; python_version < '3.6'
+importlib-resources<3.0.0; python_version < '3.6'
+
+# Some Zuul nodes sometimes pull newer versions of these dependencies which
+# dropped support for python 3.5:
+osprofiler<2.7.0;python_version<'3.6'
+stevedore<1.31.0;python_version<'3.6'
+debtcollector<1.22.0;python_version<'3.6'
+oslo.utils<=3.41.0;python_version<'3.6'
+
 coverage>=4.5.2
 pyudev  # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
 git+https://github.com/openstack-charmers/zaza.git#egg=zaza
 git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
 
 # Needed for charm-glance:
-git+https://opendev.org/openstack/tempest.git#egg=tempest
+git+https://opendev.org/openstack/tempest.git#egg=tempest;python_version>='3.8'
+tempest<31.0.0;python_version<'3.8'
+tempest<24.0.0;python_version<'3.6'
 croniter  # needed for charm-rabbitmq-server unit tests
diff --git a/tests/bundles/jammy-zed.yaml b/tests/bundles/focal-yoga.yaml
similarity index 93%
rename from tests/bundles/jammy-zed.yaml
rename to tests/bundles/focal-yoga.yaml
index 9b1b337..0b270a1 100644
--- a/tests/bundles/jammy-zed.yaml
+++ b/tests/bundles/focal-yoga.yaml
@@ -1,7 +1,7 @@
 variables:
-  openstack-origin: &openstack-origin cloud:jammy-zed
+  openstack-origin: &openstack-origin cloud:focal-yoga
 
-series: jammy
+series: focal
 
 machines:
   '0':
diff --git a/tests/bundles/kinetic-zed.yaml b/tests/bundles/kinetic-zed.yaml
deleted file mode 100644
index d74fede..0000000
--- a/tests/bundles/kinetic-zed.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-variables:
-  openstack-origin: &openstack-origin distro
-
-series: kinetic
-
-machines:
-  '0':
-    constraints: mem=3072M
-  '1':
-    constraints: mem=3072M
-  '2':
-    constraints: mem=3072M
-  '3':
-  '4':
-  '5':
-
-applications:
-
-  keystone-mysql-router:
-    charm: ch:mysql-router
-    channel: latest/edge
-
-  mysql-innodb-cluster:
-    charm: ch:mysql-innodb-cluster
-    num_units: 3
-    options:
-      source: *openstack-origin
-    to:
-    - '0'
-    - '1'
-    - '2'
-    channel: latest/edge
-
-  keystone:
-    charm: ch:keystone
-    num_units: 3
-    options:
-      token-expiration: 60
-      openstack-origin: *openstack-origin
-    to:
-    - '3'
-    - '4'
-    - '5'
-    channel: latest/edge
-
-  hacluster:
-    charm: ../../hacluster.charm
-    subordinate-to:
-    - keystone
-
-relations:
-- - 'keystone:shared-db'
-  - 'keystone-mysql-router:shared-db'
-- - 'hacluster:ha'
-  - 'keystone:ha'
-- - "keystone-mysql-router:db-router"
-  - "mysql-innodb-cluster:db-router"
diff --git a/tests/tests.yaml b/tests/tests.yaml
index b11fae9..49f94be 100644
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -1,15 +1,13 @@
 charm_name: hacluster
 
 smoke_bundles:
-  - jammy-yoga
+  - focal-yoga
 
 gate_bundles:
-  - jammy-yoga
+  - focal-yoga
 
 dev_bundles:
   - jammy-yoga
-  - jammy-zed
-  - kinetic-zed
 
 configure:
   - zaza.openstack.charm_tests.keystone.setup.add_demo_user
@@ -23,4 +21,4 @@ tests_options:
   principle-app-name: keystone
   hacluster-charm-name: hacluster
   force_deploy:
-    - kinetic-zed
+    - jammy-yoga
diff --git a/tox.ini b/tox.ini
index bddbd1f..acbcb1f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -48,9 +48,34 @@ basepython = python3
 deps = -r{toxinidir}/build-requirements.txt
 commands =
     charmcraft clean
-    charmcraft -v pack
+    charmcraft -v build
     {toxinidir}/rename.sh
 
+[testenv:py35]
+basepython = python3.5
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
+[testenv:py36]
+basepython = python3.6
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
+[testenv:py37]
+basepython = python3.7
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
+[testenv:py38]
+basepython = python3.8
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
+[testenv:py39]
+basepython = python3.9
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:py310]
 basepython = python3.10
 deps = -r{toxinidir}/requirements.txt
@@ -64,7 +89,7 @@ deps = -r{toxinidir}/requirements.txt
 
 [testenv:pep8]
 basepython = python3
 deps = flake8==3.9.2
-       git+https://github.com/juju/charm-tools.git
+       charm-tools==2.8.3
 commands = flake8 {posargs} hooks unit_tests tests actions lib files
            charm-proof