Charmhelper sync pulling coordinator

Change-Id: Ia92c8d8544534ffcab9a7fd31ba6f35a3bf076ca
Liam Young 2022-01-06 14:10:08 +00:00
parent 2212383158
commit c63cd1b99f
46 changed files with 1074 additions and 436 deletions

View File

@@ -2,6 +2,7 @@ destination: charmhelpers
 repo: https://github.com/juju/charm-helpers
 include:
     - fetch
+    - coordinator
     - core
     - cli
     - osplatform
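The new include above is the point of this sync: the charm now ships charmhelpers.coordinator. A minimal sketch of how a charm might use it for rolling restarts, assuming the module's Serial lock API (acquire()/granted()); the lock name and service name are illustrative:

from charmhelpers import coordinator
from charmhelpers.core import hookenv, host


def maybe_restart():
    # Ask for the 'restart' lock; the leader grants it to one unit at a
    # time, so restarts roll through the application serially.
    serial = coordinator.Serial()
    serial.acquire('restart')
    if serial.granted('restart'):
        host.service_restart('my-service')  # illustrative service name
    else:
        hookenv.log('Restart deferred until the restart lock is granted')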

View File

@@ -14,30 +14,15 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
-from __future__ import print_function
-from __future__ import absolute_import
-
 import functools
 import inspect
 import subprocess
-import sys
-
-try:
-    import six  # NOQA:F401
-except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
-    import six  # NOQA:F401
 
 try:
     import yaml  # NOQA:F401
 except ImportError:
-    if sys.version_info.major == 2:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
-    else:
-        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # NOQA:F401

View File

@@ -16,9 +16,6 @@ import inspect
 import argparse
 import sys
 
-import six
-from six.moves import zip
-
 import charmhelpers.core.unitdata
@@ -149,10 +146,7 @@ class CommandLine(object):
     def run(self):
         "Run cli, processing arguments and executing subcommands."
         arguments = self.argument_parser.parse_args()
-        if six.PY2:
-            argspec = inspect.getargspec(arguments.func)
-        else:
-            argspec = inspect.getfullargspec(arguments.func)
+        argspec = inspect.getfullargspec(arguments.func)
         vargs = []
         for arg in argspec.args:
             vargs.append(getattr(arguments, arg))
@@ -177,10 +171,7 @@ def describe_arguments(func):
     Analyze a function's signature and return a data structure suitable for
     passing in as arguments to an argparse parser's add_argument() method."""
-    if six.PY2:
-        argspec = inspect.getargspec(func)
-    else:
-        argspec = inspect.getfullargspec(func)
+    argspec = inspect.getfullargspec(func)
     # we should probably raise an exception somewhere if func includes **kwargs
     if argspec.defaults:
         positional_args = argspec.args[:-len(argspec.defaults)]

View File

@@ -28,6 +28,7 @@ import subprocess
 import yaml
 
 from charmhelpers.core.hookenv import (
+    application_name,
     config,
     hook_name,
     local_unit,
@@ -174,7 +175,8 @@ define service {{
         if os.path.exists(os.path.join(path, parts[0])):
             command = os.path.join(path, parts[0])
             if len(parts) > 1:
-                command += " " + " ".join(parts[1:])
+                safe_args = [shlex.quote(arg) for arg in parts[1:]]
+                command += " " + " ".join(safe_args)
             return command
         log('Check command not found: {}'.format(parts[0]))
         return ''
@@ -520,3 +522,39 @@ def remove_deprecated_check(nrpe, deprecated_services):
     for dep_svc in deprecated_services:
         log('Deprecated service: {}'.format(dep_svc))
         nrpe.remove_check(shortname=dep_svc)
+
+
+def add_deferred_restarts_check(nrpe):
+    """
+    Add NRPE check for services with deferred restarts.
+
+    :param NRPE nrpe: NRPE object to add check to
+    """
+    unit_name = local_unit().replace('/', '-')
+    shortname = unit_name + '_deferred_restarts'
+    check_cmd = 'check_deferred_restarts.py --application {}'.format(
+        application_name())
+
+    log('Adding deferred restarts nrpe check: {}'.format(shortname))
+    nrpe.add_check(
+        shortname=shortname,
+        description='Check deferred service restarts {}'.format(unit_name),
+        check_cmd=check_cmd)
+
+
+def remove_deferred_restarts_check(nrpe):
+    """
+    Remove NRPE check for services with deferred service restarts.
+
+    :param NRPE nrpe: NRPE object to remove check from
+    """
+    unit_name = local_unit().replace('/', '-')
+    shortname = unit_name + '_deferred_restarts'
+    check_cmd = 'check_deferred_restarts.py --application {}'.format(
+        application_name())
+
+    log('Removing deferred restarts nrpe check: {}'.format(shortname))
+    nrpe.remove_check(
+        shortname=shortname,
+        description='Check deferred service restarts {}'.format(unit_name),
+        check_cmd=check_cmd)
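A charm consuming this helper would typically call it from whatever hook refreshes its NRPE configuration. A minimal sketch under that assumption (the hook function name is illustrative; NRPE and add_deferred_restarts_check come from the module above):

from charmhelpers.contrib.charmsupport import nrpe


def update_nrpe_config():
    # Illustrative charm-side hook: register the deferred-restarts check
    # alongside the charm's other checks, then write out the NRPE config.
    nrpe_compat = nrpe.NRPE()
    nrpe.add_deferred_restarts_check(nrpe_compat)
    nrpe_compat.write()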

View File

@@ -32,8 +32,6 @@ import time
 
 from socket import gethostname as get_unit_hostname
 
-import six
-
 from charmhelpers.core.hookenv import (
     log,
     relation_ids,
@@ -125,16 +123,16 @@ def is_crm_dc():
     """
     cmd = ['crm', 'status']
     try:
-        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-        if not isinstance(status, six.text_type):
-            status = six.text_type(status, "utf-8")
+        status = subprocess.check_output(
+            cmd, stderr=subprocess.STDOUT).decode('utf-8')
     except subprocess.CalledProcessError as ex:
         raise CRMDCNotFound(str(ex))
 
     current_dc = ''
     for line in status.split('\n'):
         if line.startswith('Current DC'):
-            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
+            # Current DC: juju-lytrusty-machine-2 (168108163)
+            # - partition with quorum
             current_dc = line.split(':')[1].split()[0]
     if current_dc == get_unit_hostname():
         return True
@@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False):
         return is_crm_dc()
     cmd = ['crm', 'resource', 'show', resource]
     try:
-        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-        if not isinstance(status, six.text_type):
-            status = six.text_type(status, "utf-8")
+        status = subprocess.check_output(
+            cmd, stderr=subprocess.STDOUT).decode('utf-8')
     except subprocess.CalledProcessError:
         status = None

View File

@@ -14,7 +14,6 @@
 
 import os
 import re
-import six
 import subprocess
@@ -95,9 +94,7 @@ class ApacheConfContext(object):
         settings = utils.get_settings('apache')
         ctxt = settings['hardening']
 
-        out = subprocess.check_output(['apache2', '-v'])
-        if six.PY3:
-            out = out.decode('utf-8')
+        out = subprocess.check_output(['apache2', '-v']).decode('utf-8')
         ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
                                            out).group(1)
         ctxt['apache_icondir'] = '/usr/share/apache2/icons/'

View File

@@ -15,8 +15,6 @@
 import re
 import subprocess
 
-import six
-
 from charmhelpers.core.hookenv import (
     log,
     INFO,
@@ -35,7 +33,7 @@ class DisabledModuleAudit(BaseAudit):
     def __init__(self, modules):
         if modules is None:
             self.modules = []
-        elif isinstance(modules, six.string_types):
+        elif isinstance(modules, str):
             self.modules = [modules]
         else:
             self.modules = modules
@@ -68,9 +66,7 @@ class DisabledModuleAudit(BaseAudit):
     @staticmethod
     def _get_loaded_modules():
         """Returns the modules which are enabled in Apache."""
-        output = subprocess.check_output(['apache2ctl', '-M'])
-        if six.PY3:
-            output = output.decode('utf-8')
+        output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8')
         modules = []
         for line in output.splitlines():
             # Each line of the enabled module output looks like:

View File

@@ -12,9 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import absolute_import  # required for external apt import
-from six import string_types
-
 from charmhelpers.fetch import (
     apt_cache,
     apt_purge
@@ -51,7 +48,7 @@ class RestrictedPackages(BaseAudit):
     def __init__(self, pkgs, **kwargs):
         super(RestrictedPackages, self).__init__(**kwargs)
-        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
+        if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'):
             self.pkgs = pkgs.split()
         else:
             self.pkgs = pkgs

View File

@@ -23,7 +23,6 @@ from subprocess import (
     check_call,
 )
 from traceback import format_exc
-from six import string_types
 from stat import (
     S_ISGID,
     S_ISUID
@@ -63,7 +62,7 @@ class BaseFileAudit(BaseAudit):
         """
         super(BaseFileAudit, self).__init__(*args, **kwargs)
         self.always_comply = always_comply
-        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+        if isinstance(paths, str) or not hasattr(paths, '__iter__'):
             self.paths = [paths]
         else:
             self.paths = paths

View File

@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import six
-
 from collections import OrderedDict
 
 from charmhelpers.core.hookenv import (
@@ -53,18 +51,17 @@ def harden(overrides=None):
         overrides = []
 
     def _harden_inner1(f):
-        # As this has to be py2.7 compat, we can't use nonlocal.  Use a trick
-        # to capture the dictionary that can then be updated.
-        _logged = {'done': False}
+        _logged = False
 
         def _harden_inner2(*args, **kwargs):
             # knock out hardening via a config var; normally it won't get
             # disabled.
+            nonlocal _logged
             if _DISABLE_HARDENING_FOR_UNIT_TEST:
                 return f(*args, **kwargs)
-            if not _logged['done']:
+            if not _logged:
                 log("Hardening function '%s'" % (f.__name__), level=DEBUG)
-                _logged['done'] = True
+                _logged = True
             RUN_CATALOG = OrderedDict([('os', run_os_checks),
                                        ('ssh', run_ssh_checks),
                                        ('mysql', run_mysql_checks),
@@ -74,7 +71,7 @@ def harden(overrides=None):
             if enabled:
                 modules_to_run = []
                 # modules will always be performed in the following order
-                for module, func in six.iteritems(RUN_CATALOG):
+                for module, func in RUN_CATALOG.items():
                     if module in enabled:
                         enabled.remove(module)
                         modules_to_run.append(func)
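For orientation, the decorator above is normally applied directly to charm hooks; a minimal sketch assuming the usual @harden() usage (the hook body is illustrative):

from charmhelpers.contrib.hardening.harden import harden


@harden()
def config_changed():
    # Illustrative hook: the enabled hardening modules (os, ssh, mysql,
    # apache) run before this body, and the "Hardening function ..." message
    # is only logged once per process via the _logged flag above.
    pass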

View File

@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from six import string_types
-
 from charmhelpers.contrib.hardening.audits.file import TemplatedFile
 from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
 from charmhelpers.contrib.hardening import utils
@@ -41,7 +39,7 @@ class LoginContext(object):
         # a string assume it to be octal and turn it into an octal
         # string.
         umask = settings['environment']['umask']
-        if not isinstance(umask, string_types):
+        if not isinstance(umask, str):
             umask = '%s' % oct(umask)
 
         ctxt = {

View File

@@ -15,7 +15,6 @@
 import os
 import platform
 import re
-import six
 import subprocess
 
 from charmhelpers.core.hookenv import (
@@ -183,9 +182,9 @@ class SysCtlHardeningContext(object):
             ctxt['sysctl'][key] = d[2] or None
 
-        # Translate for python3
-        return {'sysctl_settings':
-                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+        return {
+            'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()]
+        }
 
 
 class SysctlConf(TemplatedFile):

View File

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import six
 import subprocess
 
 from charmhelpers.core.hookenv import (
@@ -82,6 +81,6 @@ class MySQLConfContext(object):
     """
     def __call__(self):
         settings = utils.get_settings('mysql')
-        # Translate for python3
-        return {'mysql_settings':
-                [(k, v) for k, v in six.iteritems(settings['security'])]}
+        return {
+            'mysql_settings': [(k, v) for k, v in settings['security'].items()]
+        }

View File

@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import os
-import six
 
 from charmhelpers.core.hookenv import (
     log,
@@ -27,10 +26,7 @@ except ImportError:
     from charmhelpers.fetch import apt_install
     from charmhelpers.fetch import apt_update
     apt_update(fatal=True)
-    if six.PY2:
-        apt_install('python-jinja2', fatal=True)
-    else:
-        apt_install('python3-jinja2', fatal=True)
+    apt_install('python3-jinja2', fatal=True)
     from jinja2 import FileSystemLoader, Environment

View File

@@ -16,7 +16,6 @@ import glob
 import grp
 import os
 import pwd
-import six
 import yaml
 
 from charmhelpers.core.hookenv import (
@@ -91,7 +90,7 @@ def _apply_overrides(settings, overrides, schema):
     :returns: dictionary of modules config with user overrides applied.
     """
     if overrides:
-        for k, v in six.iteritems(overrides):
+        for k, v in overrides.items():
             if k in schema:
                 if schema[k] is None:
                     settings[k] = v

View File

@@ -15,7 +15,6 @@
 import glob
 import re
 import subprocess
-import six
 import socket
 
 from functools import partial
@@ -39,20 +38,14 @@ try:
     import netifaces
 except ImportError:
     apt_update(fatal=True)
-    if six.PY2:
-        apt_install('python-netifaces', fatal=True)
-    else:
-        apt_install('python3-netifaces', fatal=True)
+    apt_install('python3-netifaces', fatal=True)
     import netifaces
 
 try:
     import netaddr
 except ImportError:
     apt_update(fatal=True)
-    if six.PY2:
-        apt_install('python-netaddr', fatal=True)
-    else:
-        apt_install('python3-netaddr', fatal=True)
+    apt_install('python3-netaddr', fatal=True)
     import netaddr
@@ -462,15 +455,12 @@ def ns_query(address):
     try:
         import dns.resolver
     except ImportError:
-        if six.PY2:
-            apt_install('python-dnspython', fatal=True)
-        else:
-            apt_install('python3-dnspython', fatal=True)
+        apt_install('python3-dnspython', fatal=True)
         import dns.resolver
 
     if isinstance(address, dns.name.Name):
         rtype = 'PTR'
-    elif isinstance(address, six.string_types):
+    elif isinstance(address, str):
         rtype = 'A'
     else:
         return None
@@ -513,10 +503,7 @@ def get_hostname(address, fqdn=True):
         try:
             import dns.reversename
         except ImportError:
-            if six.PY2:
-                apt_install("python-dnspython", fatal=True)
-            else:
-                apt_install("python3-dnspython", fatal=True)
+            apt_install("python3-dnspython", fatal=True)
             import dns.reversename
 
         rev = dns.reversename.from_address(address)

View File

@@ -30,8 +30,6 @@ from subprocess import (
     check_output,
     CalledProcessError)
 
-import six
-
 import charmhelpers.contrib.storage.linux.ceph as ch_ceph
 
 from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
@@ -130,10 +128,7 @@ except ImportError:
 try:
     import psutil
 except ImportError:
-    if six.PY2:
-        apt_install('python-psutil', fatal=True)
-    else:
-        apt_install('python3-psutil', fatal=True)
+    apt_install('python3-psutil', fatal=True)
     import psutil
 
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@@ -150,10 +145,7 @@ def ensure_packages(packages):
 
 
 def context_complete(ctxt):
-    _missing = []
-    for k, v in six.iteritems(ctxt):
-        if v is None or v == '':
-            _missing.append(k)
+    _missing = [k for k, v in ctxt.items() if v is None or v == '']
 
     if _missing:
         log('Missing required data: %s' % ' '.join(_missing), level=INFO)
@@ -180,7 +172,7 @@ class OSContextGenerator(object):
         # Fresh start
         self.complete = False
         self.missing_data = []
-        for k, v in six.iteritems(ctxt):
+        for k, v in ctxt.items():
             if v is None or v == '':
                 if k not in self.missing_data:
                     self.missing_data.append(k)
@@ -1111,10 +1103,14 @@ class ApacheSSLContext(OSContextGenerator):
             endpoint = resolve_address(net_type)
             addresses.append((addr, endpoint))
 
-        return sorted(set(addresses))
+        # Log the set of addresses to have a trail log and capture if tuples
+        # change over time in the same unit (LP: #1952414).
+        sorted_addresses = sorted(set(addresses))
+        log('get_network_addresses: {}'.format(sorted_addresses))
+        return sorted_addresses
 
     def __call__(self):
-        if isinstance(self.external_ports, six.string_types):
+        if isinstance(self.external_ports, str):
             self.external_ports = [self.external_ports]
 
         if not self.external_ports or not https():
@@ -1531,9 +1527,9 @@ class SubordinateConfigContext(OSContextGenerator):
                     continue
 
                 sub_config = sub_config[self.config_file]
-                for k, v in six.iteritems(sub_config):
+                for k, v in sub_config.items():
                     if k == 'sections':
-                        for section, config_list in six.iteritems(v):
+                        for section, config_list in v.items():
                             log("adding section '%s'" % (section),
                                 level=DEBUG)
                             if ctxt[k].get(section):
@@ -1887,8 +1883,11 @@ class DataPortContext(NeutronPortContext):
             normalized.update({port: port for port in resolved
                                if port in ports})
             if resolved:
-                return {normalized[port]: bridge for port, bridge in
-                        six.iteritems(portmap) if port in normalized.keys()}
+                return {
+                    normalized[port]: bridge
+                    for port, bridge in portmap.items()
+                    if port in normalized.keys()
+                }
 
         return None
@@ -2291,15 +2290,10 @@ class HostInfoContext(OSContextGenerator):
         name = name or socket.gethostname()
        fqdn = ''
 
-        if six.PY2:
-            exc = socket.error
-        else:
-            exc = OSError
-
         try:
             addrs = socket.getaddrinfo(
                 name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
-        except exc:
+        except OSError:
             pass
         else:
             for addr in addrs:
@@ -2416,12 +2410,12 @@ class DHCPAgentContext(OSContextGenerator):
         existing_ovs_use_veth = None
         # If there is a dhcp_agent.ini file read the current setting
         if os.path.isfile(DHCP_AGENT_INI):
-            # config_ini does the right thing and returns None if the setting is
-            # commented.
+            # config_ini does the right thing and returns None if the setting
+            # is commented.
            existing_ovs_use_veth = (
                config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth"))
         # Convert to Bool if necessary
-        if isinstance(existing_ovs_use_veth, six.string_types):
+        if isinstance(existing_ovs_use_veth, str):
             return bool_from_string(existing_ovs_use_veth)
         return existing_ovs_use_veth

View File

@@ -0,0 +1,128 @@
#!/usr/bin/python3

# Copyright 2014-2022 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Checks for services with deferred restarts.

This Nagios check will parse /var/lib/policy-rd.d/
to find any restarts that are currently deferred.
"""

import argparse
import glob
import sys

import yaml


DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d'


def get_deferred_events():
    """Return a list of deferred events dicts from policy-rc.d files.

    Events are read from DEFERRED_EVENTS_DIR and are of the form:
    {
        action: restart,
        policy_requestor_name: rabbitmq-server,
        policy_requestor_type: charm,
        reason: 'Pkg update',
        service: rabbitmq-server,
        time: 1614328743
    }

    :raises OSError: Raised in case of a system error while reading a policy file
    :raises yaml.YAMLError: Raised if parsing a policy file fails

    :returns: List of deferred event dictionaries
    :rtype: list
    """
    deferred_events_files = glob.glob(
        '{}/*.deferred'.format(DEFERRED_EVENTS_DIR))

    deferred_events = []
    for event_file in deferred_events_files:
        with open(event_file, 'r') as f:
            event = yaml.safe_load(f)
            deferred_events.append(event)

    return deferred_events


def get_deferred_restart_services(application=None):
    """Returns a list of services with deferred restarts.

    :param str application: Name of the application that blocked the service restart.
                            If application is None, all services with deferred restarts
                            are returned. Services which are blocked by a non-charm
                            requestor are always returned.

    :raises OSError: Raised in case of a system error while reading a policy file
    :raises yaml.YAMLError: Raised if parsing a policy file fails

    :returns: List of services with deferred restarts belonging to application.
    :rtype: list
    """
    deferred_restart_events = filter(
        lambda e: e['action'] == 'restart', get_deferred_events())

    deferred_restart_services = set()
    for restart_event in deferred_restart_events:
        if application:
            if (
                restart_event['policy_requestor_type'] != 'charm' or
                restart_event['policy_requestor_type'] == 'charm' and
                restart_event['policy_requestor_name'] == application
            ):
                deferred_restart_services.add(restart_event['service'])
        else:
            deferred_restart_services.add(restart_event['service'])

    return list(deferred_restart_services)


def main():
    """Check for services with deferred restarts."""
    parser = argparse.ArgumentParser(
        description='Check for services with deferred restarts')
    parser.add_argument(
        '--application', help='Check services belonging to this application only')

    args = parser.parse_args()

    services = set(get_deferred_restart_services(args.application))

    if len(services) == 0:
        print('OK: No deferred service restarts.')
        sys.exit(0)
    else:
        print(
            'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services)))
        sys.exit(1)


if __name__ == '__main__':
    try:
        main()
    except OSError as e:
        print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror))
        sys.exit(1)
    except yaml.YAMLError as e:
        print('CRITICAL: Failed to parse a policy file: {}'.format(str(e)))
        sys.exit(1)
    except Exception as e:
        print('CRITICAL: An unknown error occurred: {}'.format(str(e)))
        sys.exit(1)
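Nagios invokes the plugin above as check_deferred_restarts.py --application <application>; per the code, it exits 0 with an "OK" message when nothing is deferred, and exits 1 with a "CRITICAL" message listing the affected services (or the error encountered) otherwise.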

View File

@@ -1,4 +1,3 @@
-#!/usr/bin/python
 #
 # Copyright 2017 Canonical Ltd
 #
@@ -14,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import six
 from charmhelpers.fetch import apt_install
 from charmhelpers.contrib.openstack.context import IdentityServiceContext
 from charmhelpers.core.hookenv import (
@@ -117,10 +115,7 @@ class KeystoneManager2(KeystoneManager):
             from keystoneclient.auth.identity import v2
             from keystoneclient import session
         except ImportError:
-            if six.PY2:
-                apt_install(["python-keystoneclient"], fatal=True)
-            else:
-                apt_install(["python3-keystoneclient"], fatal=True)
+            apt_install(["python3-keystoneclient"], fatal=True)
 
             from keystoneclient.v2_0 import client
             from keystoneclient.auth.identity import v2
@@ -151,10 +146,7 @@ class KeystoneManager3(KeystoneManager):
             from keystoneclient import session
             from keystoneclient.auth.identity import v3
         except ImportError:
-            if six.PY2:
-                apt_install(["python-keystoneclient"], fatal=True)
-            else:
-                apt_install(["python3-keystoneclient"], fatal=True)
+            apt_install(["python3-keystoneclient"], fatal=True)
 
             from keystoneclient.v3 import client
             from keystoneclient.auth import token_endpoint

View File

@@ -14,7 +14,6 @@
 
 # Various utilities for dealing with Neutron and the renaming from Quantum.
 
-import six
 from subprocess import check_output
 
 from charmhelpers.core.hookenv import (
@@ -349,11 +348,4 @@ def parse_vlan_range_mappings(mappings):
     Returns dict of the form {provider: (start, end)}.
     """
     _mappings = parse_mappings(mappings)
-    if not _mappings:
-        return {}
-
-    mappings = {}
-    for p, r in six.iteritems(_mappings):
-        mappings[p] = tuple(r.split(':'))
-
-    return mappings
+    return {p: tuple(r.split(':')) for p, r in _mappings.items()}

View File

@@ -15,7 +15,6 @@
 import collections
 import contextlib
 import os
-import six
 import shutil
 import yaml
 import zipfile
@@ -204,12 +203,6 @@ class BadPolicyYamlFile(Exception):
         return self.log_message
 
 
-if six.PY2:
-    BadZipFile = zipfile.BadZipfile
-else:
-    BadZipFile = zipfile.BadZipFile
-
-
 def is_policyd_override_valid_on_this_release(openstack_release):
     """Check that the charm is running on at least Ubuntu Xenial, and at
     least the queens release.
@@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
     if blacklisted_keys_present:
         raise BadPolicyYamlFile("blacklisted keys {} present."
                                 .format(", ".join(blacklisted_keys_present)))
-    if not all(isinstance(k, six.string_types) for k in keys):
+    if not all(isinstance(k, str) for k in keys):
         raise BadPolicyYamlFile("keys in yaml aren't all strings?")
     # check that the dictionary looks like a mapping of str to str
-    if not all(isinstance(v, six.string_types) for v in doc.values()):
+    if not all(isinstance(v, str) for v in doc.values()):
         raise BadPolicyYamlFile("values in yaml aren't all strings?")
     return doc
@@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
     hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG)
     if not os.path.exists(path):
         ch_host.mkdir(path, owner=_user, group=_group, perms=0o775)
-    _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir
-    for direntry in _scanner(path):
+    for direntry in os.scandir(path):
         # see if the path should be kept.
         if direntry.path in keep_paths:
             continue
@@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group):
         ch_host.mkdir(_dir, owner=user, group=group, perms=0o775)
 
 
-@contextlib.contextmanager
-def _fallback_scandir(path):
-    """Fallback os.scandir implementation.
-
-    provide a fallback implementation of os.scandir if this module ever gets
-    used in a py2 or py34 charm. Uses os.listdir() to get the names in the path,
-    and then mocks the is_dir() function using os.path.isdir() to check for
-    directory.
-
-    :param path: the path to list the directories for
-    :type path: str
-    :returns: Generator that provides _FBDirectory objects
-    :rtype: ContextManager[_FBDirectory]
-    """
-    for f in os.listdir(path):
-        yield _FBDirectory(f)
-
-
-class _FBDirectory(object):
-    """Mock a scandir Directory object with enough to use in
-    clean_policyd_dir_for
-    """
-
-    def __init__(self, path):
-        self.path = path
-
-    def is_dir(self):
-        return os.path.isdir(self.path)
-
-
 def path_for_policy_file(service, name):
     """Return the full path for a policy.d file that will be written to the
     service's policy.d directory.
@@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file,
                                  _group)
         # Every thing worked, so we mark up a success.
         completed = True
-    except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
+    except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
         hookenv.log("Processing {} failed: {}".format(resource_file, str(e)),
                     level=POLICYD_LOG_LEVEL_DEFAULT)
     except IOError as e:

View File

@@ -82,7 +82,11 @@ backend {{ service }}_{{ frontend }}
     {% endif -%}
     {% endif -%}
     {% for unit, address in frontends[frontend]['backends'].items() -%}
+    {% if https -%}
+    server {{ unit }} {{ address }}:{{ ports[1] }} check check-ssl verify none
+    {% else -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
+    {% endif -%}
     {% endfor %}
 {% endfor -%}
 {% endfor -%}

View File

@@ -22,6 +22,8 @@ Listen {{ ext_port }}
     ProxyPassReverse / http://localhost:{{ int }}/
     ProxyPreserveHost on
     RequestHeader set X-Forwarded-Proto "https"
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
 </VirtualHost>
 {% endfor -%}
 <Proxy *>

View File

@@ -22,6 +22,8 @@ Listen {{ ext_port }}
     ProxyPassReverse / http://localhost:{{ int }}/
     ProxyPreserveHost on
     RequestHeader set X-Forwarded-Proto "https"
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
 </VirtualHost>
 {% endfor -%}
 <Proxy *>

View File

@@ -20,6 +20,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -46,6 +48,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ admin_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -72,6 +76,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ public_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>

View File

@@ -20,6 +20,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -46,6 +48,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ admin_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>
@@ -72,6 +76,8 @@ Listen {{ public_port }}
     WSGIScriptAlias / {{ public_script }}
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
+    KeepAliveTimeout 75
+    MaxKeepAliveRequests 1000
     <IfVersion >= 2.4>
       ErrorLogFormat "%{cu}t %M"
     </IfVersion>

View File

@@ -14,8 +14,6 @@
 
 import os
 
-import six
-
 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import (
     log,
@@ -29,10 +27,7 @@ try:
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 except ImportError:
     apt_update(fatal=True)
-    if six.PY2:
-        apt_install('python-jinja2', fatal=True)
-    else:
-        apt_install('python3-jinja2', fatal=True)
+    apt_install('python3-jinja2', fatal=True)
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
@@ -62,7 +57,7 @@ def get_loader(templates_dir, os_release):
     order by OpenStack release.
     """
     tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
-                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+                 for rel in OPENSTACK_CODENAMES.values()]
 
     if not os.path.isdir(templates_dir):
         log('Templates directory not found @ %s.' % templates_dir,
@@ -225,10 +220,7 @@ class OSConfigRenderer(object):
             # if this code is running, the object is created pre-install hook.
             # jinja2 shouldn't get touched until the module is reloaded on next
             # hook execution, with proper jinja2 bits successfully imported.
-            if six.PY2:
-                apt_install('python-jinja2')
-            else:
-                apt_install('python3-jinja2')
+            apt_install('python3-jinja2')
 
     def register(self, config_file, contexts, config_template=None):
         """
@@ -318,9 +310,7 @@ class OSConfigRenderer(object):
             log('Config not registered: %s' % config_file, level=ERROR)
             raise OSConfigException
 
-        _out = self.render(config_file)
-        if six.PY3:
-            _out = _out.encode('UTF-8')
+        _out = self.render(config_file).encode('UTF-8')
 
         with open(config_file, 'wb') as out:
             out.write(_out)
@@ -331,7 +321,8 @@ class OSConfigRenderer(object):
         """
         Write out all registered config files.
         """
-        [self.write(k) for k in six.iterkeys(self.templates)]
+        for k in self.templates.keys():
+            self.write(k)
 
     def set_release(self, openstack_release):
         """
@@ -347,8 +338,8 @@ class OSConfigRenderer(object):
         Returns a list of context interfaces that yield a complete context.
         '''
         interfaces = []
-        [interfaces.extend(i.complete_contexts())
-         for i in six.itervalues(self.templates)]
+        for i in self.templates.values():
+            interfaces.extend(i.complete_contexts())
         return interfaces
 
     def get_incomplete_context_data(self, interfaces):
@@ -360,7 +351,7 @@ class OSConfigRenderer(object):
         '''
         incomplete_context_data = {}
 
-        for i in six.itervalues(self.templates):
+        for i in self.templates.values():
             for context in i.contexts:
                 for interface in interfaces:
                     related = False

View File

@@ -25,7 +25,6 @@ import re
 import itertools
 import functools
 
-import six
 import traceback
 import uuid
 import yaml
@@ -362,6 +361,8 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
+    if src in OPENSTACK_RELEASES:
+        return src
     if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -401,7 +402,7 @@ def get_os_codename_version(vers):
 
 def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
     '''Determine OpenStack version number from codename.'''
-    for k, v in six.iteritems(version_map):
+    for k, v in version_map.items():
         if v == codename:
             return k
     e = 'Could not derive OpenStack version for '\
@@ -411,7 +412,8 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
 
 def get_os_version_codename_swift(codename):
     '''Determine OpenStack version number of swift from codename.'''
-    for k, v in six.iteritems(SWIFT_CODENAMES):
+    # for k, v in six.iteritems(SWIFT_CODENAMES):
+    for k, v in SWIFT_CODENAMES.items():
         if k == codename:
             return v[-1]
     e = 'Could not derive swift version for '\
@@ -421,17 +423,17 @@ def get_os_version_codename_swift(codename):
 
 def get_swift_codename(version):
     '''Determine OpenStack codename that corresponds to swift version.'''
-    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+    codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v]
 
     if len(codenames) > 1:
         # If more than one release codename contains this version we determine
         # the actual codename based on the highest available install source.
         for codename in reversed(codenames):
             releases = UBUNTU_OPENSTACK_RELEASE
-            release = [k for k, v in six.iteritems(releases) if codename in v]
-            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
-            if six.PY3:
-                ret = ret.decode('UTF-8')
+            release = [k for k, v in releases.items() if codename in v]
+            ret = (subprocess
+                   .check_output(['apt-cache', 'policy', 'swift'])
+                   .decode('UTF-8'))
             if codename in ret or release[0] in ret:
                 return codename
     elif len(codenames) == 1:
@@ -441,7 +443,7 @@ def get_swift_codename(version):
         match = re.match(r'^(\d+)\.(\d+)', version)
         if match:
             major_minor_version = match.group(0)
-            for codename, versions in six.iteritems(SWIFT_CODENAMES):
+            for codename, versions in SWIFT_CODENAMES.items():
                 for release_version in versions:
                     if release_version.startswith(major_minor_version):
                         return codename
@@ -477,9 +479,7 @@ def get_os_codename_package(package, fatal=True):
     if snap_install_requested():
         cmd = ['snap', 'list', package]
         try:
-            out = subprocess.check_output(cmd)
-            if six.PY3:
-                out = out.decode('UTF-8')
+            out = subprocess.check_output(cmd).decode('UTF-8')
         except subprocess.CalledProcessError:
             return None
         lines = out.split('\n')
@@ -549,16 +549,14 @@ def get_os_version_package(pkg, fatal=True):
 
     if 'swift' in pkg:
         vers_map = SWIFT_CODENAMES
-        for cname, version in six.iteritems(vers_map):
+        for cname, version in vers_map.items():
             if cname == codename:
                 return version[-1]
     else:
         vers_map = OPENSTACK_CODENAMES
-        for version, cname in six.iteritems(vers_map):
+        for version, cname in vers_map.items():
             if cname == codename:
                 return version
-    # e = "Could not determine OpenStack version for package: %s" % pkg
-    # error_out(e)
 
 
 def get_installed_os_version():
@@ -821,10 +819,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
     if not os.path.exists(os.path.dirname(juju_rc_path)):
         os.mkdir(os.path.dirname(juju_rc_path))
     with open(juju_rc_path, 'wt') as rc_script:
-        rc_script.write(
-            "#!/bin/bash\n")
-        [rc_script.write('export %s=%s\n' % (u, p))
-         for u, p in six.iteritems(env_vars) if u != "script_path"]
+        rc_script.write("#!/bin/bash\n")
+        for u, p in env_vars.items():
+            if u != "script_path":
+                rc_script.write('export %s=%s\n' % (u, p))
 
 
 def openstack_upgrade_available(package):
@@ -1039,7 +1037,7 @@ def _determine_os_workload_status(
             state, message, lambda: charm_func(configs))
 
     if state is None:
-        state, message = _ows_check_services_running(services, ports)
+        state, message = ows_check_services_running(services, ports)
 
     if state is None:
         state = 'active'
@@ -1213,7 +1211,12 @@ def _ows_check_charm_func(state, message, charm_func_with_configs):
     return state, message
 
 
+@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log)
 def _ows_check_services_running(services, ports):
+    return ows_check_services_running(services, ports)
+
+
+def ows_check_services_running(services, ports):
     """Check that the services that should be running are actually running
     and that any ports specified are being listened to.
@@ -1413,45 +1416,75 @@ def incomplete_relation_data(configs, required_interfaces):
             for i in incomplete_relations}
 
 
-def do_action_openstack_upgrade(package, upgrade_callback, configs,
-                                force_upgrade=False):
+def do_action_openstack_upgrade(package, upgrade_callback, configs):
     """Perform action-managed OpenStack upgrade.
 
     Upgrades packages to the configured openstack-origin version and sets
     the corresponding action status as a result.
 
-    If the charm was installed from source we cannot upgrade it.
     For backwards compatibility a config flag (action-managed-upgrade) must
     be set for this code to run, otherwise a full service level upgrade will
     fire on config-changed.
 
-    @param package: package name for determining if upgrade available
+    @param package: package name for determining if openstack upgrade available
     @param upgrade_callback: function callback to charm's upgrade function
     @param configs: templating object derived from OSConfigRenderer class
-    @param force_upgrade: perform dist-upgrade regardless of new openstack
 
     @return: True if upgrade successful; False if upgrade failed or skipped
     """
     ret = False
 
-    if openstack_upgrade_available(package) or force_upgrade:
+    if openstack_upgrade_available(package):
         if config('action-managed-upgrade'):
             juju_log('Upgrading OpenStack release')
 
             try:
                 upgrade_callback(configs=configs)
-                action_set({'outcome': 'success, upgrade completed.'})
+                action_set({'outcome': 'success, upgrade completed'})
                 ret = True
             except Exception:
-                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'outcome': 'upgrade failed, see traceback'})
                 action_set({'traceback': traceback.format_exc()})
-                action_fail('do_openstack_upgrade resulted in an '
+                action_fail('upgrade callback resulted in an '
                             'unexpected error')
         else:
             action_set({'outcome': 'action-managed-upgrade config is '
-                                   'False, skipped upgrade.'})
+                                   'False, skipped upgrade'})
     else:
-        action_set({'outcome': 'no upgrade available.'})
+        action_set({'outcome': 'no upgrade available'})
+
+    return ret
+
+
+def do_action_package_upgrade(package, upgrade_callback, configs):
+    """Perform package upgrade within the current OpenStack release.
+
+    Upgrades packages only if there is not an openstack upgrade available,
+    and sets the corresponding action status as a result.
+
+    @param package: package name for determining if openstack upgrade available
+    @param upgrade_callback: function callback to charm's upgrade function
+    @param configs: templating object derived from OSConfigRenderer class
+
+    @return: True if upgrade successful; False if upgrade failed or skipped
+    """
+    ret = False
+
+    if not openstack_upgrade_available(package):
+        juju_log('Upgrading packages')
+
+        try:
+            upgrade_callback(configs=configs)
+            action_set({'outcome': 'success, upgrade completed'})
+            ret = True
        except Exception:
            action_set({'outcome': 'upgrade failed, see traceback'})
            action_set({'traceback': traceback.format_exc()})
            action_fail('upgrade callback resulted in an '
                        'unexpected error')
+    else:
+        action_set({'outcome': 'upgrade skipped because an openstack upgrade '
+                               'is available'})
 
     return ret
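A sketch of how a charm action might wire up the new helper; the callback, handler and package name here are illustrative stand-ins, only do_action_package_upgrade comes from the module above:

from charmhelpers.contrib.openstack.utils import do_action_package_upgrade


def upgrade_callback(configs):
    # Illustrative stand-in for the charm's real package-upgrade routine.
    pass


def package_upgrade_action(args):
    # Illustrative action handler: packages are only upgraded when no
    # OpenStack release upgrade is pending for the named package.
    # 'nova-common' is just an example package.
    do_action_package_upgrade('nova-common', upgrade_callback, configs=None)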
@@ -1849,21 +1882,20 @@ def pausable_restart_on_change(restart_map, stopstart=False,
     """
     def wrap(f):
-        # py27 compatible nonlocal variable.  When py3 only, replace with
-        # nonlocal keyword
-        __restart_map_cache = {'cache': None}
+        __restart_map_cache = None
 
         @functools.wraps(f)
         def wrapped_f(*args, **kwargs):
+            nonlocal __restart_map_cache
             if is_unit_paused_set():
                 return f(*args, **kwargs)
-            if __restart_map_cache['cache'] is None:
-                __restart_map_cache['cache'] = restart_map() \
+            if __restart_map_cache is None:
+                __restart_map_cache = restart_map() \
                     if callable(restart_map) else restart_map
             # otherwise, normal restart_on_change functionality
             return restart_on_change_helper(
                 (lambda: f(*args, **kwargs)),
-                __restart_map_cache['cache'],
+                __restart_map_cache,
                 stopstart,
                 restart_functions,
                 can_restart_now_f,
@@ -1888,7 +1920,7 @@ def ordered(orderme):
         raise ValueError('argument must be a dict type')
 
     result = OrderedDict()
-    for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
+    for k, v in sorted(orderme.items(), key=lambda x: x[0]):
         if isinstance(v, dict):
             result[k] = ordered(v)
         else:

View File

@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import json
-import six
 
 from charmhelpers.core.hookenv import relation_id as current_relation_id
 from charmhelpers.core.hookenv import (
@@ -229,7 +228,7 @@ def peer_echo(includes=None, force=False):
             if ex in echo_data:
                 echo_data.pop(ex)
     else:
-        for attribute, value in six.iteritems(rdata):
+        for attribute, value in rdata.items():
             for include in includes:
                 if include in attribute:
                     echo_data[attribute] = value
@@ -255,8 +254,8 @@ def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
                  relation_settings=relation_settings,
                  **kwargs)
     if is_relation_made(peer_relation_name):
-        for key, value in six.iteritems(dict(list(kwargs.items()) +
-                                             list(relation_settings.items()))):
+        items = dict(list(kwargs.items()) + list(relation_settings.items()))
+        for key, value in items.items():
             key_prefix = relation_id or current_relation_id()
             peer_store(key_prefix + delimiter + key,
                        value,

View File

@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import absolute_import
-
 # deprecated aliases for backwards compatibility
 from charmhelpers.fetch.python import debug  # noqa
 from charmhelpers.fetch.python import packages  # noqa

View File

@ -23,7 +23,6 @@ import collections
import errno import errno
import hashlib import hashlib
import math import math
import six
import os import os
import shutil import shutil
@ -218,7 +217,7 @@ def validator(value, valid_type, valid_range=None):
"was given {} of type {}" "was given {} of type {}"
.format(valid_range, type(valid_range))) .format(valid_range, type(valid_range)))
# If we're dealing with strings # If we're dealing with strings
if isinstance(value, six.string_types): if isinstance(value, str):
assert value in valid_range, ( assert value in valid_range, (
"{} is not in the list {}".format(value, valid_range)) "{} is not in the list {}".format(value, valid_range))
# Integer, float should have a min and max # Integer, float should have a min and max
@ -434,9 +433,9 @@ class BasePool(object):
:type mode: str :type mode: str
""" """
# Check the input types and values # Check the input types and values
validator(value=cache_pool, valid_type=six.string_types) validator(value=cache_pool, valid_type=str)
validator( validator(
value=mode, valid_type=six.string_types, value=mode, valid_type=str,
valid_range=["readonly", "writeback"]) valid_range=["readonly", "writeback"])
check_call([ check_call([
@ -779,9 +778,7 @@ def enabled_manager_modules():
""" """
cmd = ['ceph', 'mgr', 'module', 'ls'] cmd = ['ceph', 'mgr', 'module', 'ls']
try: try:
modules = check_output(cmd) modules = check_output(cmd).decode('utf-8')
if six.PY3:
modules = modules.decode('UTF-8')
except CalledProcessError as e: except CalledProcessError as e:
log("Failed to list ceph modules: {}".format(e), WARNING) log("Failed to list ceph modules: {}".format(e), WARNING)
return [] return []
@ -814,10 +811,10 @@ def get_mon_map(service):
ceph command fails. ceph command fails.
""" """
try: try:
mon_status = check_output(['ceph', '--id', service, octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0
'mon_status', '--format=json']) mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status'
if six.PY3: mon_status = (check_output(['ceph', '--id', service, mon_status_cmd,
mon_status = mon_status.decode('UTF-8') '--format=json'])).decode('utf-8')
try: try:
return json.loads(mon_status) return json.loads(mon_status)
except ValueError as v: except ValueError as v:
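A minimal sketch of the version gate introduced above, assuming cmp_pkgrevno() is importable from charmhelpers.core.host; the helper name _mon_status_json() is hypothetical:

from subprocess import check_output

from charmhelpers.core.host import cmp_pkgrevno


def _mon_status_json(service):
    # Octopus (ceph-common >= 15.0.0) renamed 'mon_status' to
    # 'quorum_status'; pick whichever matches the installed package.
    octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0
    cmd = 'quorum_status' if octopus_or_later else 'mon_status'
    return check_output(
        ['ceph', '--id', service, cmd, '--format=json']).decode('utf-8')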
@ -959,9 +956,7 @@ def get_erasure_profile(service, name):
try: try:
out = check_output(['ceph', '--id', service, out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get', 'osd', 'erasure-code-profile', 'get',
name, '--format=json']) name, '--format=json']).decode('utf-8')
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out) return json.loads(out)
except (CalledProcessError, OSError, ValueError): except (CalledProcessError, OSError, ValueError):
return None return None
@ -1164,8 +1159,7 @@ def create_erasure_profile(service, profile_name,
'nvme' 'nvme'
] ]
validator(erasure_plugin_name, six.string_types, validator(erasure_plugin_name, str, list(plugin_techniques.keys()))
list(plugin_techniques.keys()))
cmd = [ cmd = [
'ceph', '--id', service, 'ceph', '--id', service,
@ -1176,7 +1170,7 @@ def create_erasure_profile(service, profile_name,
] ]
if erasure_plugin_technique: if erasure_plugin_technique:
validator(erasure_plugin_technique, six.string_types, validator(erasure_plugin_technique, str,
plugin_techniques[erasure_plugin_name]) plugin_techniques[erasure_plugin_name])
cmd.append('technique={}'.format(erasure_plugin_technique)) cmd.append('technique={}'.format(erasure_plugin_technique))
@ -1189,7 +1183,7 @@ def create_erasure_profile(service, profile_name,
failure_domain = 'rack' failure_domain = 'rack'
if failure_domain: if failure_domain:
validator(failure_domain, six.string_types, failure_domains) validator(failure_domain, str, failure_domains)
# failure_domain changed in luminous # failure_domain changed in luminous
if luminous_or_later: if luminous_or_later:
cmd.append('crush-failure-domain={}'.format(failure_domain)) cmd.append('crush-failure-domain={}'.format(failure_domain))
@ -1198,7 +1192,7 @@ def create_erasure_profile(service, profile_name,
# device class new in luminous # device class new in luminous
if luminous_or_later and device_class: if luminous_or_later and device_class:
validator(device_class, six.string_types, device_classes) validator(device_class, str, device_classes)
cmd.append('crush-device-class={}'.format(device_class)) cmd.append('crush-device-class={}'.format(device_class))
else: else:
log('Skipping device class configuration (ceph < 12.0.0)', log('Skipping device class configuration (ceph < 12.0.0)',
@ -1213,7 +1207,7 @@ def create_erasure_profile(service, profile_name,
raise ValueError("locality must be provided for lrc plugin") raise ValueError("locality must be provided for lrc plugin")
# LRC optional configuration # LRC optional configuration
if crush_locality: if crush_locality:
validator(crush_locality, six.string_types, failure_domains) validator(crush_locality, str, failure_domains)
cmd.append('crush-locality={}'.format(crush_locality)) cmd.append('crush-locality={}'.format(crush_locality))
if erasure_plugin_name == 'shec': if erasure_plugin_name == 'shec':
@ -1241,8 +1235,8 @@ def rename_pool(service, old_name, new_name):
:param new_name: Name to rename pool to. :param new_name: Name to rename pool to.
:type new_name: str :type new_name: str
""" """
validator(value=old_name, valid_type=six.string_types) validator(value=old_name, valid_type=str)
validator(value=new_name, valid_type=six.string_types) validator(value=new_name, valid_type=str)
cmd = [ cmd = [
'ceph', '--id', service, 'ceph', '--id', service,
@ -1260,7 +1254,7 @@ def erasure_profile_exists(service, name):
:returns: True if it exists, False otherwise. :returns: True if it exists, False otherwise.
:rtype: bool :rtype: bool
""" """
validator(value=name, valid_type=six.string_types) validator(value=name, valid_type=str)
try: try:
check_call(['ceph', '--id', service, check_call(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get', 'osd', 'erasure-code-profile', 'get',
@ -1280,12 +1274,10 @@ def get_cache_mode(service, pool_name):
:returns: Current cache mode. :returns: Current cache mode.
:rtype: Optional[int] :rtype: Optional[int]
""" """
validator(value=service, valid_type=six.string_types) validator(value=service, valid_type=str)
validator(value=pool_name, valid_type=six.string_types) validator(value=pool_name, valid_type=str)
out = check_output(['ceph', '--id', service, out = check_output(['ceph', '--id', service,
'osd', 'dump', '--format=json']) 'osd', 'dump', '--format=json']).decode('utf-8')
if six.PY3:
out = out.decode('UTF-8')
try: try:
osd_json = json.loads(out) osd_json = json.loads(out)
for pool in osd_json['pools']: for pool in osd_json['pools']:
@ -1299,9 +1291,8 @@ def get_cache_mode(service, pool_name):
def pool_exists(service, name): def pool_exists(service, name):
"""Check to see if a RADOS pool already exists.""" """Check to see if a RADOS pool already exists."""
try: try:
out = check_output(['rados', '--id', service, 'lspools']) out = check_output(
if six.PY3: ['rados', '--id', service, 'lspools']).decode('utf-8')
out = out.decode('UTF-8')
except CalledProcessError: except CalledProcessError:
return False return False
@ -1320,13 +1311,11 @@ def get_osds(service, device_class=None):
out = check_output(['ceph', '--id', service, out = check_output(['ceph', '--id', service,
'osd', 'crush', 'class', 'osd', 'crush', 'class',
'ls-osd', device_class, 'ls-osd', device_class,
'--format=json']) '--format=json']).decode('utf-8')
else: else:
out = check_output(['ceph', '--id', service, out = check_output(['ceph', '--id', service,
'osd', 'ls', 'osd', 'ls',
'--format=json']) '--format=json']).decode('utf-8')
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out) return json.loads(out)
@ -1343,9 +1332,7 @@ def rbd_exists(service, pool, rbd_img):
"""Check to see if a RADOS block device exists.""" """Check to see if a RADOS block device exists."""
try: try:
out = check_output(['rbd', 'list', '--id', out = check_output(['rbd', 'list', '--id',
service, '--pool', pool]) service, '--pool', pool]).decode('utf-8')
if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError: except CalledProcessError:
return False return False
@ -1371,7 +1358,7 @@ def update_pool(client, pool, settings):
:raises: CalledProcessError :raises: CalledProcessError
""" """
cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
for k, v in six.iteritems(settings): for k, v in settings.items():
check_call(cmd + [k, v]) check_call(cmd + [k, v])
@ -1509,9 +1496,7 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name): def image_mapped(name):
"""Determine whether a RADOS block device is mapped locally.""" """Determine whether a RADOS block device is mapped locally."""
try: try:
out = check_output(['rbd', 'showmapped']) out = check_output(['rbd', 'showmapped']).decode('utf-8')
if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError: except CalledProcessError:
return False return False


@ -19,8 +19,6 @@ from subprocess import (
check_output, check_output,
) )
import six
################################################## ##################################################
# loopback device helpers. # loopback device helpers.
@ -40,9 +38,7 @@ def loopback_devices():
''' '''
loopbacks = {} loopbacks = {}
cmd = ['losetup', '-a'] cmd = ['losetup', '-a']
output = check_output(cmd) output = check_output(cmd).decode('utf-8')
if six.PY3:
output = output.decode('utf-8')
devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
for dev, _, f in devs: for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
@ -57,7 +53,7 @@ def create_loopback(file_path):
''' '''
file_path = os.path.abspath(file_path) file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path]) check_call(['losetup', '--find', file_path])
for d, f in six.iteritems(loopback_devices()): for d, f in loopback_devices().items():
if f == file_path: if f == file_path:
return d return d
@ -71,7 +67,7 @@ def ensure_loopback_device(path, size):
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0) :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
''' '''
for d, f in six.iteritems(loopback_devices()): for d, f in loopback_devices().items():
if f == path: if f == path:
return d return d

605 charmhelpers/coordinator.py Normal file

@ -0,0 +1,605 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The coordinator module allows you to use Juju's leadership feature to
coordinate operations between units of a service.
Behavior is defined in subclasses of coordinator.BaseCoordinator.
One implementation is provided (coordinator.Serial), which allows an
operation to be run on a single unit at a time, on a first come, first
served basis. You can trivially define more complex behavior by
subclassing BaseCoordinator or Serial.
:author: Stuart Bishop <stuart.bishop@canonical.com>
Services Framework Usage
========================
Ensure a peers relation is defined in metadata.yaml. Instantiate a
BaseCoordinator subclass before invoking ServiceManager.manage().
Ensure that ServiceManager.manage() is wired up to the leader-elected,
leader-settings-changed, peers relation-changed and peers
relation-departed hooks in addition to any other hooks you need, or your
service will deadlock.
Ensure calls to acquire() are guarded, so that locks are only requested
when they are really needed (and thus hooks only triggered when necessary).
Failing to do this and calling acquire() unconditionally will put your unit
into a hook loop. Calls to granted() do not need to be guarded.
For example::
from charmhelpers.core import hookenv, services
from charmhelpers import coordinator
def maybe_restart(servicename):
serial = coordinator.Serial()
if needs_restart():
serial.acquire('restart')
if serial.granted('restart'):
hookenv.service_restart(servicename)
services = [dict(service='servicename',
data_ready=[maybe_restart])]
if __name__ == '__main__':
_ = coordinator.Serial() # Must instantiate before manager.manage()
manager = services.ServiceManager(services)
manager.manage()
You can implement a similar pattern using a decorator. If the lock has
not been granted, an attempt to acquire() it will be made if the guard
function returns True. If the lock has been granted, the decorated function
is run as normal::
from charmhelpers.core import hookenv, services
from charmhelpers import coordinator
serial = coordinator.Serial() # Global, instantiated on module import.
def needs_restart():
[ ... Introspect state. Return True if restart is needed ... ]
@serial.require('restart', needs_restart)
def maybe_restart(servicename):
hookenv.service_restart(servicename)
services = [dict(service='servicename',
data_ready=[maybe_restart])]
if __name__ == '__main__':
manager = services.ServiceManager(services)
manager.manage()
Traditional Usage
=================
Ensure a peers relation is defined in metadata.yaml.
If you are using charmhelpers.core.hookenv.Hooks, ensure that a
BaseCoordinator subclass is instantiated before calling Hooks.execute.
If you are not using charmhelpers.core.hookenv.Hooks, ensure
that a BaseCoordinator subclass is instantiated and its handle()
method called at the start of all your hooks.
For example::
import sys
from charmhelpers.core import hookenv
from charmhelpers import coordinator
hooks = hookenv.Hooks()
def maybe_restart():
serial = coordinator.Serial()
if serial.granted('restart'):
hookenv.service_restart('myservice')
@hooks.hook
def config_changed():
update_config()
serial = coordinator.Serial()
if needs_restart():
serial.acquire('restart')
maybe_restart()
# Cluster hooks must be wired up.
@hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
def cluster_relation_changed():
maybe_restart()
# Leader hooks must be wired up.
@hooks.hook('leader-elected', 'leader-settings-changed')
def leader_settings_changed():
maybe_restart()
[ ... repeat for *all* other hooks you are using ... ]
if __name__ == '__main__':
_ = coordinator.Serial() # Must instantiate before execute()
hooks.execute(sys.argv)
You can also use the require decorator. If the lock has not been granted,
an attempt to acquire() it will be made if the guard function returns True.
If the lock has been granted, the decorated function is run as normal::
from charmhelpers.core import hookenv
hooks = hookenv.Hooks()
serial = coordinator.Serial() # Must instantiate before execute()
@require('restart', needs_restart)
def maybe_restart():
hookenv.service_restart('myservice')
@hooks.hook('install', 'config-changed', 'upgrade-charm',
# Peers and leader hooks must be wired up.
'cluster-relation-changed', 'cluster-relation-departed',
'leader-elected', 'leader-settings-changed')
def default_hook():
[...]
maybe_restart()
if __name__ == '__main__':
hooks.execute()
Details
=======
A simple API is provided similar to traditional locking APIs. A lock
may be requested using the acquire() method, and the granted() method
may be used to check if a lock previously requested by acquire() has
been granted. It doesn't matter how many times acquire() is called in a
hook.
Locks are released at the end of the hook they are acquired in. This may
be the current hook if the unit is leader and the lock is free. It is
more likely a future hook (probably leader-settings-changed, possibly
the peers relation-changed or departed hook, potentially any hook).
Whenever a charm needs to perform a coordinated action it will acquire()
the lock and perform the action immediately if acquisition is
successful. It will also need to perform the same action in every other
hook if the lock has been granted.
Grubby Details
--------------
Why do you need to be able to perform the same action in every hook?
If the unit is the leader, then it may be able to grant its own lock
and perform the action immediately in the source hook. If the unit is
the leader and cannot immediately grant the lock, then its only
guaranteed chance of acquiring the lock is in the peers relation-joined,
relation-changed or peers relation-departed hooks when another unit has
released it (the only channel to communicate to the leader is the peers
relation). If the unit is not the leader, then it is unlikely the lock
is granted in the source hook (a previous hook must have also made the
request for this to happen). A non-leader is notified about the lock via
leader settings. These changes may be visible in any hook, even before
the leader-settings-changed hook has been invoked. Or the requesting
unit may be promoted to leader after making a request, in which case the
lock may be granted in leader-elected or in a future peers
relation-changed or relation-departed hook.
This could be simpler if leader-settings-changed was invoked on the
leader. We could then never grant locks except in
leader-settings-changed hooks giving one place for the operation to be
performed. Unfortunately this is not the case with Juju 1.23 leadership.
But of course, this doesn't really matter to most people as most people
seem to prefer the Services Framework or similar reset-the-world
approaches, rather than the twisty maze of attempting to deduce what
should be done based on what hook happens to be running (which always
seems to evolve into reset-the-world anyway when the charm grows beyond
the trivial).
I chose not to implement a callback model, where a callback was passed
to acquire to be executed when the lock is granted, because the callback
may become invalid between making the request and the lock being granted
due to an upgrade-charm being run in the interim. And it would create
restrictions, such as no lambdas, and callbacks defined at the top level of a
module, etc. Still, we could implement it on top of what is here, e.g.
by adding a defer decorator that stores a pickle of itself to disk and
have BaseCoordinator unpickle and execute them when the locks are granted.
'''
from datetime import datetime
from functools import wraps
import json
import os.path
from charmhelpers.core import hookenv
# We make BaseCoordinator and subclasses singletons, so that if we
# need to spill to local storage then only a single instance does so,
# rather than having multiple instances stomp over each other.
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args,
**kwargs)
return cls._instances[cls]
class BaseCoordinator(metaclass=Singleton):
relid = None # Peer relation-id, set by __init__
relname = None
grants = None # self.grants[unit][lock] == timestamp
requests = None # self.requests[unit][lock] == timestamp
def __init__(self, relation_key='coordinator', peer_relation_name=None):
'''Instantiate a Coordinator.
Data is stored on the peers relation and in leadership storage
under the provided relation_key.
The peers relation is identified by peer_relation_name, and defaults
to the first one found in metadata.yaml.
'''
# Most initialization is deferred, since invoking hook tools from
# the constructor makes testing hard.
self.key = relation_key
self.relname = peer_relation_name
hookenv.atstart(self.initialize)
# Ensure that handle() is called, without placing that burden on
# the charm author. They still need to do this manually if they
# are not using a hook framework.
hookenv.atstart(self.handle)
def initialize(self):
if self.requests is not None:
return # Already initialized.
assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'
if self.relname is None:
self.relname = _implicit_peer_relation_name()
relids = hookenv.relation_ids(self.relname)
if relids:
self.relid = sorted(relids)[0]
# Load our state, from leadership, the peer relationship, and maybe
# local state as a fallback. Populates self.requests and self.grants.
self._load_state()
self._emit_state()
# Save our state if the hook completes successfully.
hookenv.atexit(self._save_state)
# Schedule release of granted locks for the end of the hook.
# This needs to be the last of our atexit callbacks to ensure
# it will be run first when the hook is complete, because there
# is no point mutating our state after it has been saved.
hookenv.atexit(self._release_granted)
def acquire(self, lock):
'''Acquire the named lock, non-blocking.
The lock may be granted immediately, or in a future hook.
Returns True if the lock has been granted. The lock will be
automatically released at the end of the hook in which it is
granted.
Do not mindlessly call this method, as it triggers a cascade of
hooks. For example, if you call acquire() every time in your
peers relation-changed hook you will end up with an infinite loop
of hooks. It should almost always be guarded by some condition.
'''
unit = hookenv.local_unit()
ts = self.requests[unit].get(lock)
if not ts:
# If there is no outstanding request on the peers relation,
# create one.
self.requests.setdefault(unit, {})
self.requests[unit][lock] = _timestamp()
self.msg('Requested {}'.format(lock))
# If the leader has granted the lock, yay.
if self.granted(lock):
self.msg('Acquired {}'.format(lock))
return True
# If the unit making the request also happens to be the
# leader, it must handle the request now. Even though the
# request has been stored on the peers relation, the peers
# relation-changed hook will not be triggered.
if hookenv.is_leader():
return self.grant(lock, unit)
return False # Can't acquire lock, yet. Maybe next hook.
def granted(self, lock):
'''Return True if a previously requested lock has been granted'''
unit = hookenv.local_unit()
ts = self.requests[unit].get(lock)
if ts and self.grants.get(unit, {}).get(lock) == ts:
return True
return False
def requested(self, lock):
'''Return True if we are in the queue for the lock'''
return lock in self.requests[hookenv.local_unit()]
def request_timestamp(self, lock):
'''Return the timestamp of our outstanding request for lock, or None.
Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute.
'''
ts = self.requests[hookenv.local_unit()].get(lock, None)
if ts is not None:
return datetime.strptime(ts, _timestamp_format)
def handle(self):
if not hookenv.is_leader():
return # Only the leader can grant requests.
self.msg('Leader handling coordinator requests')
# Clear our grants that have been released.
for unit in self.grants.keys():
for lock, grant_ts in list(self.grants[unit].items()):
req_ts = self.requests.get(unit, {}).get(lock)
if req_ts != grant_ts:
# The request timestamp does not match the granted
# timestamp. Several hooks on 'unit' may have run
# before the leader got a chance to make a decision,
# and 'unit' may have released its lock and attempted
# to reacquire it. This will change the timestamp,
# and we correctly revoke the old grant putting it
# to the end of the queue.
ts = datetime.strptime(self.grants[unit][lock],
_timestamp_format)
del self.grants[unit][lock]
self.released(unit, lock, ts)
# Grant locks
for unit in self.requests.keys():
for lock in self.requests[unit]:
self.grant(lock, unit)
def grant(self, lock, unit):
'''Maybe grant the lock to a unit.
The decision to grant the lock or not is made for $lock
by a corresponding method grant_$lock, which you may define
in a subclass. If no such method is defined, the default_grant
method is used. See Serial.default_grant() for details.
'''
if not hookenv.is_leader():
return False # Not the leader, so we cannot grant.
# Set of units already granted the lock.
granted = set()
for u in self.grants:
if lock in self.grants[u]:
granted.add(u)
if unit in granted:
return True # Already granted.
# Ordered list of units waiting for the lock.
reqs = set()
for u in self.requests:
if u in granted:
continue # In the granted set. Not wanted in the req list.
for _lock, ts in self.requests[u].items():
if _lock == lock:
reqs.add((ts, u))
queue = [t[1] for t in sorted(reqs)]
if unit not in queue:
return False # Unit has not requested the lock.
# Locate custom logic, or fallback to the default.
grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
if grant_func(lock, unit, granted, queue):
# Grant the lock.
self.msg('Leader grants {} to {}'.format(lock, unit))
self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
return True
return False
def released(self, unit, lock, timestamp):
'''Called on the leader when it has released a lock.
By default, does nothing but log messages. Override if you
need to perform additional housekeeping when a lock is released,
for example recording timestamps.
'''
interval = _utcnow() - timestamp
self.msg('Leader released {} from {}, held {}'.format(lock, unit,
interval))
def require(self, lock, guard_func, *guard_args, **guard_kw):
"""Decorate a function to be run only when a lock is acquired.
The lock is requested if the guard function returns True.
The decorated function is called if the lock has been granted.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if self.granted(lock):
self.msg('Granted {}'.format(lock))
return f(*args, **kw)
if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
return f(*args, **kw)
return None
return wrapper
return decorator
def msg(self, msg):
'''Emit a message. Override to customize log spam.'''
hookenv.log('coordinator.{} {}'.format(self._name(), msg),
level=hookenv.INFO)
def _name(self):
return self.__class__.__name__
def _load_state(self):
self.msg('Loading state')
# All responses must be stored in the leadership settings.
# The leader cannot use local state, as a different unit may
# be leader next time. Which is fine, as the leadership
# settings are always available.
self.grants = json.loads(hookenv.leader_get(self.key) or '{}')
local_unit = hookenv.local_unit()
# All requests must be stored on the peers relation. This is
# the only channel units have to communicate with the leader.
# Even the leader needs to store its requests here, as a
# different unit may be leader by the time the request can be
# granted.
if self.relid is None:
# The peers relation is not available. Maybe we are early in
# the unit's lifecycle. Maybe this unit is standalone.
# Fallback to using local state.
self.msg('No peer relation. Loading local state')
self.requests = {local_unit: self._load_local_state()}
else:
self.requests = self._load_peer_state()
if local_unit not in self.requests:
# The peers relation has just been joined. Update any state
# loaded from our peers with our local state.
self.msg('New peer relation. Merging local state')
self.requests[local_unit] = self._load_local_state()
def _emit_state(self):
# Emit this unit's lock status.
for lock in sorted(self.requests[hookenv.local_unit()].keys()):
if self.granted(lock):
self.msg('Granted {}'.format(lock))
else:
self.msg('Waiting on {}'.format(lock))
def _save_state(self):
self.msg('Publishing state')
if hookenv.is_leader():
# sort_keys to ensure stability.
raw = json.dumps(self.grants, sort_keys=True)
hookenv.leader_set({self.key: raw})
local_unit = hookenv.local_unit()
if self.relid is None:
# No peers relation yet. Fallback to local state.
self.msg('No peer relation. Saving local state')
self._save_local_state(self.requests[local_unit])
else:
# sort_keys to ensure stability.
raw = json.dumps(self.requests[local_unit], sort_keys=True)
hookenv.relation_set(self.relid, relation_settings={self.key: raw})
def _load_peer_state(self):
requests = {}
units = set(hookenv.related_units(self.relid))
units.add(hookenv.local_unit())
for unit in units:
raw = hookenv.relation_get(self.key, unit, self.relid)
if raw:
requests[unit] = json.loads(raw)
return requests
def _local_state_filename(self):
# Include the class name. We allow multiple BaseCoordinator
# subclasses to be instantiated, and they are singletons, so
# this avoids conflicts (unless someone creates and uses two
# BaseCoordinator subclasses with the same class name, so don't
# do that).
return '.charmhelpers.coordinator.{}'.format(self._name())
def _load_local_state(self):
fn = self._local_state_filename()
if os.path.exists(fn):
with open(fn, 'r') as f:
return json.load(f)
return {}
def _save_local_state(self, state):
fn = self._local_state_filename()
with open(fn, 'w') as f:
json.dump(state, f)
def _release_granted(self):
# At the end of every hook, release all locks granted to
# this unit. If a hook neglects to make use of what it
# requested, it will just have to make the request again.
# Implicit release is the only way this will work, as
# if the unit is standalone there may be no future triggers
# called to do a manual release.
unit = hookenv.local_unit()
for lock in list(self.requests[unit].keys()):
if self.granted(lock):
self.msg('Released local {} lock'.format(lock))
del self.requests[unit][lock]
class Serial(BaseCoordinator):
def default_grant(self, lock, unit, granted, queue):
'''Default logic to grant a lock to a unit. Unless overridden,
only one unit may hold the lock and it will be granted to the
earliest queued request.
To define custom logic for $lock, create a subclass and
define a grant_$lock method.
`unit` is the unit name making the request.
`granted` is the set of units already granted the lock. It will
never include `unit`. It may be empty.
`queue` is the list of units waiting for the lock, ordered by time
of request. It will always include `unit`, but `unit` is not
necessarily first.
Returns True if the lock should be granted to `unit`.
'''
return unit == queue[0] and not granted
def _implicit_peer_relation_name():
md = hookenv.metadata()
assert 'peers' in md, 'No peer relations in metadata.yaml'
return sorted(md['peers'].keys())[0]
# A human readable, sortable UTC timestamp format.
_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ'
def _utcnow(): # pragma: no cover
# This wrapper exists as mocking datetime methods is problematic.
return datetime.utcnow()
def _timestamp():
return _utcnow().strftime(_timestamp_format)
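To make the new module concrete, here is a hedged sketch of a guarded rolling restart in the traditional hook style; 'myservice', the 'listen-port' config key and needs_restart() are placeholders, not part of charm-helpers:

import sys

from charmhelpers import coordinator
from charmhelpers.core import hookenv, host

hooks = hookenv.Hooks()
serial = coordinator.Serial()  # Instantiate before hooks.execute().


def needs_restart():
    # Placeholder guard; a real charm would inspect config, file hashes, etc.
    return hookenv.config().changed('listen-port')


@hooks.hook('config-changed',
            # Peers and leader hooks must be wired up too.
            'cluster-relation-changed', 'cluster-relation-departed',
            'leader-elected', 'leader-settings-changed')
def default_hook():
    if needs_restart():
        serial.acquire('restart')
    if serial.granted('restart'):
        host.service_restart('myservice')


if __name__ == '__main__':
    hooks.execute(sys.argv)

Custom grant policies hang off the lock name; a subclass that lets two units hold a hypothetical 'upgrade' lock at once could look like:

class TwoAtATime(coordinator.Serial):
    def grant_upgrade(self, lock, unit, granted, queue):
        # Grant to the head of the queue while fewer than two units hold it.
        return len(granted) < 2 and unit == queue[0]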


@ -17,12 +17,11 @@
# Authors: # Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com> # Charm Helpers Developers <juju@lists.ubuntu.com>
from __future__ import print_function
import copy import copy
from distutils.version import LooseVersion from distutils.version import LooseVersion
from enum import Enum from enum import Enum
from functools import wraps from functools import wraps
from collections import namedtuple from collections import namedtuple, UserDict
import glob import glob
import os import os
import json import json
@ -36,12 +35,6 @@ from subprocess import CalledProcessError
from charmhelpers import deprecate from charmhelpers import deprecate
import six
if not six.PY3:
from UserDict import UserDict
else:
from collections import UserDict
CRITICAL = "CRITICAL" CRITICAL = "CRITICAL"
ERROR = "ERROR" ERROR = "ERROR"
@ -112,7 +105,7 @@ def log(message, level=None):
command = ['juju-log'] command = ['juju-log']
if level: if level:
command += ['-l', level] command += ['-l', level]
if not isinstance(message, six.string_types): if not isinstance(message, str):
message = repr(message) message = repr(message)
command += [message[:SH_MAX_ARG]] command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests # Missing juju-log should not cause failures in unit tests
@ -132,7 +125,7 @@ def log(message, level=None):
def function_log(message): def function_log(message):
"""Write a function progress message""" """Write a function progress message"""
command = ['function-log'] command = ['function-log']
if not isinstance(message, six.string_types): if not isinstance(message, str):
message = repr(message) message = repr(message)
command += [message[:SH_MAX_ARG]] command += [message[:SH_MAX_ARG]]
# Missing function-log should not cause failures in unit tests # Missing function-log should not cause failures in unit tests
@ -445,12 +438,6 @@ def config(scope=None):
""" """
global _cache_config global _cache_config
config_cmd_line = ['config-get', '--all', '--format=json'] config_cmd_line = ['config-get', '--all', '--format=json']
try:
# JSON Decode Exception for Python3.5+
exc_json = json.decoder.JSONDecodeError
except AttributeError:
# JSON Decode Exception for Python2.7 through Python3.4
exc_json = ValueError
try: try:
if _cache_config is None: if _cache_config is None:
config_data = json.loads( config_data = json.loads(
@ -459,7 +446,7 @@ def config(scope=None):
if scope is not None: if scope is not None:
return _cache_config.get(scope) return _cache_config.get(scope)
return _cache_config return _cache_config
except (exc_json, UnicodeDecodeError) as e: except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e:
log('Unable to parse output from config-get: config_cmd_line="{}" ' log('Unable to parse output from config-get: config_cmd_line="{}" '
'message="{}"' 'message="{}"'
.format(config_cmd_line, str(e)), level=ERROR) .format(config_cmd_line, str(e)), level=ERROR)
@ -491,12 +478,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None):
raise raise
@cached
def _relation_set_accepts_file():
"""Return True if the juju relation-set command accepts a file.
Cache the result as it won't change during the execution of a hook, and
thus we can make relation_set() more efficient by only checking for the
first relation_set() call.
:returns: True if relation_set accepts a file.
:rtype: bool
:raises: subprocess.CalledProcessError if the check fails.
"""
return "--file" in subprocess.check_output(
["relation-set", "--help"], universal_newlines=True)
def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs): def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs):
"""Set relation information for the current unit""" """Set relation information for the current unit"""
relation_settings = relation_settings if relation_settings else {} relation_settings = relation_settings if relation_settings else {}
relation_cmd_line = ['relation-set'] relation_cmd_line = ['relation-set']
accepts_file = "--file" in subprocess.check_output(
relation_cmd_line + ["--help"], universal_newlines=True)
if app: if app:
relation_cmd_line.append('--app') relation_cmd_line.append('--app')
if relation_id is not None: if relation_id is not None:
@ -508,7 +509,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs):
# sites pass in things like dicts or numbers. # sites pass in things like dicts or numbers.
if value is not None: if value is not None:
settings[key] = "{}".format(value) settings[key] = "{}".format(value)
if accepts_file: if _relation_set_accepts_file():
# --file was introduced in Juju 1.23.2. Use it by default if # --file was introduced in Juju 1.23.2. Use it by default if
# available, since otherwise we'll break if the relation data is # available, since otherwise we'll break if the relation data is
# too big. Ideally we should tell relation-set to read the data from # too big. Ideally we should tell relation-set to read the data from
@ -1003,14 +1004,8 @@ def cmd_exists(cmd):
@cached @cached
@deprecate("moved to function_get()", log=log)
def action_get(key=None): def action_get(key=None):
""" """Gets the value of an action parameter, or all key/value param pairs."""
.. deprecated:: 0.20.7
Alias for :func:`function_get`.
Gets the value of an action parameter, or all key/value param pairs.
"""
cmd = ['action-get'] cmd = ['action-get']
if key is not None: if key is not None:
cmd.append(key) cmd.append(key)
@ -1020,8 +1015,12 @@ def action_get(key=None):
@cached @cached
@deprecate("moved to action_get()", log=log)
def function_get(key=None): def function_get(key=None):
"""Gets the value of an action parameter, or all key/value param pairs""" """
.. deprecated::
Gets the value of an action parameter, or all key/value param pairs.
"""
cmd = ['function-get'] cmd = ['function-get']
# Fallback for older charms. # Fallback for older charms.
if not cmd_exists('function-get'): if not cmd_exists('function-get'):
@ -1034,22 +1033,20 @@ def function_get(key=None):
return function_data return function_data
@deprecate("moved to function_set()", log=log)
def action_set(values): def action_set(values):
""" """Sets the values to be returned after the action finishes."""
.. deprecated:: 0.20.7
Alias for :func:`function_set`.
Sets the values to be returned after the action finishes.
"""
cmd = ['action-set'] cmd = ['action-set']
for k, v in list(values.items()): for k, v in list(values.items()):
cmd.append('{}={}'.format(k, v)) cmd.append('{}={}'.format(k, v))
subprocess.check_call(cmd) subprocess.check_call(cmd)
@deprecate("moved to action_set()", log=log)
def function_set(values): def function_set(values):
"""Sets the values to be returned after the function finishes""" """
.. deprecated::
Sets the values to be returned after the function finishes.
"""
cmd = ['function-set'] cmd = ['function-set']
# Fallback for older charms. # Fallback for older charms.
if not cmd_exists('function-get'): if not cmd_exists('function-get'):
@ -1060,12 +1057,8 @@ def function_set(values):
subprocess.check_call(cmd) subprocess.check_call(cmd)
@deprecate("moved to function_fail()", log=log)
def action_fail(message): def action_fail(message):
""" """
.. deprecated:: 0.20.7
Alias for :func:`function_fail`.
Sets the action status to failed and sets the error message. Sets the action status to failed and sets the error message.
The results set by action_set are preserved. The results set by action_set are preserved.
@ -1073,10 +1066,14 @@ def action_fail(message):
subprocess.check_call(['action-fail', message]) subprocess.check_call(['action-fail', message])
@deprecate("moved to action_fail()", log=log)
def function_fail(message): def function_fail(message):
"""Sets the function status to failed and sets the error message. """
.. deprecated::
Sets the function status to failed and sets the error message.
The results set by function_set are preserved.""" The results set by function_set are preserved.
"""
cmd = ['function-fail'] cmd = ['function-fail']
# Fallback for older charms. # Fallback for older charms.
if not cmd_exists('function-fail'): if not cmd_exists('function-fail'):
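The hunks above reverse the earlier deprecation: the action_* helpers become canonical again and the function_* wrappers now carry @deprecate. A hedged sketch of that decorator pattern in isolation; old_helper(), new_helper() and the date are illustrative only:

from charmhelpers import deprecate
from charmhelpers.core.hookenv import log


def new_helper():
    return 'result'


@deprecate("moved to new_helper()", "2022-05", log=log)
def old_helper():
    # Thin alias kept for backwards compatibility.
    return new_helper()


old_helper()  # Logs a deprecation warning once, then delegates.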


@ -31,7 +31,6 @@ import subprocess
import hashlib import hashlib
import functools import functools
import itertools import itertools
import six
from contextlib import contextmanager from contextlib import contextmanager
from collections import OrderedDict, defaultdict from collections import OrderedDict, defaultdict
@ -263,7 +262,7 @@ def service(action, service_name, **kwargs):
cmd = ['systemctl', action, service_name] cmd = ['systemctl', action, service_name]
else: else:
cmd = ['service', service_name, action] cmd = ['service', service_name, action]
for key, value in six.iteritems(kwargs): for key, value in kwargs.items():
parameter = '%s=%s' % (key, value) parameter = '%s=%s' % (key, value)
cmd.append(parameter) cmd.append(parameter)
return subprocess.call(cmd) == 0 return subprocess.call(cmd) == 0
@ -289,7 +288,7 @@ def service_running(service_name, **kwargs):
if os.path.exists(_UPSTART_CONF.format(service_name)): if os.path.exists(_UPSTART_CONF.format(service_name)):
try: try:
cmd = ['status', service_name] cmd = ['status', service_name]
for key, value in six.iteritems(kwargs): for key, value in kwargs.items():
parameter = '%s=%s' % (key, value) parameter = '%s=%s' % (key, value)
cmd.append(parameter) cmd.append(parameter)
output = subprocess.check_output( output = subprocess.check_output(
@ -564,7 +563,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target: with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid) os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms) os.fchmod(target.fileno(), perms)
if six.PY3 and isinstance(content, six.string_types): if isinstance(content, str):
content = content.encode('UTF-8') content = content.encode('UTF-8')
target.write(content) target.write(content)
return return
@ -967,7 +966,7 @@ def get_bond_master(interface):
def list_nics(nic_type=None): def list_nics(nic_type=None):
"""Return a list of nics of given type(s)""" """Return a list of nics of given type(s)"""
if isinstance(nic_type, six.string_types): if isinstance(nic_type, str):
int_types = [nic_type] int_types = [nic_type]
else: else:
int_types = nic_type int_types = nic_type
@ -1081,8 +1080,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):
try: try:
chown(full, uid, gid) chown(full, uid, gid)
except (IOError, OSError) as e: except (IOError, OSError) as e:
# Intended to ignore "file not found". Catching both to be # Intended to ignore "file not found".
# compatible with both Python 2.7 and 3.x.
if e.errno == errno.ENOENT: if e.errno == errno.ENOENT:
pass pass
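For completeness, a small usage sketch of write_file(), whose str handling is simplified above; the paths are illustrative and, as in a charm hook, the caller is assumed to run as root because ownership is applied with chown:

from charmhelpers.core.host import write_file

# str content is UTF-8 encoded before writing; bytes pass through untouched.
write_file('/tmp/example.conf', 'listen 8080\n', perms=0o644)
write_file('/tmp/example.bin', b'\x00\x01\x02', perms=0o600)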


@ -17,8 +17,6 @@ import json
import inspect import inspect
from collections import Iterable, OrderedDict from collections import Iterable, OrderedDict
import six
from charmhelpers.core import host from charmhelpers.core import host
from charmhelpers.core import hookenv from charmhelpers.core import hookenv
@ -171,10 +169,7 @@ class ServiceManager(object):
if not units: if not units:
continue continue
remote_service = units[0].split('/')[0] remote_service = units[0].split('/')[0]
if six.PY2: argspec = inspect.getfullargspec(provider.provide_data)
argspec = inspect.getargspec(provider.provide_data)
else:
argspec = inspect.getfullargspec(provider.provide_data)
if len(argspec.args) > 1: if len(argspec.args) > 1:
data = provider.provide_data(remote_service, service_ready) data = provider.provide_data(remote_service, service_ready)
else: else:


@ -179,7 +179,7 @@ class RequiredConfig(dict):
self.required_options = args self.required_options = args
self['config'] = hookenv.config() self['config'] = hookenv.config()
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
self.config = yaml.load(fp).get('options', {}) self.config = yaml.safe_load(fp).get('options', {})
def __bool__(self): def __bool__(self):
for option in self.required_options: for option in self.required_options:
@ -227,7 +227,7 @@ class StoredContext(dict):
if not os.path.isabs(file_name): if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name) file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'r') as file_stream: with open(file_name, 'r') as file_stream:
data = yaml.load(file_stream) data = yaml.safe_load(file_stream)
if not data: if not data:
raise OSError("%s is empty" % file_name) raise OSError("%s is empty" % file_name)
return data return data


@ -15,7 +15,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import six
import re import re
TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'} TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'}
@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY
Returns True if value translates to True otherwise False. Returns True if value translates to True otherwise False.
""" """
if isinstance(value, six.string_types): if isinstance(value, str):
value = six.text_type(value) value = str(value)
else: else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value) msg = "Unable to interpret non-string value '%s' as boolean" % (value)
raise ValueError(msg) raise ValueError(msg)
@ -61,8 +60,8 @@ def bytes_from_string(value):
'P': 5, 'P': 5,
'PB': 5, 'PB': 5,
} }
if isinstance(value, six.string_types): if isinstance(value, str):
value = six.text_type(value) value = str(value)
else: else:
msg = "Unable to interpret non-string value '%s' as bytes" % (value) msg = "Unable to interpret non-string value '%s' as bytes" % (value)
raise ValueError(msg) raise ValueError(msg)
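A couple of illustrative calls to the helpers touched above (input values chosen arbitrarily):

from charmhelpers.core.strutils import bool_from_string, bytes_from_string

assert bool_from_string('Yes')             # truthy strings: y/yes/true/t/on
assert not bool_from_string('no')          # 'no' is in the falsey set
assert bytes_from_string('3K') == 3 * 1024
assert bytes_from_string('2GB') == 2 * 1024 ** 3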


@ -13,7 +13,6 @@
# limitations under the License. # limitations under the License.
import os import os
import sys
from charmhelpers.core import host from charmhelpers.core import host
from charmhelpers.core import hookenv from charmhelpers.core import hookenv
@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root',
The rendered template will be written to the file as well as being returned The rendered template will be written to the file as well as being returned
as a string. as a string.
Note: Using this requires python-jinja2 or python3-jinja2; if it is not Note: Using this requires python3-jinja2; if it is not installed, calling
installed, calling this will attempt to use charmhelpers.fetch.apt_install this will attempt to use charmhelpers.fetch.apt_install to install it.
to install it.
""" """
try: try:
from jinja2 import FileSystemLoader, Environment, exceptions from jinja2 import FileSystemLoader, Environment, exceptions
@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root',
'charmhelpers.fetch to install it', 'charmhelpers.fetch to install it',
level=hookenv.ERROR) level=hookenv.ERROR)
raise raise
if sys.version_info.major == 2: apt_install('python3-jinja2', fatal=True)
apt_install('python-jinja2', fatal=True)
else:
apt_install('python3-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions from jinja2 import FileSystemLoader, Environment, exceptions
if template_loader: if template_loader:
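An illustrative render() call matching the python3-jinja2 note above; the template name, target path and context are placeholders:

from charmhelpers.core.templating import render

# Looks for templates/haproxy.cfg.j2 in the charm directory by default and
# writes the rendered result to the target path (root-owned, as in hooks).
render(source='haproxy.cfg.j2',
       target='/etc/haproxy/haproxy.cfg',
       context={'port': 8080},
       perms=0o640)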


@ -20,11 +20,7 @@ from charmhelpers.core.hookenv import (
log, log,
) )
import six from urllib.parse import urlparse, urlunparse
if six.PY3:
from urllib.parse import urlparse, urlunparse
else:
from urlparse import urlparse, urlunparse
# The order of this list is very important. Handlers should be listed in from # The order of this list is very important. Handlers should be listed in from
@ -134,14 +130,14 @@ def configure_sources(update=False,
sources = safe_load((config(sources_var) or '').strip()) or [] sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, six.string_types): if isinstance(sources, str):
sources = [sources] sources = [sources]
if keys is None: if keys is None:
for source in sources: for source in sources:
add_source(source, None) add_source(source, None)
else: else:
if isinstance(keys, six.string_types): if isinstance(keys, str):
keys = [keys] keys = [keys]
if len(sources) != len(keys): if len(sources) != len(keys):


@ -26,26 +26,15 @@ from charmhelpers.payload.archive import (
) )
from charmhelpers.core.host import mkdir, check_hash from charmhelpers.core.host import mkdir, check_hash
import six from urllib.request import (
if six.PY3: build_opener, install_opener, urlopen, urlretrieve,
from urllib.request import ( HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
build_opener, install_opener, urlopen, urlretrieve, )
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, from urllib.parse import urlparse, urlunparse, parse_qs
) from urllib.error import URLError
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError
else:
from urllib import urlretrieve
from urllib2 import (
build_opener, install_opener, urlopen,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
URLError
)
from urlparse import urlparse, urlunparse, parse_qs
def splituser(host): def splituser(host):
'''urllib.splituser(), but six's support of this seems broken'''
_userprog = re.compile('^(.*)@(.*)$') _userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host) match = _userprog.match(host)
if match: if match:
@ -54,7 +43,6 @@ def splituser(host):
def splitpasswd(user): def splitpasswd(user):
'''urllib.splitpasswd(), but six's support of this is missing'''
_passwdprog = re.compile('^([^:]*):(.*)$', re.S) _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
match = _passwdprog.match(user) match = _passwdprog.match(user)
if match: if match:
@ -150,10 +138,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
raise UnhandledSource(e.strerror) raise UnhandledSource(e.strerror)
options = parse_qs(url_parts.fragment) options = parse_qs(url_parts.fragment)
for key, value in options.items(): for key, value in options.items():
if not six.PY3: algorithms = hashlib.algorithms_available
algorithms = hashlib.algorithms
else:
algorithms = hashlib.algorithms_available
if key in algorithms: if key in algorithms:
if len(value) != 1: if len(value) != 1:
raise TypeError( raise TypeError(
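A small illustration of the credential-splitting helpers kept above, now free of the six-era docstrings; the host string is a made-up example:

from charmhelpers.fetch.archiveurl import splitpasswd, splituser

user, host = splituser('jane:s3cret@archive.example.com')
username, password = splitpasswd(user)
assert (username, password, host) == ('jane', 's3cret', 'archive.example.com')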


@ -15,7 +15,6 @@
import subprocess import subprocess
import os import os
import time import time
import six
import yum import yum
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False):
if options is not None: if options is not None:
cmd.extend(options) cmd.extend(options)
cmd.append('install') cmd.append('install')
if isinstance(packages, six.string_types): if isinstance(packages, str):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -71,7 +70,7 @@ def update(fatal=False):
def purge(packages, fatal=False): def purge(packages, fatal=False):
"""Purge one or more packages.""" """Purge one or more packages."""
cmd = ['yum', '--assumeyes', 'remove'] cmd = ['yum', '--assumeyes', 'remove']
if isinstance(packages, six.string_types): if isinstance(packages, str):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -83,7 +82,7 @@ def yum_search(packages):
"""Search for a package.""" """Search for a package."""
output = {} output = {}
cmd = ['yum', 'search'] cmd = ['yum', 'search']
if isinstance(packages, six.string_types): if isinstance(packages, str):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)


@ -15,8 +15,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function
import atexit import atexit
import sys import sys


@ -16,7 +16,6 @@
# limitations under the License. # limitations under the License.
import os import os
import six
import subprocess import subprocess
import sys import sys
@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs):
from pip import main as _pip_execute from pip import main as _pip_execute
except ImportError: except ImportError:
apt_update() apt_update()
if six.PY2: apt_install('python3-pip')
apt_install('python-pip')
else:
apt_install('python3-pip')
from pip import main as _pip_execute from pip import main as _pip_execute
_pip_execute(*args, **kwargs) _pip_execute(*args, **kwargs)
finally: finally:
@ -140,12 +136,8 @@ def pip_list():
def pip_create_virtualenv(path=None): def pip_create_virtualenv(path=None):
"""Create an isolated Python environment.""" """Create an isolated Python environment."""
if six.PY2: apt_install(['python3-virtualenv', 'virtualenv'])
apt_install('python-virtualenv') extra_flags = ['--python=python3']
extra_flags = []
else:
apt_install(['python3-virtualenv', 'virtualenv'])
extra_flags = ['--python=python3']
if path: if path:
venv_path = path venv_path = path


@ -13,10 +13,8 @@
# limitations under the License. # limitations under the License.
from collections import OrderedDict from collections import OrderedDict
import os
import platform import platform
import re import re
import six
import subprocess import subprocess
import sys import sys
import time import time
@ -361,7 +359,7 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
cmd = ['apt-get', '--assume-yes'] cmd = ['apt-get', '--assume-yes']
cmd.extend(options) cmd.extend(options)
cmd.append('install') cmd.append('install')
if isinstance(packages, six.string_types): if isinstance(packages, str):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -413,7 +411,7 @@ def apt_purge(packages, fatal=False):
:raises: subprocess.CalledProcessError :raises: subprocess.CalledProcessError
""" """
cmd = ['apt-get', '--assume-yes', 'purge'] cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, six.string_types): if isinstance(packages, str):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -440,7 +438,7 @@ def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark.""" """Flag one or more packages using apt-mark."""
log("Marking {} as {}".format(packages, mark)) log("Marking {} as {}".format(packages, mark))
cmd = ['apt-mark', mark] cmd = ['apt-mark', mark]
if isinstance(packages, six.string_types): if isinstance(packages, str):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -485,10 +483,7 @@ def import_key(key):
if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
'-----END PGP PUBLIC KEY BLOCK-----' in key): '-----END PGP PUBLIC KEY BLOCK-----' in key):
log("Writing provided PGP key in the binary format", level=DEBUG) log("Writing provided PGP key in the binary format", level=DEBUG)
if six.PY3: key_bytes = key.encode('utf-8')
key_bytes = key.encode('utf-8')
else:
key_bytes = key
key_name = _get_keyid_by_gpg_key(key_bytes) key_name = _get_keyid_by_gpg_key(key_bytes)
key_gpg = _dearmor_gpg_key(key_bytes) key_gpg = _dearmor_gpg_key(key_bytes)
_write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
@ -528,9 +523,8 @@ def _get_keyid_by_gpg_key(key_material):
stderr=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE) stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_material) out, err = ps.communicate(input=key_material)
if six.PY3: out = out.decode('utf-8')
out = out.decode('utf-8') err = err.decode('utf-8')
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err: if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material provided') raise GPGKeyError('Invalid GPG key material provided')
# from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
@ -588,8 +582,7 @@ def _dearmor_gpg_key(key_asc):
stdin=subprocess.PIPE) stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_asc) out, err = ps.communicate(input=key_asc)
# no need to decode output as it is binary (invalid utf-8), only error # no need to decode output as it is binary (invalid utf-8), only error
if six.PY3: err = err.decode('utf-8')
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err: if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material. Check your network setup' raise GPGKeyError('Invalid GPG key material. Check your network setup'
' (MTU, routing, DNS) and/or proxy server settings' ' (MTU, routing, DNS) and/or proxy server settings'
@ -693,7 +686,7 @@ def add_source(source, key=None, fail_invalid=False):
]) ])
if source is None: if source is None:
source = '' source = ''
for r, fn in six.iteritems(_mapping): for r, fn in _mapping.items():
m = re.match(r, source) m = re.match(r, source)
if m: if m:
if key: if key:
@ -726,7 +719,7 @@ def _add_proposed():
""" """
release = get_distrib_codename() release = get_distrib_codename()
arch = platform.machine() arch = platform.machine()
if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): if arch not in ARCH_TO_PROPOSED_POCKET.keys():
raise SourceConfigError("Arch {} not supported for (distro-)proposed" raise SourceConfigError("Arch {} not supported for (distro-)proposed"
.format(arch)) .format(arch))
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
@ -913,9 +906,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
kwargs = {} kwargs = {}
if quiet: if quiet:
devnull = os.devnull if six.PY2 else subprocess.DEVNULL kwargs['stdout'] = subprocess.DEVNULL
kwargs['stdout'] = devnull kwargs['stderr'] = subprocess.DEVNULL
kwargs['stderr'] = devnull
if not retry_message: if not retry_message:
retry_message = "Failed executing '{}'".format(" ".join(cmd)) retry_message = "Failed executing '{}'".format(" ".join(cmd))
@ -957,9 +949,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False):
else: else:
kwargs = {} kwargs = {}
if quiet: if quiet:
devnull = os.devnull if six.PY2 else subprocess.DEVNULL kwargs['stdout'] = subprocess.DEVNULL
kwargs['stdout'] = devnull kwargs['stderr'] = subprocess.DEVNULL
kwargs['stderr'] = devnull
subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs) subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs)
@ -989,7 +980,7 @@ def get_installed_version(package):
Version object Version object
""" """
cache = apt_cache() cache = apt_cache()
dpkg_result = cache._dpkg_list([package]).get(package, {}) dpkg_result = cache.dpkg_list([package]).get(package, {})
current_ver = None current_ver = None
installed_version = dpkg_result.get('version') installed_version = dpkg_result.get('version')
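The last hunk above moves get_installed_version() onto the public dpkg_list() accessor; an illustrative query (the package name is arbitrary):

from charmhelpers.fetch.ubuntu import get_installed_version

version = get_installed_version('openssh-server')  # None if not installed
if version:
    print('openssh-server', version)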


@ -40,6 +40,9 @@ import os
import subprocess import subprocess
import sys import sys
from charmhelpers import deprecate
from charmhelpers.core.hookenv import log
class _container(dict): class _container(dict):
"""Simple container for attributes.""" """Simple container for attributes."""
@ -79,7 +82,7 @@ class Cache(object):
apt_result = self._apt_cache_show([package])[package] apt_result = self._apt_cache_show([package])[package]
apt_result['name'] = apt_result.pop('package') apt_result['name'] = apt_result.pop('package')
pkg = Package(apt_result) pkg = Package(apt_result)
dpkg_result = self._dpkg_list([package]).get(package, {}) dpkg_result = self.dpkg_list([package]).get(package, {})
current_ver = None current_ver = None
installed_version = dpkg_result.get('version') installed_version = dpkg_result.get('version')
if installed_version: if installed_version:
@ -88,9 +91,29 @@ class Cache(object):
pkg.architecture = dpkg_result.get('architecture') pkg.architecture = dpkg_result.get('architecture')
return pkg return pkg
@deprecate("use dpkg_list() instead.", "2022-05", log=log)
def _dpkg_list(self, packages): def _dpkg_list(self, packages):
return self.dpkg_list(packages)
def dpkg_list(self, packages):
"""Get data from system dpkg database for package. """Get data from system dpkg database for package.
Note that this method is also useful for querying package names
containing wildcards, for example
apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*'])
may return
{
'nvidia-vgpu-ubuntu-470': {
'name': 'nvidia-vgpu-ubuntu-470',
'version': '470.68',
'architecture': 'amd64',
'description': 'NVIDIA vGPU driver - version 470.68'
}
}
:param packages: Packages to get data from :param packages: Packages to get data from
:type packages: List[str] :type packages: List[str]
:returns: Structured data about installed packages, keys like :returns: Structured data about installed packages, keys like