Sync/rebuild for Dalmatian/Epoxy updates
Refresh and rebuild charm for awareness of Dalmatian and Epoxy Cloud Archive releases.

Change-Id: Id20db247085515c7a47c6aaad559993e3344c49f

parent a86390aeab
commit 05a43b057f
@@ -14,30 +14,15 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
from __future__ import print_function
from __future__ import absolute_import

import functools
import inspect
import subprocess
import sys

try:
    import six  # NOQA:F401
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # NOQA:F401

try:
    import yaml  # NOQA:F401
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # NOQA:F401
@@ -16,9 +16,6 @@ import inspect
import argparse
import sys

import six
from six.moves import zip

import charmhelpers.core.unitdata

@@ -149,10 +146,7 @@ class CommandLine(object):
    def run(self):
        "Run cli, processing arguments and executing subcommands."
        arguments = self.argument_parser.parse_args()
        if six.PY2:
            argspec = inspect.getargspec(arguments.func)
        else:
            argspec = inspect.getfullargspec(arguments.func)
        argspec = inspect.getfullargspec(arguments.func)
        vargs = []
        for arg in argspec.args:
            vargs.append(getattr(arguments, arg))

@@ -177,10 +171,7 @@ def describe_arguments(func):
    Analyze a function's signature and return a data structure suitable for
    passing in as arguments to an argparse parser's add_argument() method."""

    if six.PY2:
        argspec = inspect.getargspec(func)
    else:
        argspec = inspect.getfullargspec(func)
    argspec = inspect.getfullargspec(func)
    # we should probably raise an exception somewhere if func includes **kwargs
    if argspec.defaults:
        positional_args = argspec.args[:-len(argspec.defaults)]
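Both call sites now use inspect.getfullargspec() unconditionally. A minimal standalone illustration (the function below is hypothetical, not from the diff) of what it returns, and what the removed py2-only getargspec() could not see:

import inspect

def example(host, port=8080, *, timeout=None):
    pass

spec = inspect.getfullargspec(example)
# spec.args -> ['host', 'port']; spec.defaults -> (8080,)
# spec.kwonlyargs -> ['timeout']  (keyword-only args, invisible to getargspec())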
@@ -19,6 +19,7 @@
import glob
import grp
import json
import os
import pwd
import re
@@ -28,7 +29,9 @@ import subprocess
import yaml

from charmhelpers.core.hookenv import (
    application_name,
    config,
    ERROR,
    hook_name,
    local_unit,
    log,
@@ -174,7 +177,8 @@ define service {{
        if os.path.exists(os.path.join(path, parts[0])):
            command = os.path.join(path, parts[0])
            if len(parts) > 1:
                command += " " + " ".join(parts[1:])
                safe_args = [shlex.quote(arg) for arg in parts[1:]]
                command += " " + " ".join(safe_args)
            return command
    log('Check command not found: {}'.format(parts[0]))
    return ''
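The rewritten command builder quotes each argument before joining, so values containing shell metacharacters can no longer alter the rendered check command. A small standalone sketch with made-up values:

import shlex

parts = ['check_http', '-H', 'host name; rm -rf /']
safe_args = [shlex.quote(arg) for arg in parts[1:]]
print(parts[0] + " " + " ".join(safe_args))
# check_http -H 'host name; rm -rf /'   -- the metacharacters are neutralised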
@@ -414,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
    :param str unit_name: Unit name to use in check description
    :param bool immediate_check: For sysv init, run the service check immediately
    """
    # check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
    # just remove check_haproxy if haproxy is added as a lsb resource in hacluster.
    for rid in relation_ids("ha"):
        ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
        if ha_resources:
            try:
                ha_resources_parsed = json.loads(ha_resources)
            except ValueError as e:
                log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
                raise
            if "lsb:haproxy" in ha_resources_parsed.values():
                if "haproxy" in services:
                    log("removed check_haproxy. This service will be monitored by check_crm")
                    services.remove("haproxy")
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:

@@ -520,3 +538,39 @@ def remove_deprecated_check(nrpe, deprecated_services):
    for dep_svc in deprecated_services:
        log('Deprecated service: {}'.format(dep_svc))
        nrpe.remove_check(shortname=dep_svc)


def add_deferred_restarts_check(nrpe):
    """
    Add NRPE check for services with deferred restarts.

    :param NRPE nrpe: NRPE object to add check to
    """
    unit_name = local_unit().replace('/', '-')
    shortname = unit_name + '_deferred_restarts'
    check_cmd = 'check_deferred_restarts.py --application {}'.format(
        application_name())

    log('Adding deferred restarts nrpe check: {}'.format(shortname))
    nrpe.add_check(
        shortname=shortname,
        description='Check deferred service restarts {}'.format(unit_name),
        check_cmd=check_cmd)


def remove_deferred_restarts_check(nrpe):
    """
    Remove NRPE check for services with deferred service restarts.

    :param NRPE nrpe: NRPE object to remove check from
    """
    unit_name = local_unit().replace('/', '-')
    shortname = unit_name + '_deferred_restarts'
    check_cmd = 'check_deferred_restarts.py --application {}'.format(
        application_name())

    log('Removing deferred restarts nrpe check: {}'.format(shortname))
    nrpe.remove_check(
        shortname=shortname,
        description='Check deferred service restarts {}'.format(unit_name),
        check_cmd=check_cmd)
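A sketch of how a charm could wire the new helper into its nrpe-external-master handling; the NRPE class is the existing one from this module, while the surrounding hook function is an assumption:

from charmhelpers.contrib.charmsupport import nrpe as ch_nrpe

def update_nrpe_config():
    # hypothetical charm hook body
    checks = ch_nrpe.NRPE()
    ch_nrpe.add_deferred_restarts_check(checks)
    checks.write()  # render the check definition and reload nagios-nrpe-server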
@@ -19,7 +19,6 @@ import sys
import platform
import os
import glob
import six

# from string import upper

@@ -55,10 +54,7 @@ try:
    import MySQLdb
except ImportError:
    apt_update(fatal=True)
    if six.PY2:
        apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
    else:
        apt_install(filter_installed_packages(['python3-mysqldb']), fatal=True)
    apt_install(filter_installed_packages(['python3-mysqldb']), fatal=True)
    import MySQLdb

@@ -346,9 +342,9 @@ class MySQLHelper(object):
        # changes to root-password were not supported) the user changed the
        # password, so leader-get is more reliable source than
        # config.previous('root-password').
        rel_username = None if username == 'root' else username
        if not current_password:
            current_password = self.get_mysql_password(rel_username)
            current_password = self.get_mysql_password(
                None if username == 'root' else username)

        # password that needs to be set
        new_passwd = password

@@ -356,12 +352,62 @@ class MySQLHelper(object):
        # update password for all users (e.g. root@localhost, root@::1, etc)
        try:
            self.connect(user=username, password=current_password)
            cursor = self.connection.cursor()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using password in '
                                         'leader settings (%s)') % ex, ex)
        self.set_mysql_password_using_current_connection(
            username, new_passwd)

    def set_mysql_password_using_current_connection(
            self, username, new_passwd, hosts=None):
        """Update a mysql password using the current connection.

        Update the password for a username using the current connection in
        `self.connection`. It is expected that the connect is a root
        connection that can change the password for any user. The leader
        settings (if the unit is the leader) are also changed to match the
        password in the database.

        Note: passwords have to be changed on each mysql unit as they are not
        propagated using replication in clusters.

        :param username: the username to change the password for.
        :type username: str
        :param new_passwd: the new password for the user.
        :type new_passwd: str
        :param hosts: optional list of hosts.
        :type hosts: Optional[List[str]]
        :raises MySQLSetPasswordError: if the password can't be changed.
        """
        # update the password using the self.connection
        self._update_password(username, new_passwd, hosts=hosts)

        # check the password was changed, only if it is local (i.e. no hosts
        # are assigned) as otherwise this will fail.
        if not hosts:
            self._check_user_can_connect(username, new_passwd)

        # Update the leader settings (if leader) with the new password.
        # It's a no-op on a non-leader
        self._update_leader_settings(username, new_passwd)

    def _update_password(self, username, new_passwd, hosts=None):
        """Update the password for a user using the existing self.connection

        :param username: the user to connect for
        :type username: str
        :param password: the password to use.
        :type password: str
        :param hosts: optional list of hosts.
        :type hosts: Optional[List[str]]
        :raises MySQLSetPasswordError: if the user can't connect.
        """
        if not hosts:
            hosts = None
        user_sub = "%s" if hosts is None else "%s@%s"
        cursor = None
        try:
            cursor = self.connection.cursor()
            # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account
            # fails when using SET PASSWORD so using UPDATE against the
            # mysql.user table is needed, but changes to this table are not

@@ -371,40 +417,67 @@ class MySQLHelper(object):
            release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
            if release < 'bionic':
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = "
                                     "PASSWORD( %s ) WHERE user = %s;")
                                     "PASSWORD( %s ) WHERE user = {};"
                                     .format(user_sub))
            else:
                # PXC 5.7 (introduced in Bionic) uses authentication_string
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET "
                                     "authentication_string = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
                                     "PASSWORD( %s ) WHERE user = {};"
                                     .format(user_sub))
            if hosts is None:
                cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
            else:
                for host in hosts:
                    cursor.execute(SQL_UPDATE_PASSWD,
                                   (new_passwd, username, host))
            cursor.execute('FLUSH PRIVILEGES;')
            self.connection.commit()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError('Cannot update password: %s' % str(ex),
                                        ex)
        finally:
            cursor.close()
            if cursor is not None:
                cursor.close()

    # check the password was changed
    def _check_user_can_connect(self, username, password):
        """Verify that a user can connect using a password.

        :param username: the user to connect for
        :type username: str
        :param password: the password to use.
        :type password: str
        :raises MySQLSetPasswordError: if the user can't connect.
        """
        try:
            self.connect(user=username, password=new_passwd)
            self.connect(user=username, password=password)
            self.execute('select 1;')
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using new password: '
                                         '%s') % str(ex), ex)

    def _update_leader_settings(self, username, password):
        """Update the leader settings for the username & password.

        This is a no-op if not the leader. If the username hasn't previously
        had the password set, then this does not store the new password.

        :param username: the user to connect for
        :type username: str
        :param password: the password to use.
        :type password: str
        """
        if not is_leader():
            log('Only the leader can set a new password in the relation',
                level=DEBUG)
            return

        for key in self.passwd_keys(rel_username):
        for key in self.passwd_keys(None if username == 'root' else username):
            _password = leader_get(key)
            if _password:
                log('Updating password for %s (%s)' % (key, rel_username),
                log('Updating password for %s (%s)' % (key, username),
                    level=DEBUG)
                leader_set(settings={key: new_passwd})
                leader_set(settings={key: password})
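A usage sketch for the new hosts-aware password helper above; it assumes `helper` is a MySQLHelper instance already connected as a root user (setup elided), and the addresses are made up:

# rotate the password for every nova@<host> row on this unit
helper.set_mysql_password_using_current_connection(
    'nova', 'new-secret', hosts=['10.5.0.11', '10.5.0.12'])
# with hosts given, each user@host row is updated individually and the
# post-change connect check is skipped (it would fail for non-local rows)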
    def set_mysql_root_password(self, password, current_password=None):
        """Update mysql root password changing the leader settings
@@ -652,6 +725,17 @@ class MySQLConfigHelper(object):

        return innodb_buffer_pool_size

    def get_group_replication_message_cache_size(self):
        """Get value for group_replication_message_cache_size.

        :returns: Numeric value for group_replication_message_cache_size
                  None if not set.
        :rtype: Union[None, int]
        """
        gr_message_cache_size = config_get('group-replication-message-cache-size')
        if gr_message_cache_size:
            return self.human_to_bytes(gr_message_cache_size)


class PerconaClusterHelper(MySQLConfigHelper):
    """Percona-cluster specific configuration helper."""

@@ -716,6 +800,28 @@ class MySQL8Helper(MySQLHelper):
        finally:
            cursor.close()

    def user_host_list(self):
        """Return a list of (user, host) tuples from the database.

        This requires that self.connection has the permissions to perform the
        action.

        :returns: list of (user, host) tuples.
        :rtype: List[Tuple[str, str]]
        """
        SQL_USER_LIST = "SELECT user, host from mysql.user"

        cursor = self.connection.cursor()
        try:
            cursor.execute(SQL_USER_LIST)
            return [(i[0], i[1]) for i in cursor.fetchall()]
        except MySQLdb.OperationalError as e:
            log("Couldn't return user list: reason {}".format(str(e)),
                "WARNING")
        finally:
            cursor.close()
        return []

    def create_user(self, db_user, remote_ip, password):

        SQL_USER_CREATE = (
@@ -729,12 +835,50 @@ class MySQL8Helper(MySQLHelper):
                remote_ip=remote_ip,
                password=password)
            )
        except MySQLdb._exceptions.OperationalError:
        except MySQLdb.OperationalError:
            log("DB user {} already exists.".format(db_user),
                "WARNING")
        finally:
            cursor.close()

    def _update_password(self, username, new_passwd, hosts=None):
        """Update the password for a user using the existing self.connection

        :param username: the user to connect for
        :type username: str
        :param password: the password to use.
        :type password: str
        :param hosts: optional list of hosts.
        :type hosts: Optional[List[str]]
        :raises MySQLSetPasswordError: if the user can't connect.
        """
        if not hosts:
            hosts = None
        user_sub = "%s" if hosts is None else "%s@%s"
        cursor = None
        try:
            cursor = self.connection.cursor()
            SQL_UPDATE_PASSWD = ("ALTER USER {} IDENTIFIED BY %s;"
                                 .format(user_sub))
            if hosts is None:
                log("Updating password for username: {}".format(username),
                    "DEBUG")
                cursor.execute(SQL_UPDATE_PASSWD, (username, new_passwd))
            else:
                for host in hosts:
                    log("Updating password for username: {}".format(username),
                        "DEBUG")
                    cursor.execute(SQL_UPDATE_PASSWD,
                                   (username, host, new_passwd))
            cursor.execute('FLUSH PRIVILEGES;')
            self.connection.commit()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError('Cannot update password: %s' % str(ex),
                                        ex)
        finally:
            if cursor is not None:
                cursor.close()
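A sketch of how the MySQL 8 override pairs with the new user_host_list() helper; it assumes `helper` is a connected MySQL8Helper and the user/host values are illustrative:

# find every host row for a user, then rotate them all via ALTER USER
hosts = [host for user, host in helper.user_host_list() if user == 'nova']
helper._update_password('nova', 'new-secret', hosts=hosts)
# MySQLdb binds and quotes each %s, so "ALTER USER %s@%s ..." expands to
# e.g. ALTER USER 'nova'@'10.5.0.11' IDENTIFIED BY 'new-secret';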
    def create_router_grant(self, db_user, remote_ip, password):

        # Make sure the user exists

@@ -32,8 +32,6 @@ import time

from socket import gethostname as get_unit_hostname

import six

from charmhelpers.core.hookenv import (
    log,
    relation_ids,
@@ -125,16 +123,16 @@ def is_crm_dc():
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
        status = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT).decode('utf-8')
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            # Current DC: juju-lytrusty-machine-2 (168108163)
            # - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
@@ -158,9 +156,8 @@ def is_crm_leader(resource, retry=False):
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
        status = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT).decode('utf-8')
    except subprocess.CalledProcessError:
        status = None

@@ -224,6 +221,13 @@ def https():
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    # Local import to avoid circular dependency.
    import charmhelpers.contrib.openstack.cert_utils as cert_utils
    if (
        cert_utils.get_certificate_request() and not
        cert_utils.get_requests_for_local_unit("certificates")
    ):
        return False
    for r_id in relation_ids('certificates'):
        for unit in relation_list(r_id):
            ca = relation_get('ca', rid=r_id, unit=unit)
@@ -327,7 +331,7 @@ def valid_hacluster_config():
    '''
    vip = config_get('vip')
    dns = config_get('dns-ha')
    if not(bool(vip) ^ bool(dns)):
    if not (bool(vip) ^ bool(dns)):
        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
               'use high availability')
        status_set('blocked', msg)
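The guard is an exactly-one-of check; a tiny standalone illustration with made-up values:

for vip, dns in [(None, None), ('10.5.0.100', None), (None, True), ('10.5.0.100', True)]:
    ok = bool(vip) ^ bool(dns)
    print(vip, dns, 'valid' if ok else 'blocked')
# only the two configurations with exactly one of vip/dns-ha set are valid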
@@ -14,7 +14,6 @@

import os
import re
import six
import subprocess

@@ -95,9 +94,7 @@ class ApacheConfContext(object):
        settings = utils.get_settings('apache')
        ctxt = settings['hardening']

        out = subprocess.check_output(['apache2', '-v'])
        if six.PY3:
            out = out.decode('utf-8')
        out = subprocess.check_output(['apache2', '-v']).decode('utf-8')
        ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
                                           out).group(1)
        ctxt['apache_icondir'] = '/usr/share/apache2/icons/'

@@ -15,8 +15,6 @@
import re
import subprocess

import six

from charmhelpers.core.hookenv import (
    log,
    INFO,
@@ -35,7 +33,7 @@ class DisabledModuleAudit(BaseAudit):
    def __init__(self, modules):
        if modules is None:
            self.modules = []
        elif isinstance(modules, six.string_types):
        elif isinstance(modules, str):
            self.modules = [modules]
        else:
            self.modules = modules
@@ -68,9 +66,7 @@ class DisabledModuleAudit(BaseAudit):
    @staticmethod
    def _get_loaded_modules():
        """Returns the modules which are enabled in Apache."""
        output = subprocess.check_output(['apache2ctl', '-M'])
        if six.PY3:
            output = output.decode('utf-8')
        output = subprocess.check_output(['apache2ctl', '-M']).decode('utf-8')
        modules = []
        for line in output.splitlines():
            # Each line of the enabled module output looks like:

@@ -12,9 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import  # required for external apt import
from six import string_types

from charmhelpers.fetch import (
    apt_cache,
    apt_purge
@@ -51,7 +48,7 @@ class RestrictedPackages(BaseAudit):

    def __init__(self, pkgs, **kwargs):
        super(RestrictedPackages, self).__init__(**kwargs)
        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
        if isinstance(pkgs, str) or not hasattr(pkgs, '__iter__'):
            self.pkgs = pkgs.split()
        else:
            self.pkgs = pkgs

@@ -23,7 +23,6 @@ from subprocess import (
    check_call,
)
from traceback import format_exc
from six import string_types
from stat import (
    S_ISGID,
    S_ISUID
@@ -63,7 +62,7 @@ class BaseFileAudit(BaseAudit):
        """
        super(BaseFileAudit, self).__init__(*args, **kwargs)
        self.always_comply = always_comply
        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
        if isinstance(paths, str) or not hasattr(paths, '__iter__'):
            self.paths = [paths]
        else:
            self.paths = paths

@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from collections import OrderedDict

from charmhelpers.core.hookenv import (
@@ -53,18 +51,17 @@ def harden(overrides=None):
        overrides = []

    def _harden_inner1(f):
        # As this has to be py2.7 compat, we can't use nonlocal. Use a trick
        # to capture the dictionary that can then be updated.
        _logged = {'done': False}
        _logged = False

        def _harden_inner2(*args, **kwargs):
            # knock out hardening via a config var; normally it won't get
            # disabled.
            nonlocal _logged
            if _DISABLE_HARDENING_FOR_UNIT_TEST:
                return f(*args, **kwargs)
            if not _logged['done']:
            if not _logged:
                log("Hardening function '%s'" % (f.__name__), level=DEBUG)
                _logged['done'] = True
                _logged = True
            RUN_CATALOG = OrderedDict([('os', run_os_checks),
                                       ('ssh', run_ssh_checks),
                                       ('mysql', run_mysql_checks),
@@ -74,7 +71,7 @@ def harden(overrides=None):
            if enabled:
                modules_to_run = []
                # modules will always be performed in the following order
                for module, func in six.iteritems(RUN_CATALOG):
                for module, func in RUN_CATALOG.items():
                    if module in enabled:
                        enabled.remove(module)
                        modules_to_run.append(func)
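The decorator now relies on the py3 nonlocal keyword instead of the old mutable-dict trick. A stripped-down standalone sketch of the same run-once pattern (not code from the module):

def log_once(f):
    logged = False

    def wrapper(*args, **kwargs):
        nonlocal logged  # rebinds the enclosing variable; impossible in py2
        if not logged:
            print("first call of", f.__name__)
            logged = True
        return f(*args, **kwargs)
    return wrapper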
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from six import string_types

from charmhelpers.contrib.hardening.audits.file import TemplatedFile
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
@@ -41,7 +39,7 @@ class LoginContext(object):
            # a string assume it to be octal and turn it into an octal
            # string.
            umask = settings['environment']['umask']
            if not isinstance(umask, string_types):
            if not isinstance(umask, str):
                umask = '%s' % oct(umask)

            ctxt = {

@@ -15,7 +15,6 @@
import os
import platform
import re
import six
import subprocess

from charmhelpers.core.hookenv import (
@@ -183,9 +182,9 @@ class SysCtlHardeningContext(object):

                ctxt['sysctl'][key] = d[2] or None

        # Translate for python3
        return {'sysctl_settings':
                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
        return {
            'sysctl_settings': [(k, v) for k, v in ctxt['sysctl'].items()]
        }


class SysctlConf(TemplatedFile):

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import six
import subprocess

from charmhelpers.core.hookenv import (
@@ -82,6 +81,6 @@ class MySQLConfContext(object):
    """
    def __call__(self):
        settings = utils.get_settings('mysql')
        # Translate for python3
        return {'mysql_settings':
                [(k, v) for k, v in six.iteritems(settings['security'])]}
        return {
            'mysql_settings': [(k, v) for k, v in settings['security'].items()]
        }

@@ -13,7 +13,6 @@
# limitations under the License.

import os
import six

from charmhelpers.core.hookenv import (
    log,
@@ -27,10 +26,7 @@ except ImportError:
    from charmhelpers.fetch import apt_install
    from charmhelpers.fetch import apt_update
    apt_update(fatal=True)
    if six.PY2:
        apt_install('python-jinja2', fatal=True)
    else:
        apt_install('python3-jinja2', fatal=True)
    apt_install('python3-jinja2', fatal=True)
    from jinja2 import FileSystemLoader, Environment


@@ -16,7 +16,6 @@ import glob
import grp
import os
import pwd
import six
import yaml

from charmhelpers.core.hookenv import (
@@ -91,7 +90,7 @@ def _apply_overrides(settings, overrides, schema):
    :returns: dictionary of modules config with user overrides applied.
    """
    if overrides:
        for k, v in six.iteritems(overrides):
        for k, v in overrides.items():
            if k in schema:
                if schema[k] is None:
                    settings[k] = v

@@ -15,8 +15,8 @@
import glob
import re
import subprocess
import six
import socket
import ssl

from functools import partial

@@ -39,20 +39,14 @@ try:
    import netifaces
except ImportError:
    apt_update(fatal=True)
    if six.PY2:
        apt_install('python-netifaces', fatal=True)
    else:
        apt_install('python3-netifaces', fatal=True)
    apt_install('python3-netifaces', fatal=True)
    import netifaces

try:
    import netaddr
except ImportError:
    apt_update(fatal=True)
    if six.PY2:
        apt_install('python-netaddr', fatal=True)
    else:
        apt_install('python3-netaddr', fatal=True)
    apt_install('python3-netaddr', fatal=True)
    import netaddr


@@ -462,22 +456,19 @@ def ns_query(address):
    try:
        import dns.resolver
    except ImportError:
        if six.PY2:
            apt_install('python-dnspython', fatal=True)
        else:
            apt_install('python3-dnspython', fatal=True)
        apt_install('python3-dnspython', fatal=True)
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
    elif isinstance(address, str):
        rtype = 'A'
    else:
        return None

    try:
        answers = dns.resolver.query(address, rtype)
    except dns.resolver.NXDOMAIN:
    except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):
        return None

    if answers:
@@ -513,10 +504,7 @@ def get_hostname(address, fqdn=True):
    try:
        import dns.reversename
    except ImportError:
        if six.PY2:
            apt_install("python-dnspython", fatal=True)
        else:
            apt_install("python3-dnspython", fatal=True)
        apt_install("python3-dnspython", fatal=True)
        import dns.reversename

    rev = dns.reversename.from_address(address)
@@ -540,19 +528,56 @@ def get_hostname(address, fqdn=True):
        return result.split('.')[0]


def port_has_listener(address, port):
class SSLPortCheckInfo(object):

    def __init__(self, key, cert, ca_cert, check_hostname=False):
        self.key = key
        self.cert = cert
        self.ca_cert = ca_cert
        # NOTE: by default we do not check hostname since the port check is
        #       typically performed using 0.0.0.0 which will not match the
        #       certificate. Hence the default for this is False.
        self.check_hostname = check_hostname

    @property
    def ssl_context(self):
        context = ssl.create_default_context()
        context.check_hostname = self.check_hostname
        context.load_cert_chain(self.cert, self.key)
        context.load_verify_locations(self.ca_cert)
        return context


def port_has_listener(address, port, sslinfo=None):
    """
    Returns True if the address:port is open and being listened to,
    else False.
    else False. By default uses netcat to check ports but if sslinfo is
    provided will use an SSL connection instead.

    @param address: an IP address or hostname
    @param port: integer port
    @param sslinfo: optional SSLPortCheckInfo object.
                    If provided, the check is performed using an ssl
                    connection.

    Note calls 'nc' via a subprocess shell
    """
    cmd = ['nc', '-z', address, str(port)]
    result = subprocess.call(cmd)
    return not(bool(result))
    if not sslinfo:
        cmd = ['nc', '-z', address, str(port)]
        result = subprocess.call(cmd)
        return not (bool(result))

    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
            ssock = sslinfo.ssl_context.wrap_socket(sock,
                                                    server_hostname=address)
            ssock.connect((address, port))
            # this bit is crucial to ensure tls close_notify is sent
            ssock.unwrap()

            return True
    except ConnectionRefusedError:
        return False


def assert_charm_supports_ipv6():
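A sketch of driving the new TLS-aware port check; the certificate paths are made up, and the check performs a full handshake rather than the plain netcat probe:

sslinfo = SSLPortCheckInfo(key='/etc/ssl/private/unit.key',
                           cert='/etc/ssl/certs/unit.crt',
                           ca_cert='/etc/ssl/certs/ca.crt')
if port_has_listener('127.0.0.1', 5671, sslinfo):
    print('TLS listener answered a full handshake')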
@@ -127,7 +127,9 @@ def deferred_events():
    """
    events = []
    for defer_file in deferred_events_files():
        events.append((defer_file, read_event_file(defer_file)))
        event = read_event_file(defer_file)
        if event.policy_requestor_name == hookenv.service_name():
            events.append((defer_file, event))
    return events


charmhelpers/contrib/openstack/files/check_deferred_restarts.py (new executable file, 128 lines)
@@ -0,0 +1,128 @@
#!/usr/bin/python3

# Copyright 2014-2022 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Checks for services with deferred restarts.

This Nagios check will parse /var/lib/policy-rc.d/
to find any restarts that are currently deferred.
"""

import argparse
import glob
import sys
import yaml


DEFERRED_EVENTS_DIR = '/var/lib/policy-rc.d'


def get_deferred_events():
    """Return a list of deferred events dicts from policy-rc.d files.

    Events are read from DEFERRED_EVENTS_DIR and are of the form:
    {
        action: restart,
        policy_requestor_name: rabbitmq-server,
        policy_requestor_type: charm,
        reason: 'Pkg update',
        service: rabbitmq-server,
        time: 1614328743
    }

    :raises OSError: Raised in case of a system error while reading a policy file
    :raises yaml.YAMLError: Raised if parsing a policy file fails

    :returns: List of deferred event dictionaries
    :rtype: list
    """
    deferred_events_files = glob.glob(
        '{}/*.deferred'.format(DEFERRED_EVENTS_DIR))

    deferred_events = []
    for event_file in deferred_events_files:
        with open(event_file, 'r') as f:
            event = yaml.safe_load(f)
            deferred_events.append(event)

    return deferred_events


def get_deferred_restart_services(application=None):
    """Returns a list of services with deferred restarts.

    :param str application: Name of the application that blocked the service restart.
                            If application is None, all services with deferred restarts
                            are returned. Services which are blocked by a non-charm
                            requestor are always returned.

    :raises OSError: Raised in case of a system error while reading a policy file
    :raises yaml.YAMLError: Raised if parsing a policy file fails

    :returns: List of services with deferred restarts belonging to application.
    :rtype: list
    """

    deferred_restart_events = filter(
        lambda e: e['action'] == 'restart', get_deferred_events())

    deferred_restart_services = set()
    for restart_event in deferred_restart_events:
        if application:
            if (
                restart_event['policy_requestor_type'] != 'charm' or
                restart_event['policy_requestor_type'] == 'charm' and
                restart_event['policy_requestor_name'] == application
            ):
                deferred_restart_services.add(restart_event['service'])
        else:
            deferred_restart_services.add(restart_event['service'])

    return list(deferred_restart_services)


def main():
    """Check for services with deferred restarts."""
    parser = argparse.ArgumentParser(
        description='Check for services with deferred restarts')
    parser.add_argument(
        '--application', help='Check services belonging to this application only')

    args = parser.parse_args()

    services = set(get_deferred_restart_services(args.application))

    if len(services) == 0:
        print('OK: No deferred service restarts.')
        sys.exit(0)
    else:
        print(
            'CRITICAL: Restarts are deferred for services: {}.'.format(', '.join(services)))
        sys.exit(1)


if __name__ == '__main__':
    try:
        main()
    except OSError as e:
        print('CRITICAL: A system error occurred: {} ({})'.format(e.errno, e.strerror))
        sys.exit(1)
    except yaml.YAMLError as e:
        print('CRITICAL: Failed to parse a policy file: {}'.format(str(e)))
        sys.exit(1)
    except Exception as e:
        print('CRITICAL: An unknown error occurred: {}'.format(str(e)))
        sys.exit(1)
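A sketch exercising the check end-to-end by fabricating one deferred event in the directory the script scans (the file name is arbitrary; values mirror the docstring example and writing there needs root):

import yaml

event = {'action': 'restart', 'policy_requestor_name': 'rabbitmq-server',
         'policy_requestor_type': 'charm', 'reason': 'Pkg update',
         'service': 'rabbitmq-server', 'time': 1614328743}
with open('/var/lib/policy-rc.d/example.deferred', 'w') as f:
    yaml.safe_dump(event, f)

print(get_deferred_restart_services('rabbitmq-server'))
# -> ['rabbitmq-server']; running the script now exits 1 with a CRITICAL line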
@@ -25,6 +25,7 @@ Helpers for high availability.

import hashlib
import json
import os

import re

@@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
    config,
    status_set,
    DEBUG,
    application_name,
)

from charmhelpers.core.host import (
@@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(

VIP_GROUP_NAME = 'grp_{service}_vips'
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"


class DNSHAException(Exception):
@@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
        relation_data['groups'] = {
            key: ' '.join(vip_group)
        }


def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
    """Load grafana dashboard json model and insert prometheus datasource.

    :param prometheus_app_name: name of the 'prometheus' application that will
                                be used as datasource in grafana dashboard
    :type prometheus_app_name: str
    :param haproxy_dashboard: path to haproxy dashboard
    :type haproxy_dashboard: str
    :return: Grafana dashboard json model as a str.
    :rtype: str
    """
    from charmhelpers.contrib.templating import jinja

    dashboard_template = os.path.basename(haproxy_dashboard)
    dashboard_template_dir = os.path.dirname(haproxy_dashboard)
    app_name = application_name()
    datasource = "{} - Juju generated source".format(prometheus_app_name)
    return jinja.render(dashboard_template,
                        {"datasource": datasource,
                         "app_name": app_name,
                         "prometheus_app_name": prometheus_app_name},
                        template_dir=dashboard_template_dir,
                        jinja_env_args={"variable_start_string": "<< ",
                                        "variable_end_string": " >>"})
@@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
    is_ipv6,
    get_ipv6_addr,
    resolve_network_cidr,
    get_iface_for_address
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered

@@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
    return unit_get(unit_get_fallback)


def get_invalid_vips():
    """Check if any of the provided vips are invalid.
    A vip is invalid if it doesn't belong to the subnet in any interface.
    If all vips are valid, this returns an empty list.

    :returns: A list of strings, where each string is an invalid vip address.
    :rtype: list
    """

    clustered = is_clustered()
    vips = config('vip')
    if vips:
        vips = vips.split()
    invalid_vips = []

    if clustered and vips:
        for vip in vips:
            iface_for_vip = get_iface_for_address(vip)
            if iface_for_vip is None:
                invalid_vips.append(vip)

    return invalid_vips
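A sketch of consuming the helper from a charm's status assessment (status_set is the hookenv helper used elsewhere in this diff; the surrounding code is an assumption):

invalid = get_invalid_vips()
if invalid:
    status_set('blocked', 'Invalid vip(s): {}'.format(', '.join(invalid)))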

def resolve_address(endpoint_type=PUBLIC, override=True):
    """Return unit address depending on net config.
@@ -15,7 +15,6 @@
import collections
import contextlib
import os
import six
import shutil
import yaml
import zipfile
@@ -204,12 +203,6 @@ class BadPolicyYamlFile(Exception):
        return self.log_message


if six.PY2:
    BadZipFile = zipfile.BadZipfile
else:
    BadZipFile = zipfile.BadZipFile


def is_policyd_override_valid_on_this_release(openstack_release):
    """Check that the charm is running on at least Ubuntu Xenial, and at
    least the queens release.
@@ -487,10 +480,10 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
    if blacklisted_keys_present:
        raise BadPolicyYamlFile("blacklisted keys {} present."
                                .format(", ".join(blacklisted_keys_present)))
    if not all(isinstance(k, six.string_types) for k in keys):
    if not all(isinstance(k, str) for k in keys):
        raise BadPolicyYamlFile("keys in yaml aren't all strings?")
    # check that the dictionary looks like a mapping of str to str
    if not all(isinstance(v, six.string_types) for v in doc.values()):
    if not all(isinstance(v, str) for v in doc.values()):
        raise BadPolicyYamlFile("values in yaml aren't all strings?")
    return doc

@@ -530,8 +523,7 @@ def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
    hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG)
    if not os.path.exists(path):
        ch_host.mkdir(path, owner=_user, group=_group, perms=0o775)
    _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir
    for direntry in _scanner(path):
    for direntry in os.scandir(path):
        # see if the path should be kept.
        if direntry.path in keep_paths:
            continue
@@ -558,36 +550,6 @@ def maybe_create_directory_for(path, user, group):
        ch_host.mkdir(_dir, owner=user, group=group, perms=0o775)


@contextlib.contextmanager
def _fallback_scandir(path):
    """Fallback os.scandir implementation.

    provide a fallback implementation of os.scandir if this module ever gets
    used in a py2 or py34 charm. Uses os.listdir() to get the names in the path,
    and then mocks the is_dir() function using os.path.isdir() to check for
    directory.

    :param path: the path to list the directories for
    :type path: str
    :returns: Generator that provides _FBDirectory objects
    :rtype: ContextManager[_FBDirectory]
    """
    for f in os.listdir(path):
        yield _FBDirectory(f)


class _FBDirectory(object):
    """Mock a scandir Directory object with enough to use in
    clean_policyd_dir_for
    """

    def __init__(self, path):
        self.path = path

    def is_dir(self):
        return os.path.isdir(self.path)


def path_for_policy_file(service, name):
    """Return the full path for a policy.d file that will be written to the
    service's policy.d directory.
@@ -768,7 +730,7 @@ def process_policy_resource_file(resource_file,
                                 _group)
        # Every thing worked, so we mark up a success.
        completed = True
    except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
    except (zipfile.BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
        hookenv.log("Processing {} failed: {}".format(resource_file, str(e)),
                    level=POLICYD_LOG_LEVEL_DEFAULT)
    except IOError as e:

@@ -25,7 +25,6 @@ import re
import itertools
import functools

import six
import traceback
import uuid
import yaml
@@ -159,6 +158,12 @@ OPENSTACK_CODENAMES = OrderedDict([
    ('2021.1', 'wallaby'),
    ('2021.2', 'xena'),
    ('2022.1', 'yoga'),
    ('2022.2', 'zed'),
    ('2023.1', 'antelope'),
    ('2023.2', 'bobcat'),
    ('2024.1', 'caracal'),
    ('2024.2', 'dalmatian'),
    ('2025.1', 'epoxy'),
])

# The ugly duckling - must list releases oldest to newest
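The six new codename entries are the substance of this sync; lookups in either direction now resolve for the current releases, e.g.:

OPENSTACK_CODENAMES['2024.2']     # -> 'dalmatian'
get_os_version_codename('epoxy')  # -> '2025.1'
# and, with raise_exception=True (added below), a failed lookup raises
# ValueError instead of calling error_out()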
@@ -362,6 +367,8 @@ def get_os_codename_install_source(src):
    rel = ''
    if src is None:
        return rel
    if src in OPENSTACK_RELEASES:
        return src
    if src in ['distro', 'distro-proposed', 'proposed']:
        try:
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -399,39 +406,32 @@ def get_os_codename_version(vers):
        error_out(e)


def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES,
                            raise_exception=False):
    '''Determine OpenStack version number from codename.'''
    for k, v in six.iteritems(version_map):
    for k, v in version_map.items():
        if v == codename:
            return k
    e = 'Could not derive OpenStack version for '\
        'codename: %s' % codename
    error_out(e)


def get_os_version_codename_swift(codename):
    '''Determine OpenStack version number of swift from codename.'''
    for k, v in six.iteritems(SWIFT_CODENAMES):
        if k == codename:
            return v[-1]
    e = 'Could not derive swift version for '\
        'codename: %s' % codename
    if raise_exception:
        raise ValueError(str(e))
    error_out(e)


def get_swift_codename(version):
    '''Determine OpenStack codename that corresponds to swift version.'''
    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
    codenames = [k for k, v in SWIFT_CODENAMES.items() if version in v]

    if len(codenames) > 1:
        # If more than one release codename contains this version we determine
        # the actual codename based on the highest available install source.
        for codename in reversed(codenames):
            releases = UBUNTU_OPENSTACK_RELEASE
            release = [k for k, v in six.iteritems(releases) if codename in v]
            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
            if six.PY3:
                ret = ret.decode('UTF-8')
            release = [k for k, v in releases.items() if codename in v]
            ret = (subprocess
                   .check_output(['apt-cache', 'policy', 'swift'])
                   .decode('UTF-8'))
            if codename in ret or release[0] in ret:
                return codename
    elif len(codenames) == 1:
@@ -441,7 +441,7 @@ def get_swift_codename(version):
    match = re.match(r'^(\d+)\.(\d+)', version)
    if match:
        major_minor_version = match.group(0)
        for codename, versions in six.iteritems(SWIFT_CODENAMES):
        for codename, versions in SWIFT_CODENAMES.items():
            for release_version in versions:
                if release_version.startswith(major_minor_version):
                    return codename
@@ -477,9 +477,7 @@ def get_os_codename_package(package, fatal=True):
    if snap_install_requested():
        cmd = ['snap', 'list', package]
        try:
            out = subprocess.check_output(cmd)
            if six.PY3:
                out = out.decode('UTF-8')
            out = subprocess.check_output(cmd).decode('UTF-8')
        except subprocess.CalledProcessError:
            return None
        lines = out.split('\n')
@@ -549,16 +547,14 @@ def get_os_version_package(pkg, fatal=True):

    if 'swift' in pkg:
        vers_map = SWIFT_CODENAMES
        for cname, version in six.iteritems(vers_map):
        for cname, version in vers_map.items():
            if cname == codename:
                return version[-1]
    else:
        vers_map = OPENSTACK_CODENAMES
        for version, cname in six.iteritems(vers_map):
        for version, cname in vers_map.items():
            if cname == codename:
                return version
    # e = "Could not determine OpenStack version for package: %s" % pkg
    # error_out(e)


def get_installed_os_version():
@@ -581,7 +577,6 @@ def get_installed_os_version():
    return openstack_release().get('OPENSTACK_CODENAME')


@cached
def openstack_release():
    """Return /etc/os-release in a dict."""
    d = {}
@@ -821,10 +816,10 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    with open(juju_rc_path, 'wt') as rc_script:
        rc_script.write(
            "#!/bin/bash\n")
        [rc_script.write('export %s=%s\n' % (u, p))
         for u, p in six.iteritems(env_vars) if u != "script_path"]
        rc_script.write("#!/bin/bash\n")
        for u, p in env_vars.items():
            if u != "script_path":
                rc_script.write('export %s=%s\n' % (u, p))


def openstack_upgrade_available(package):
@@ -843,14 +838,10 @@ def openstack_upgrade_available(package):
    if not cur_vers:
        # The package has not been installed yet do not attempt upgrade
        return False
    if "swift" in package:
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        try:
            avail_vers = get_os_version_install_source(src)
        except Exception:
            avail_vers = cur_vers
    try:
        avail_vers = get_os_version_install_source(src)
    except Exception:
        avail_vers = cur_vers
    apt.init()
    return apt.version_compare(avail_vers, cur_vers) >= 1

@@ -954,7 +945,7 @@ def os_requires_version(ostack_release, pkg):
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args):
            if os_release(pkg) < ostack_release:
            if CompareOpenStackReleases(os_release(pkg)) < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            f(*args)
@@ -1039,7 +1030,7 @@ def _determine_os_workload_status(
            state, message, lambda: charm_func(configs))

    if state is None:
        state, message = _ows_check_services_running(services, ports)
        state, message = ows_check_services_running(services, ports)

    if state is None:
        state = 'active'
@@ -1213,12 +1204,19 @@ def _ows_check_charm_func(state, message, charm_func_with_configs):
    return state, message


@deprecate("use ows_check_services_running() instead", "2022-05", log=juju_log)
def _ows_check_services_running(services, ports):
    return ows_check_services_running(services, ports)


def ows_check_services_running(services, ports, ssl_check_info=None):
    """Check that the services that should be running are actually running
    and that any ports specified are being listened to.

    @param services: list of strings OR dictionary specifying services/ports
    @param ports: list of ports
    @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks
                           will be done using an SSL connection.
    @returns state, message: strings or None, None
    """
    messages = []
@@ -1234,7 +1232,7 @@ def _ows_check_services_running(services, ports):
        # also verify that the ports that should be open are open
        # NB, that ServiceManager objects only OPTIONALLY have ports
        map_not_open, ports_open = (
            _check_listening_on_services_ports(services))
            _check_listening_on_services_ports(services, ssl_check_info))
        if not all(ports_open):
            # find which service has missing ports. They are in service
            # order which makes it a bit easier.
@@ -1249,7 +1247,8 @@ def _ows_check_services_running(services, ports):

    if ports is not None:
        # and we can also check ports which we don't know the service for
        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
        ports_open, ports_open_bools = \
            _check_listening_on_ports_list(ports, ssl_check_info)
        if not all(ports_open_bools):
            messages.append(
                "Ports which should be open, but are not: {}"
@@ -1308,7 +1307,8 @@ def _check_running_services(services):
    return list(zip(services, services_running)), services_running


def _check_listening_on_services_ports(services, test=False):
def _check_listening_on_services_ports(services, test=False,
                                       ssl_check_info=None):
    """Check that the unit is actually listening (has the port open) on the
    ports that the service specifies are open. If test is True then the
    function returns the services with ports that are open rather than
@@ -1318,11 +1318,14 @@ def _check_listening_on_services_ports(services, test=False):

    @param services: OrderedDict(service: [port, ...], ...)
    @param test: default=False, if False, test for closed, otherwise open.
    @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks
                           will be done using an SSL connection.
    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
    """
    test = not(not(test))  # ensure test is True or False
    test = not (not (test))  # ensure test is True or False
    all_ports = list(itertools.chain(*services.values()))
    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
    ports_states = [port_has_listener('0.0.0.0', p, ssl_check_info)
                    for p in all_ports]
    map_ports = OrderedDict()
    matched_ports = [p for p, opened in zip(all_ports, ports_states)
                     if opened == test]  # essentially opened xor test
@@ -1333,16 +1336,19 @@ def _check_listening_on_services_ports(services, test=False):
    return map_ports, ports_states


def _check_listening_on_ports_list(ports):
def _check_listening_on_ports_list(ports, ssl_check_info=None):
    """Check that the ports list given are being listened to

    Returns a list of ports being listened to and a list of the
    booleans.

    @param ssl_check_info: SSLPortCheckInfo object. If provided, port checks
                           will be done using an SSL connection.
    @param ports: LIST of port numbers.
    @returns [(port_num, boolean), ...], [boolean]
    """
    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
    ports_open = [port_has_listener('0.0.0.0', p, ssl_check_info)
                  for p in ports]
    return zip(ports, ports_open), ports_open


@@ -1413,45 +1419,75 @@ def incomplete_relation_data(configs, required_interfaces):
            for i in incomplete_relations}


def do_action_openstack_upgrade(package, upgrade_callback, configs,
                                force_upgrade=False):
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.

    @param package: package name for determining if upgrade available
    @param package: package name for determining if openstack upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class
    @param force_upgrade: perform dist-upgrade regardless of new openstack

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if openstack_upgrade_available(package) or force_upgrade:
    if openstack_upgrade_available(package):
        if config('action-managed-upgrade'):
            juju_log('Upgrading OpenStack release')

            try:
                upgrade_callback(configs=configs)
                action_set({'outcome': 'success, upgrade completed.'})
                action_set({'outcome': 'success, upgrade completed'})
                ret = True
            except Exception:
                action_set({'outcome': 'upgrade failed, see traceback.'})
                action_set({'outcome': 'upgrade failed, see traceback'})
                action_set({'traceback': traceback.format_exc()})
                action_fail('do_openstack_upgrade resulted in an '
                action_fail('upgrade callback resulted in an '
                            'unexpected error')
        else:
            action_set({'outcome': 'action-managed-upgrade config is '
                                   'False, skipped upgrade.'})
                                   'False, skipped upgrade'})
    else:
        action_set({'outcome': 'no upgrade available.'})
        action_set({'outcome': 'no upgrade available'})

    return ret


def do_action_package_upgrade(package, upgrade_callback, configs):
    """Perform package upgrade within the current OpenStack release.

    Upgrades packages only if there is not an openstack upgrade available,
    and sets the corresponding action status as a result.

    @param package: package name for determining if openstack upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if not openstack_upgrade_available(package):
        juju_log('Upgrading packages')

        try:
            upgrade_callback(configs=configs)
            action_set({'outcome': 'success, upgrade completed'})
            ret = True
        except Exception:
            action_set({'outcome': 'upgrade failed, see traceback'})
            action_set({'traceback': traceback.format_exc()})
            action_fail('upgrade callback resulted in an '
                        'unexpected error')
    else:
        action_set({'outcome': 'upgrade skipped because an openstack upgrade '
                               'is available'})

    return ret
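A sketch of how a charm action might call the new package-only helper; the action function, callback, and configs factory below are assumptions, not part of this diff:

def package_upgrade_action(args):
    # hypothetical action entry point in a charm
    do_action_package_upgrade('nova-common',
                              do_upgrade_callback,   # charm's upgrade routine
                              register_configs())    # OSConfigRenderer factory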
|
||||
|
||||
@ -1546,7 +1582,7 @@ def is_unit_paused_set():
        with unitdata.HookData()() as t:
            kv = t[0]
            # transform something truth-y into a Boolean.
            return not(not(kv.get('unit-paused')))
            return not (not (kv.get('unit-paused')))
    except Exception:
        return False

@ -1849,21 +1885,20 @@ def pausable_restart_on_change(restart_map, stopstart=False,

    """
    def wrap(f):
        # py27 compatible nonlocal variable. When py3 only, replace with
        # nonlocal keyword
        __restart_map_cache = {'cache': None}
        __restart_map_cache = None

        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            nonlocal __restart_map_cache
            if is_unit_paused_set():
                return f(*args, **kwargs)
            if __restart_map_cache['cache'] is None:
                __restart_map_cache['cache'] = restart_map() \
            if __restart_map_cache is None:
                __restart_map_cache = restart_map() \
                    if callable(restart_map) else restart_map
            # otherwise, normal restart_on_change functionality
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)),
                __restart_map_cache['cache'],
                __restart_map_cache,
                stopstart,
                restart_functions,
                can_restart_now_f,
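A minimal sketch of the decorator in use, assuming an illustrative restart map and service name:

# Illustrative usage of pausable_restart_on_change; names are made up.
from charmhelpers.contrib.openstack.utils import pausable_restart_on_change


def example_restart_map():
    # config file -> services to restart when that file changes
    return {'/etc/example/example.conf': ['example-api']}


@pausable_restart_on_change(example_restart_map)
def config_changed():
    # Render configuration here; changed files trigger restarts on exit,
    # unless the unit has been paused (is_unit_paused_set() above).
    pass

Passing the map as a callable defers the (potentially expensive) computation until the first unpaused invocation, after which the cached value is reused.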
@ -1888,7 +1923,7 @@ def ordered(orderme):
        raise ValueError('argument must be a dict type')

    result = OrderedDict()
    for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
    for k, v in sorted(orderme.items(), key=lambda x: x[0]):
        if isinstance(v, dict):
            result[k] = ordered(v)
        else:

@ -2145,7 +2180,7 @@ def is_unit_upgrading_set():
        with unitdata.HookData()() as t:
            kv = t[0]
            # transform something truth-y into a Boolean.
            return not(not(kv.get('unit-upgrading')))
            return not (not (kv.get('unit-upgrading')))
    except Exception:
        return False

@ -13,7 +13,6 @@
# limitations under the License.

import json
import six

from charmhelpers.core.hookenv import relation_id as current_relation_id
from charmhelpers.core.hookenv import (

@ -229,7 +228,7 @@ def peer_echo(includes=None, force=False):
            if ex in echo_data:
                echo_data.pop(ex)
    else:
        for attribute, value in six.iteritems(rdata):
        for attribute, value in rdata.items():
            for include in includes:
                if include in attribute:
                    echo_data[attribute] = value

@ -255,8 +254,8 @@ def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
                 relation_settings=relation_settings,
                 **kwargs)
    if is_relation_made(peer_relation_name):
        for key, value in six.iteritems(dict(list(kwargs.items()) +
                                             list(relation_settings.items()))):
        items = dict(list(kwargs.items()) + list(relation_settings.items()))
        for key, value in items.items():
            key_prefix = relation_id or current_relation_id()
            peer_store(key_prefix + delimiter + key,
                       value,

@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

# deprecated aliases for backwards compatibility
from charmhelpers.fetch.python import debug  # noqa
from charmhelpers.fetch.python import packages  # noqa

@ -23,13 +23,11 @@ import collections
import errno
import hashlib
import math
import six

import os
import shutil
import json
import time
import uuid

from subprocess import (
    check_call,
@ -160,15 +158,19 @@ def get_osd_settings(relation_name):
    return _order_dict_by_key(osd_settings)


def send_application_name(relid=None):
def send_application_name(relid=None, app_name=None):
    """Send the application name down the relation.

    :param relid: Relation id to set application name in.
    :type relid: str
    :param app_name: Application name to send in the relation.
    :type app_name: str
    """
    if app_name is None:
        app_name = application_name()
    relation_set(
        relation_id=relid,
        relation_settings={'application-name': application_name()})
        relation_settings={'application-name': app_name})


def send_osd_settings():
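A quick sketch of the new override (the relation id and application names are illustrative):

# Illustrative call sites for send_application_name().
send_application_name(relid='ceph:42')                            # defaults to application_name()
send_application_name(relid='ceph:42', app_name='glance-local')   # explicit override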
@ -218,7 +220,7 @@ def validator(value, valid_type, valid_range=None):
                "was given {} of type {}"
                .format(valid_range, type(valid_range)))
        # If we're dealing with strings
        if isinstance(value, six.string_types):
        if isinstance(value, str):
            assert value in valid_range, (
                "{} is not in the list {}".format(value, valid_range))
        # Integer, float should have a min and max

@ -434,9 +436,9 @@ class BasePool(object):
        :type mode: str
        """
        # Check the input types and values
        validator(value=cache_pool, valid_type=six.string_types)
        validator(value=cache_pool, valid_type=str)
        validator(
            value=mode, valid_type=six.string_types,
            value=mode, valid_type=str,
            valid_range=["readonly", "writeback"])

        check_call([

@ -615,7 +617,8 @@ class Pool(BasePool):

class ReplicatedPool(BasePool):
    def __init__(self, service, name=None, pg_num=None, replicas=None,
                 percent_data=None, app_name=None, op=None):
                 percent_data=None, app_name=None, op=None,
                 profile_name='replicated_rule'):
        """Initialize ReplicatedPool object.

        Pool information is either initialized from individual keyword
@ -632,6 +635,8 @@ class ReplicatedPool(BasePool):
                         to this replicated pool.
        :type replicas: int
        :raises: KeyError
        :param profile_name: Crush Profile to use
        :type profile_name: Optional[str]
        """
        # NOTE: Do not perform initialization steps that require live data from
        # a running cluster here. The *Pool classes may be used for validation.

@ -646,11 +651,20 @@ class ReplicatedPool(BasePool):
            # we will fail with KeyError if it is not provided.
            self.replicas = op['replicas']
            self.pg_num = op.get('pg_num')
            self.profile_name = op.get('crush-profile') or profile_name
        else:
            self.replicas = replicas or 2
            self.pg_num = pg_num
            self.profile_name = profile_name or 'replicated_rule'

    def _create(self):
        # Validate if crush profile exists
        if self.profile_name is None:
            msg = ("Failed to discover crush profile named "
                   "{}".format(self.profile_name))
            log(msg, level=ERROR)
            raise PoolCreationError(msg)

        # Do extra validation on pg_num with data from live cluster
        if self.pg_num:
            # Since the number of placement groups were specified, ensure
@ -668,12 +682,12 @@ class ReplicatedPool(BasePool):
                '--pg-num-min={}'.format(
                    min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
                ),
                self.name, str(self.pg_num)
                self.name, str(self.pg_num), self.profile_name
            ]
        else:
            cmd = [
                'ceph', '--id', self.service, 'osd', 'pool', 'create',
                self.name, str(self.pg_num)
                self.name, str(self.pg_num), self.profile_name
            ]
        check_call(cmd)
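Taken together, the ReplicatedPool changes let a pool be pinned to an existing crush rule; a sketch, assuming a pre-created rule named fast-ssd and illustrative pool parameters:

# Illustrative; assumes a crush rule 'fast-ssd' already exists in the cluster.
pool = ReplicatedPool(service='admin', name='glance', replicas=3,
                      percent_data=20, profile_name='fast-ssd')
pool.create()  # issues: ceph osd pool create glance <pg_num> fast-ssd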
@ -692,7 +706,7 @@ class ErasurePool(BasePool):
    def __init__(self, service, name=None, erasure_code_profile=None,
                 percent_data=None, app_name=None, op=None,
                 allow_ec_overwrites=False):
        """Initialize ReplicatedPool object.
        """Initialize ErasurePool object.

        Pool information is either initialized from individual keyword
        arguments or from an individual CephBrokerRq operation Dict.

@ -778,10 +792,11 @@ def enabled_manager_modules():
    :rtype: List[str]
    """
    cmd = ['ceph', 'mgr', 'module', 'ls']
    quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0
    if quincy_or_later:
        cmd.append('--format=json')
    try:
        modules = check_output(cmd)
        if six.PY3:
            modules = modules.decode('UTF-8')
        modules = check_output(cmd).decode('utf-8')
    except CalledProcessError as e:
        log("Failed to list ceph modules: {}".format(e), WARNING)
        return []

@ -814,10 +829,10 @@ def get_mon_map(service):
        ceph command fails.
    """
    try:
        mon_status = check_output(['ceph', '--id', service,
                                   'mon_status', '--format=json'])
        if six.PY3:
            mon_status = mon_status.decode('UTF-8')
        octopus_or_later = cmp_pkgrevno('ceph-common', '15.0.0') >= 0
        mon_status_cmd = 'quorum_status' if octopus_or_later else 'mon_status'
        mon_status = (check_output(['ceph', '--id', service, mon_status_cmd,
                                    '--format=json'])).decode('utf-8')
        try:
            return json.loads(mon_status)
        except ValueError as v:

@ -959,9 +974,7 @@ def get_erasure_profile(service, name):
    try:
        out = check_output(['ceph', '--id', service,
                            'osd', 'erasure-code-profile', 'get',
                            name, '--format=json'])
        if six.PY3:
            out = out.decode('UTF-8')
                            name, '--format=json']).decode('utf-8')
        return json.loads(out)
    except (CalledProcessError, OSError, ValueError):
        return None
@ -1164,8 +1177,7 @@ def create_erasure_profile(service, profile_name,
        'nvme'
    ]

    validator(erasure_plugin_name, six.string_types,
              list(plugin_techniques.keys()))
    validator(erasure_plugin_name, str, list(plugin_techniques.keys()))

    cmd = [
        'ceph', '--id', service,

@ -1176,7 +1188,7 @@ def create_erasure_profile(service, profile_name,
    ]

    if erasure_plugin_technique:
        validator(erasure_plugin_technique, six.string_types,
        validator(erasure_plugin_technique, str,
                  plugin_techniques[erasure_plugin_name])
        cmd.append('technique={}'.format(erasure_plugin_technique))

@ -1189,7 +1201,7 @@ def create_erasure_profile(service, profile_name,
        failure_domain = 'rack'

    if failure_domain:
        validator(failure_domain, six.string_types, failure_domains)
        validator(failure_domain, str, failure_domains)
        # failure_domain changed in luminous
        if luminous_or_later:
            cmd.append('crush-failure-domain={}'.format(failure_domain))

@ -1198,7 +1210,7 @@ def create_erasure_profile(service, profile_name,

    # device class new in luminous
    if luminous_or_later and device_class:
        validator(device_class, six.string_types, device_classes)
        validator(device_class, str, device_classes)
        cmd.append('crush-device-class={}'.format(device_class))
    else:
        log('Skipping device class configuration (ceph < 12.0.0)',

@ -1213,7 +1225,7 @@ def create_erasure_profile(service, profile_name,
        raise ValueError("locality must be provided for lrc plugin")
    # LRC optional configuration
    if crush_locality:
        validator(crush_locality, six.string_types, failure_domains)
        validator(crush_locality, str, failure_domains)
        cmd.append('crush-locality={}'.format(crush_locality))

    if erasure_plugin_name == 'shec':

@ -1241,8 +1253,8 @@ def rename_pool(service, old_name, new_name):
    :param new_name: Name to rename pool to.
    :type new_name: str
    """
    validator(value=old_name, valid_type=six.string_types)
    validator(value=new_name, valid_type=six.string_types)
    validator(value=old_name, valid_type=str)
    validator(value=new_name, valid_type=str)

    cmd = [
        'ceph', '--id', service,

@ -1260,7 +1272,7 @@ def erasure_profile_exists(service, name):
    :returns: True if it exists, False otherwise.
    :rtype: bool
    """
    validator(value=name, valid_type=six.string_types)
    validator(value=name, valid_type=str)
    try:
        check_call(['ceph', '--id', service,
                    'osd', 'erasure-code-profile', 'get',
@ -1280,12 +1292,10 @@ def get_cache_mode(service, pool_name):
    :returns: Current cache mode.
    :rtype: Optional[int]
    """
    validator(value=service, valid_type=six.string_types)
    validator(value=pool_name, valid_type=six.string_types)
    validator(value=service, valid_type=str)
    validator(value=pool_name, valid_type=str)
    out = check_output(['ceph', '--id', service,
                        'osd', 'dump', '--format=json'])
    if six.PY3:
        out = out.decode('UTF-8')
                        'osd', 'dump', '--format=json']).decode('utf-8')
    try:
        osd_json = json.loads(out)
        for pool in osd_json['pools']:

@ -1299,9 +1309,8 @@ def get_cache_mode(service, pool_name):
def pool_exists(service, name):
    """Check to see if a RADOS pool already exists."""
    try:
        out = check_output(['rados', '--id', service, 'lspools'])
        if six.PY3:
            out = out.decode('UTF-8')
        out = check_output(
            ['rados', '--id', service, 'lspools']).decode('utf-8')
    except CalledProcessError:
        return False

@ -1320,13 +1329,11 @@ def get_osds(service, device_class=None):
        out = check_output(['ceph', '--id', service,
                            'osd', 'crush', 'class',
                            'ls-osd', device_class,
                            '--format=json'])
                            '--format=json']).decode('utf-8')
    else:
        out = check_output(['ceph', '--id', service,
                            'osd', 'ls',
                            '--format=json'])
        if six.PY3:
            out = out.decode('UTF-8')
                            '--format=json']).decode('utf-8')
    return json.loads(out)

@ -1343,9 +1350,7 @@ def rbd_exists(service, pool, rbd_img):
    """Check to see if a RADOS block device exists."""
    try:
        out = check_output(['rbd', 'list', '--id',
                            service, '--pool', pool])
        if six.PY3:
            out = out.decode('UTF-8')
                            service, '--pool', pool]).decode('utf-8')
    except CalledProcessError:
        return False

@ -1371,7 +1376,7 @@ def update_pool(client, pool, settings):
    :raises: CalledProcessError
    """
    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
    for k, v in six.iteritems(settings):
    for k, v in settings.items():
        check_call(cmd + [k, v])

@ -1509,9 +1514,7 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name):
    """Determine whether a RADOS block device is mapped locally."""
    try:
        out = check_output(['rbd', 'showmapped'])
        if six.PY3:
            out = out.decode('UTF-8')
        out = check_output(['rbd', 'showmapped']).decode('utf-8')
    except CalledProcessError:
        return False
@ -1677,6 +1680,10 @@ class CephBrokerRq(object):
    The API is versioned and defaults to version 1.
    """

    # The below hash is the result of running
    # `hashlib.sha1('[]'.encode()).hexdigest()`
    EMPTY_LIST_SHA = '97d170e1550eee4afc0af065b78cda302a97674c'

    def __init__(self, api_version=1, request_id=None, raw_request_data=None):
        """Initialize CephBrokerRq object.

@ -1685,8 +1692,12 @@ class CephBrokerRq(object):

        :param api_version: API version for request (default: 1).
        :type api_version: Optional[int]
        :param request_id: Unique identifier for request.
                           (default: string representation of generated UUID)
        :param request_id: Unique identifier for request. The identifier will
                           be updated as ops are added or removed from the
                           broker request. This ensures that Ceph will
                           correctly process requests where operations are
                           added after the initial request is processed.
                           (default: sha1 of operations)
        :type request_id: Optional[str]
        :param raw_request_data: JSON-encoded string to build request from.
        :type raw_request_data: Optional[str]

@ -1695,16 +1706,20 @@ class CephBrokerRq(object):
        if raw_request_data:
            request_data = json.loads(raw_request_data)
            self.api_version = request_data['api-version']
            self.request_id = request_data['request-id']
            self.set_ops(request_data['ops'])
            self.request_id = request_data['request-id']
        else:
            self.api_version = api_version
            if request_id:
                self.request_id = request_id
            else:
                self.request_id = str(uuid.uuid1())
                self.request_id = CephBrokerRq.EMPTY_LIST_SHA
            self.ops = []

    def _hash_ops(self):
        """Return the sha1 of the requested Broker ops."""
        return hashlib.sha1(json.dumps(self.ops, sort_keys=True).encode()).hexdigest()

    def add_op(self, op):
        """Add an op if it is not already in the list.

@ -1713,6 +1728,7 @@ class CephBrokerRq(object):
        """
        if op not in self.ops:
            self.ops.append(op)
            self.request_id = self._hash_ops()

    def add_op_request_access_to_group(self, name, namespace=None,
                                       permission=None, key_name=None,
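The effect of the request-id rework in miniature; a sketch (pool and group names are illustrative):

# Request ids are now a deterministic digest of the ops list.
rq = CephBrokerRq()
assert rq.request_id == CephBrokerRq.EMPTY_LIST_SHA   # sha1 of '[]'

rq.add_op_create_replicated_pool(name='glance', replica_count=3)
first_id = rq.request_id                              # sha1 of the one-op list

rq.add_op_request_access_to_group(name='images', permission='rwx')
assert rq.request_id != first_id                      # id tracks the ops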
@ -1857,7 +1873,7 @@ class CephBrokerRq(object):
        }

    def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
                                      **kwargs):
                                      crush_profile=None, **kwargs):
        """Adds an operation to create a replicated pool.

        Refer to docstring for ``_partial_build_common_op_create`` for

@ -1871,6 +1887,10 @@ class CephBrokerRq(object):
                       for pool.
        :type pg_num: int
        :raises: AssertionError if provided data is of invalid type/range
        :param crush_profile: Name of crush profile to use. If not set the
                              ceph-mon unit handling the broker request will
                              set its default value.
        :type crush_profile: Optional[str]
        """
        if pg_num and kwargs.get('weight'):
            raise ValueError('pg_num and weight are mutually exclusive')

@ -1880,6 +1900,7 @@ class CephBrokerRq(object):
            'name': name,
            'replicas': replica_count,
            'pg_num': pg_num,
            'crush-profile': crush_profile
        }
        op.update(self._partial_build_common_op_create(**kwargs))

@ -1986,6 +2007,7 @@ class CephBrokerRq(object):
        to allow comparisons to ensure validity.
        """
        self.ops = ops
        self.request_id = self._hash_ops()

    @property
    def request(self):
@ -19,8 +19,6 @@ from subprocess import (
    check_output,
)

import six


##################################################
# loopback device helpers.

@ -40,9 +38,7 @@ def loopback_devices():
    '''
    loopbacks = {}
    cmd = ['losetup', '-a']
    output = check_output(cmd)
    if six.PY3:
        output = output.decode('utf-8')
    output = check_output(cmd).decode('utf-8')
    devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
    for dev, _, f in devs:
        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]

@ -57,7 +53,7 @@ def create_loopback(file_path):
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    for d, f in six.iteritems(loopback_devices()):
    for d, f in loopback_devices().items():
        if f == file_path:
            return d

@ -71,7 +67,7 @@ def ensure_loopback_device(path, size):

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for d, f in six.iteritems(loopback_devices()):
    for d, f in loopback_devices().items():
        if f == path:
            return d
@ -17,8 +17,6 @@ from subprocess import (
    CalledProcessError,
    check_call,
    check_output,
    Popen,
    PIPE,
)

@ -58,9 +56,7 @@ def remove_lvm_physical_volume(block_device):

    :param block_device: str: Full path of block device to scrub.
    '''
    p = Popen(['pvremove', '-ff', block_device],
              stdin=PIPE)
    p.communicate(input='y\n')
    check_call(['pvremove', '-ff', '--yes', block_device])


def list_lvm_volume_group(block_device):
@ -23,6 +23,12 @@ from subprocess import (
    call
)

from charmhelpers.core.hookenv import (
    log,
    WARNING,
    INFO
)


def _luks_uuid(dev):
    """

@ -110,7 +116,7 @@ def is_device_mounted(device):
    return bool(re.search(r'MOUNTPOINT=".+"', out))


def mkfs_xfs(device, force=False, inode_size=1024):
def mkfs_xfs(device, force=False, inode_size=None):
    """Format device with XFS filesystem.

    By default this should fail if the device already has a filesystem on it.
@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
    :ptype device: str
    :param force: Force operation
    :ptype: force: boolean
    :param inode_size: XFS inode size in bytes
    :param inode_size: XFS inode size in bytes; if set to 0 or None,
                       the value used will be the XFS system default
    :ptype inode_size: int"""
    cmd = ['mkfs.xfs']
    if force:
        cmd.append("-f")

    cmd += ['-i', "size={}".format(inode_size), device]
    if inode_size:
        if inode_size >= 256 and inode_size <= 2048:
            cmd += ['-i', "size={}".format(inode_size)]
        else:
            log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
    else:
        log("Using XFS filesystem with system default inode size.", level=INFO)

    cmd += [device]
    check_call(cmd)
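A sketch of the resulting behaviour (the device path is a placeholder):

# Illustrative calls; /dev/vdb stands in for a real block device.
mkfs_xfs('/dev/vdb')                   # system default inode size
mkfs_xfs('/dev/vdb', inode_size=512)   # in range: passes '-i size=512'
mkfs_xfs('/dev/vdb', inode_size=8192)  # out of range: warns, uses default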
@ -17,12 +17,14 @@
# Authors:
#  Charm Helpers Developers <juju@lists.ubuntu.com>

from __future__ import print_function
import copy
from distutils.version import LooseVersion
try:
    from distutils.version import LooseVersion
except ImportError:
    from looseversion import LooseVersion
from enum import Enum
from functools import wraps
from collections import namedtuple
from collections import namedtuple, UserDict
import glob
import os
import json

@ -36,12 +38,6 @@ from subprocess import CalledProcessError

from charmhelpers import deprecate

import six
if not six.PY3:
    from UserDict import UserDict
else:
    from collections import UserDict


CRITICAL = "CRITICAL"
ERROR = "ERROR"

@ -112,7 +108,7 @@ def log(message, level=None):
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
    if not isinstance(message, str):
        message = repr(message)
    command += [message[:SH_MAX_ARG]]
    # Missing juju-log should not cause failures in unit tests

@ -132,7 +128,7 @@ def log(message, level=None):
def function_log(message):
    """Write a function progress message"""
    command = ['function-log']
    if not isinstance(message, six.string_types):
    if not isinstance(message, str):
        message = repr(message)
    command += [message[:SH_MAX_ARG]]
    # Missing function-log should not cause failures in unit tests
@ -445,12 +441,6 @@ def config(scope=None):
    """
    global _cache_config
    config_cmd_line = ['config-get', '--all', '--format=json']
    try:
        # JSON Decode Exception for Python3.5+
        exc_json = json.decoder.JSONDecodeError
    except AttributeError:
        # JSON Decode Exception for Python2.7 through Python3.4
        exc_json = ValueError
    try:
        if _cache_config is None:
            config_data = json.loads(

@ -459,7 +449,7 @@ def config(scope=None):
        if scope is not None:
            return _cache_config.get(scope)
        return _cache_config
    except (exc_json, UnicodeDecodeError) as e:
    except (json.decoder.JSONDecodeError, UnicodeDecodeError) as e:
        log('Unable to parse output from config-get: config_cmd_line="{}" '
            'message="{}"'
            .format(config_cmd_line, str(e)), level=ERROR)
@ -491,12 +481,26 @@ def relation_get(attribute=None, unit=None, rid=None, app=None):
        raise


@cached
def _relation_set_accepts_file():
    """Return True if the juju relation-set command accepts a file.

    Cache the result as it won't change during the execution of a hook, and
    thus we can make relation_set() more efficient by only checking for the
    first relation_set() call.

    :returns: True if relation_set accepts a file.
    :rtype: bool
    :raises: subprocess.CalledProcessError if the check fails.
    """
    return "--file" in subprocess.check_output(
        ["relation-set", "--help"], universal_newlines=True)


def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs):
    """Set relation information for the current unit"""
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    accepts_file = "--file" in subprocess.check_output(
        relation_cmd_line + ["--help"], universal_newlines=True)
    if app:
        relation_cmd_line.append('--app')
    if relation_id is not None:

@ -508,7 +512,7 @@ def relation_set(relation_id=None, relation_settings=None, app=False, **kwargs):
        # sites pass in things like dicts or numbers.
        if value is not None:
            settings[key] = "{}".format(value)
    if accepts_file:
    if _relation_set_accepts_file():
        # --file was introduced in Juju 1.23.2. Use it by default if
        # available, since otherwise we'll break if the relation data is
        # too big. Ideally we should tell relation-set to read the data from
@ -1003,14 +1007,8 @@ def cmd_exists(cmd):


@cached
@deprecate("moved to function_get()", log=log)
def action_get(key=None):
    """
    .. deprecated:: 0.20.7
        Alias for :func:`function_get`.

    Gets the value of an action parameter, or all key/value param pairs.
    """
    """Gets the value of an action parameter, or all key/value param pairs."""
    cmd = ['action-get']
    if key is not None:
        cmd.append(key)

@ -1020,8 +1018,12 @@ def action_get(key=None):


@cached
@deprecate("moved to action_get()", log=log)
def function_get(key=None):
    """Gets the value of an action parameter, or all key/value param pairs"""
    """
    .. deprecated::
        Gets the value of an action parameter, or all key/value param pairs.
    """
    cmd = ['function-get']
    # Fallback for older charms.
    if not cmd_exists('function-get'):

@ -1034,22 +1036,20 @@ def function_get(key=None):
    return function_data


@deprecate("moved to function_set()", log=log)
def action_set(values):
    """
    .. deprecated:: 0.20.7
        Alias for :func:`function_set`.

    Sets the values to be returned after the action finishes.
    """
    """Sets the values to be returned after the action finishes."""
    cmd = ['action-set']
    for k, v in list(values.items()):
        cmd.append('{}={}'.format(k, v))
    subprocess.check_call(cmd)


@deprecate("moved to action_set()", log=log)
def function_set(values):
    """Sets the values to be returned after the function finishes"""
    """
    .. deprecated::
        Sets the values to be returned after the function finishes.
    """
    cmd = ['function-set']
    # Fallback for older charms.
    if not cmd_exists('function-get'):
@ -1060,12 +1060,8 @@ def function_set(values):
    subprocess.check_call(cmd)


@deprecate("moved to function_fail()", log=log)
def action_fail(message):
    """
    .. deprecated:: 0.20.7
        Alias for :func:`function_fail`.

    Sets the action status to failed and sets the error message.

    The results set by action_set are preserved.
@ -1073,10 +1069,14 @@ def action_fail(message):
    subprocess.check_call(['action-fail', message])


@deprecate("moved to action_fail()", log=log)
def function_fail(message):
    """Sets the function status to failed and sets the error message.
    """
    .. deprecated::
        Sets the function status to failed and sets the error message.

    The results set by function_set are preserved."""
    The results set by function_set are preserved.
    """
    cmd = ['function-fail']
    # Fallback for older charms.
    if not cmd_exists('function-fail'):
@ -31,7 +31,6 @@ import subprocess
import hashlib
import functools
import itertools
import six

from contextlib import contextmanager
from collections import OrderedDict, defaultdict
@ -115,6 +114,33 @@ def service_stop(service_name, **kwargs):
    return service('stop', service_name, **kwargs)


def service_enable(service_name, **kwargs):
    """Enable a system service.

    The specified service name is managed via the system level init system.
    Some init systems (e.g. upstart) require that additional arguments be
    provided in order to directly control service instances whereas other init
    systems allow for addressing instances of a service directly by name (e.g.
    systemd).

    The kwargs allow for the additional parameters to be passed to underlying
    init systems for those systems which require/allow for them. For example,
    the ceph-osd upstart script requires the id parameter to be passed along
    in order to identify which running daemon should be enabled. The follow-
    ing example enables the ceph-osd service for instance id=4:

    service_enable('ceph-osd', id=4)

    :param service_name: the name of the service to enable
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for init systems not allowing additional
                     parameters via the commandline (systemd).
    """
    return service('enable', service_name, **kwargs)


def service_restart(service_name, **kwargs):
    """Restart a system service.

@ -135,7 +161,7 @@ def service_restart(service_name, **kwargs):
    :param service_name: the name of the service to restart
    :param **kwargs: additional parameters to pass to the init system when
                     managing services. These will be passed as key=value
                     parameters to the init system's commandline. kwargs
                     are ignored for init systems not allowing additional
                     parameters via the commandline (systemd).
    """
@ -230,8 +256,11 @@ def service_resume(service_name, init_dir="/etc/init",
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if init_is_systemd(service_name=service_name):
        service('unmask', service_name)
        service('enable', service_name)
        if service('is-enabled', service_name):
            log('service {} already enabled'.format(service_name), level=DEBUG)
        else:
            service('unmask', service_name)
            service('enable', service_name)
    elif os.path.exists(upstart_file):
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))

@ -251,7 +280,7 @@ def service_resume(service_name, init_dir="/etc/init",
    return started


def service(action, service_name, **kwargs):
def service(action, service_name=None, **kwargs):
    """Control a system service.

    :param action: the action to take on the service

@ -260,10 +289,12 @@ def service(action, service_name, **kwargs):
                   the form of key=value.
    """
    if init_is_systemd(service_name=service_name):
        cmd = ['systemctl', action, service_name]
        cmd = ['systemctl', action]
        if service_name is not None:
            cmd.append(service_name)
    else:
        cmd = ['service', service_name, action]
        for key, value in six.iteritems(kwargs):
        for key, value in kwargs.items():
            parameter = '%s=%s' % (key, value)
            cmd.append(parameter)
    return subprocess.call(cmd) == 0
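A sketch of the two behavioural changes, assuming a systemd unit (service names illustrative):

# service_enable() is new; service_name becoming optional allows bare
# systemctl verbs such as daemon-reload.
service_enable('nova-compute')   # systemctl enable nova-compute
service('daemon-reload')         # systemctl daemon-reload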
@ -289,7 +320,7 @@ def service_running(service_name, **kwargs):
    if os.path.exists(_UPSTART_CONF.format(service_name)):
        try:
            cmd = ['status', service_name]
            for key, value in six.iteritems(kwargs):
            for key, value in kwargs.items():
                parameter = '%s=%s' % (key, value)
                cmd.append(parameter)
            output = subprocess.check_output(

@ -564,7 +595,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
        with open(path, 'wb') as target:
            os.fchown(target.fileno(), uid, gid)
            os.fchmod(target.fileno(), perms)
            if six.PY3 and isinstance(content, six.string_types):
            if isinstance(content, str):
                content = content.encode('UTF-8')
            target.write(content)
    return

@ -926,7 +957,7 @@ def pwgen(length=None):
    random_generator = random.SystemRandom()
    random_chars = [
        random_generator.choice(alphanumeric_chars) for _ in range(length)]
    return(''.join(random_chars))
    return ''.join(random_chars)


def is_phy_iface(interface):

@ -967,7 +998,7 @@ def get_bond_master(interface):

def list_nics(nic_type=None):
    """Return a list of nics of given type(s)"""
    if isinstance(nic_type, six.string_types):
    if isinstance(nic_type, str):
        int_types = [nic_type]
    else:
        int_types = nic_type

@ -1081,8 +1112,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):
            try:
                chown(full, uid, gid)
            except (IOError, OSError) as e:
                # Intended to ignore "file not found". Catching both to be
                # compatible with both Python 2.7 and 3.x.
                # Intended to ignore "file not found".
                if e.errno == errno.ENOENT:
                    pass
@ -30,6 +30,11 @@ UBUNTU_RELEASES = (
    'hirsute',
    'impish',
    'jammy',
    'kinetic',
    'lunar',
    'mantic',
    'noble',
    'oracular',
)

@ -15,9 +15,8 @@
import os
import json
import inspect
from collections import Iterable, OrderedDict

import six
from collections import OrderedDict
from collections.abc import Iterable

from charmhelpers.core import host
from charmhelpers.core import hookenv
@ -171,10 +170,7 @@ class ServiceManager(object):
            if not units:
                continue
            remote_service = units[0].split('/')[0]
            if six.PY2:
                argspec = inspect.getargspec(provider.provide_data)
            else:
                argspec = inspect.getfullargspec(provider.provide_data)
            argspec = inspect.getfullargspec(provider.provide_data)
            if len(argspec.args) > 1:
                data = provider.provide_data(remote_service, service_ready)
            else:

@ -179,7 +179,7 @@ class RequiredConfig(dict):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            self.config = yaml.load(fp).get('options', {})
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        for option in self.required_options:

@ -227,7 +227,7 @@ class StoredContext(dict):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            data = yaml.load(file_stream)
            data = yaml.safe_load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data
|
@ -15,7 +15,6 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import six
|
||||
import re
|
||||
|
||||
TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'}
|
||||
@ -27,8 +26,8 @@ def bool_from_string(value, truthy_strings=TRUTHY_STRINGS, falsey_strings=FALSEY
|
||||
|
||||
Returns True if value translates to True otherwise False.
|
||||
"""
|
||||
if isinstance(value, six.string_types):
|
||||
value = six.text_type(value)
|
||||
if isinstance(value, str):
|
||||
value = str(value)
|
||||
else:
|
||||
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
|
||||
raise ValueError(msg)
|
||||
@ -61,8 +60,8 @@ def bytes_from_string(value):
|
||||
'P': 5,
|
||||
'PB': 5,
|
||||
}
|
||||
if isinstance(value, six.string_types):
|
||||
value = six.text_type(value)
|
||||
if isinstance(value, str):
|
||||
value = str(value)
|
||||
else:
|
||||
msg = "Unable to interpret non-string value '%s' as bytes" % (value)
|
||||
raise ValueError(msg)
|
||||
|
@ -13,7 +13,6 @@
# limitations under the License.

import os
import sys

from charmhelpers.core import host
from charmhelpers.core import hookenv

@ -43,9 +42,8 @@ def render(source, target, context, owner='root', group='root',
    The rendered template will be written to the file as well as being returned
    as a string.

    Note: Using this requires python-jinja2 or python3-jinja2; if it is not
    installed, calling this will attempt to use charmhelpers.fetch.apt_install
    to install it.
    Note: Using this requires python3-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions

@ -57,10 +55,7 @@ def render(source, target, context, owner='root', group='root',
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        if sys.version_info.major == 2:
            apt_install('python-jinja2', fatal=True)
        else:
            apt_install('python3-jinja2', fatal=True)
        apt_install('python3-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if template_loader:
@ -151,6 +151,7 @@ import contextlib
import datetime
import itertools
import json
import logging
import os
import pprint
import sqlite3

@ -171,8 +172,9 @@ class Storage(object):
    path parameter which causes sqlite3 to only build the db in memory.
    This should only be used for testing purposes.
    """
    def __init__(self, path=None):
    def __init__(self, path=None, keep_revisions=False):
        self.db_path = path
        self.keep_revisions = keep_revisions
        if path is None:
            if 'UNIT_STATE_DB' in os.environ:
                self.db_path = os.environ['UNIT_STATE_DB']

@ -242,7 +244,7 @@ class Storage(object):
        Remove a key from the database entirely.
        """
        self.cursor.execute('delete from kv where key=?', [key])
        if self.revision and self.cursor.rowcount:
        if self.keep_revisions and self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])

@ -259,14 +261,14 @@ class Storage(object):
        if keys is not None:
            keys = ['%s%s' % (prefix, key) for key in keys]
            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
            if self.revision and self.cursor.rowcount:
            if self.keep_revisions and self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
        else:
            self.cursor.execute('delete from kv where key like ?',
                                ['%s%%' % prefix])
            if self.revision and self.cursor.rowcount:
            if self.keep_revisions and self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values (?, ?, ?)',
                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])

@ -299,7 +301,7 @@ class Storage(object):
            where key = ?''', [serialized, key])

        # Save
        if not self.revision:
        if (not self.keep_revisions) or (not self.revision):
            return value

        self.cursor.execute(
@ -520,6 +522,42 @@ _KV = None

def kv():
    global _KV

    # If we are running unit tests, it is useful to go into memory-backed KV store to
    # avoid concurrency issues when running multiple tests. This is not a
    # problem when juju is running normally.

    env_var = os.environ.get("CHARM_HELPERS_TESTMODE", "auto").lower()
    if env_var not in ["auto", "no", "yes"]:
        logging.warning("Unknown value for CHARM_HELPERS_TESTMODE '%s'"
                        ", assuming 'no'", env_var)
        env_var = "no"

    if env_var == "no":
        in_memory_db = False
    elif env_var == "yes":
        in_memory_db = True
    elif env_var == "auto":
        # If UNIT_STATE_DB is set, respect this request
        if "UNIT_STATE_DB" in os.environ:
            in_memory_db = False
        # Autodetect normal juju execution by looking for juju variables
        elif "JUJU_CHARM_DIR" in os.environ or "JUJU_UNIT_NAME" in os.environ:
            in_memory_db = False
        else:
            # We are probably running in unit test mode
            logging.warning("Auto-detected unit test environment for KV store.")
            in_memory_db = True
    else:
        # Help the linter realise that in_memory_db is always set
        raise Exception("Cannot reach this line")

    if _KV is None:
        _KV = Storage()
        if in_memory_db:
            _KV = Storage(":memory:")
        else:
            _KV = Storage()
    else:
        if in_memory_db and _KV.db_path != ":memory:":
            logging.warning("Running with in_memory_db and KV is not set to :memory:")
    return _KV
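A sketch of opting in from a test suite (the test class and key are illustrative):

# Forces the KV store into memory for unit tests.
import os
import unittest

os.environ['CHARM_HELPERS_TESTMODE'] = 'yes'

from charmhelpers.core import unitdata


class TestCharmState(unittest.TestCase):
    def test_flag_roundtrip(self):
        db = unitdata.kv()           # a ':memory:' Storage in this mode
        db.set('unit-paused', True)
        self.assertTrue(db.get('unit-paused'))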
@ -20,11 +20,7 @@ from charmhelpers.core.hookenv import (
    log,
)

import six
if six.PY3:
    from urllib.parse import urlparse, urlunparse
else:
    from urlparse import urlparse, urlunparse
from urllib.parse import urlparse, urlunparse


# The order of this list is very important. Handlers should be listed in from

@ -134,14 +130,14 @@ def configure_sources(update=False,
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    if isinstance(sources, six.string_types):
    if isinstance(sources, str):
        sources = [sources]

    if keys is None:
        for source in sources:
            add_source(source, None)
    else:
        if isinstance(keys, six.string_types):
        if isinstance(keys, str):
            keys = [keys]

        if len(sources) != len(keys):
@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import os
import hashlib
import re

@ -24,28 +25,21 @@ from charmhelpers.payload.archive import (
    get_archive_handler,
    extract,
)
from charmhelpers.core.hookenv import (
    env_proxy_settings,
)
from charmhelpers.core.host import mkdir, check_hash

import six
if six.PY3:
    from urllib.request import (
        build_opener, install_opener, urlopen, urlretrieve,
        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
    )
    from urllib.parse import urlparse, urlunparse, parse_qs
    from urllib.error import URLError
else:
    from urllib import urlretrieve
    from urllib2 import (
        build_opener, install_opener, urlopen,
        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
        URLError
    )
    from urlparse import urlparse, urlunparse, parse_qs
from urllib.request import (
    build_opener, install_opener, urlopen, urlretrieve,
    HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
    ProxyHandler
)
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError


def splituser(host):
    '''urllib.splituser(), but six's support of this seems broken'''
    _userprog = re.compile('^(.*)@(.*)$')
    match = _userprog.match(host)
    if match:

@ -54,7 +48,6 @@ def splituser(host):


def splitpasswd(user):
    '''urllib.splitpasswd(), but six's support of this is missing'''
    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
    match = _passwdprog.match(user)
    if match:
@ -62,6 +55,20 @@ def splitpasswd(user):
    return user, None


@contextlib.contextmanager
def proxy_env():
    """
    Creates a context which temporarily modifies the proxy settings in os.environ.
    """
    restore = {**os.environ}  # Copy the current os.environ
    juju_proxies = env_proxy_settings() or {}
    os.environ.update(**juju_proxies)  # Insert or Update the os.environ
    yield os.environ
    for key in juju_proxies:
        del os.environ[key]  # remove any keys which were added or updated
    os.environ.update(**restore)  # restore any original values


class ArchiveUrlFetchHandler(BaseFetchHandler):
    """
    Handler to download archive files from arbitrary URLs.
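A sketch of the context manager in use (the URL is a placeholder):

# Juju model proxy settings apply only inside the block; os.environ is
# restored afterwards.
with proxy_env():
    response = urlopen('https://example.com/archive.tar.gz')
    payload = response.read()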
@ -92,6 +99,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
        # propagate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse(source)
        handlers = []
        if proto in ('http', 'https'):
            auth, barehost = splituser(netloc)
            if auth is not None:

@ -101,10 +109,13 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
                # Realm is set to None in add_password to force the username and password
                # to be used whatever the realm
                passman.add_password(None, source, username, password)
                authhandler = HTTPBasicAuthHandler(passman)
                opener = build_opener(authhandler)
                install_opener(opener)
        response = urlopen(source)
                handlers.append(HTTPBasicAuthHandler(passman))

        with proxy_env():
            handlers.append(ProxyHandler())
            opener = build_opener(*handlers)
            install_opener(opener)
            response = urlopen(source)
        try:
            with open(dest, 'wb') as dest_file:
                dest_file.write(response.read())

@ -150,10 +161,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
            raise UnhandledSource(e.strerror)
        options = parse_qs(url_parts.fragment)
        for key, value in options.items():
            if not six.PY3:
                algorithms = hashlib.algorithms
            else:
                algorithms = hashlib.algorithms_available
            algorithms = hashlib.algorithms_available
            if key in algorithms:
                if len(value) != 1:
                    raise TypeError(
@ -15,7 +15,6 @@
import subprocess
import os
import time
import six
import yum

from tempfile import NamedTemporaryFile

@ -42,7 +41,7 @@ def install(packages, options=None, fatal=False):
    if options is not None:
        cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, six.string_types):
    if isinstance(packages, str):
        cmd.append(packages)
    else:
        cmd.extend(packages)

@ -71,7 +70,7 @@ def update(fatal=False):
def purge(packages, fatal=False):
    """Purge one or more packages."""
    cmd = ['yum', '--assumeyes', 'remove']
    if isinstance(packages, six.string_types):
    if isinstance(packages, str):
        cmd.append(packages)
    else:
        cmd.extend(packages)

@ -83,7 +82,7 @@ def yum_search(packages):
    """Search for a package."""
    output = {}
    cmd = ['yum', 'search']
    if isinstance(packages, six.string_types):
    if isinstance(packages, str):
        cmd.append(packages)
    else:
        cmd.extend(packages)

@ -15,8 +15,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import atexit
import sys

@ -16,7 +16,6 @@
# limitations under the License.

import os
import six
import subprocess
import sys
@ -40,10 +39,7 @@ def pip_execute(*args, **kwargs):
            from pip import main as _pip_execute
        except ImportError:
            apt_update()
            if six.PY2:
                apt_install('python-pip')
            else:
                apt_install('python3-pip')
            apt_install('python3-pip')
            from pip import main as _pip_execute
        _pip_execute(*args, **kwargs)
    finally:

@ -140,12 +136,8 @@ def pip_list():

def pip_create_virtualenv(path=None):
    """Create an isolated Python environment."""
    if six.PY2:
        apt_install('python-virtualenv')
        extra_flags = []
    else:
        apt_install(['python3-virtualenv', 'virtualenv'])
        extra_flags = ['--python=python3']
    apt_install(['python3-virtualenv', 'virtualenv'])
    extra_flags = ['--python=python3']

    if path:
        venv_path = path

@ -52,7 +52,7 @@ def _snap_exec(commands):
    :param commands: List commands
    :return: Integer exit code
    """
    assert type(commands) == list
    assert isinstance(commands, list)

    retry_count = 0
    return_code = None
|
@ -13,10 +13,8 @@
|
||||
# limitations under the License.
|
||||
|
||||
from collections import OrderedDict
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import six
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
@ -224,6 +222,58 @@ CLOUD_ARCHIVE_POCKETS = {
|
||||
'yoga/proposed': 'focal-proposed/yoga',
|
||||
'focal-yoga/proposed': 'focal-proposed/yoga',
|
||||
'focal-proposed/yoga': 'focal-proposed/yoga',
|
||||
# Zed
|
||||
'zed': 'jammy-updates/zed',
|
||||
'jammy-zed': 'jammy-updates/zed',
|
||||
'jammy-zed/updates': 'jammy-updates/zed',
|
||||
'jammy-updates/zed': 'jammy-updates/zed',
|
||||
'zed/proposed': 'jammy-proposed/zed',
|
||||
'jammy-zed/proposed': 'jammy-proposed/zed',
|
||||
'jammy-proposed/zed': 'jammy-proposed/zed',
|
||||
# antelope
|
||||
'antelope': 'jammy-updates/antelope',
|
||||
'jammy-antelope': 'jammy-updates/antelope',
|
||||
'jammy-antelope/updates': 'jammy-updates/antelope',
|
||||
'jammy-updates/antelope': 'jammy-updates/antelope',
|
||||
'antelope/proposed': 'jammy-proposed/antelope',
|
||||
'jammy-antelope/proposed': 'jammy-proposed/antelope',
|
||||
'jammy-proposed/antelope': 'jammy-proposed/antelope',
|
||||
# bobcat
|
||||
'bobcat': 'jammy-updates/bobcat',
|
||||
'jammy-bobcat': 'jammy-updates/bobcat',
|
||||
'jammy-bobcat/updates': 'jammy-updates/bobcat',
|
||||
'jammy-updates/bobcat': 'jammy-updates/bobcat',
|
||||
'bobcat/proposed': 'jammy-proposed/bobcat',
|
||||
'jammy-bobcat/proposed': 'jammy-proposed/bobcat',
|
||||
'jammy-proposed/bobcat': 'jammy-proposed/bobcat',
|
||||
# caracal
|
||||
'caracal': 'jammy-updates/caracal',
|
||||
'jammy-caracal': 'jammy-updates/caracal',
|
||||
'jammy-caracal/updates': 'jammy-updates/caracal',
|
||||
'jammy-updates/caracal': 'jammy-updates/caracal',
|
||||
'caracal/proposed': 'jammy-proposed/caracal',
|
||||
'jammy-caracal/proposed': 'jammy-proposed/caracal',
|
||||
'jammy-proposed/caracal': 'jammy-proposed/caracal',
|
||||
# dalmatian
|
||||
'dalmatian': 'noble-updates/dalmatian',
|
||||
'noble-dalmatian': 'noble-updates/dalmatian',
|
||||
'noble-dalmatian/updates': 'noble-updates/dalmatian',
|
||||
'noble-updates/dalmatian': 'noble-updates/dalmatian',
|
||||
'dalmatian/proposed': 'noble-proposed/dalmatian',
|
||||
'noble-dalmatian/proposed': 'noble-proposed/dalmatian',
|
||||
'noble-proposed/dalmatian': 'noble-proposed/dalmatian',
|
||||
# epoxy
|
||||
'epoxy': 'noble-updates/epoxy',
|
||||
'noble-epoxy': 'noble-updates/epoxy',
|
||||
'noble-epoxy/updates': 'noble-updates/epoxy',
|
||||
'noble-updates/epoxy': 'noble-updates/epoxy',
|
||||
'epoxy/proposed': 'noble-proposed/epoxy',
|
||||
'noble-epoxy/proposed': 'noble-proposed/epoxy',
|
||||
'noble-proposed/epoxy': 'noble-proposed/epoxy',
|
||||
|
||||
# OVN
|
||||
'focal-ovn-22.03': 'focal-updates/ovn-22.03',
|
||||
'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
|
||||
}
|
||||
|
||||
|
||||
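With these mappings in place, the Dalmatian and Epoxy UCA pockets resolve like earlier releases; a sketch of the add_source() inputs a charm's openstack-origin config would produce:

# Illustrative openstack-origin values and the pockets they resolve to.
add_source('cloud:noble-dalmatian')  # -> noble-updates/dalmatian
add_source('cloud:noble-epoxy')      # -> noble-updates/epoxy
add_source('cloud:epoxy/proposed')   # -> noble-proposed/epoxy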
@ -250,6 +300,12 @@ OPENSTACK_RELEASES = (
    'wallaby',
    'xena',
    'yoga',
    'zed',
    'antelope',
    'bobcat',
    'caracal',
    'dalmatian',
    'epoxy',
)

@ -276,6 +332,12 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
    ('hirsute', 'wallaby'),
    ('impish', 'xena'),
    ('jammy', 'yoga'),
    ('kinetic', 'zed'),
    ('lunar', 'antelope'),
    ('mantic', 'bobcat'),
    ('noble', 'caracal'),
    ('oracular', 'dalmatian'),
    ('plucky', 'epoxy'),
])
@ -355,13 +417,16 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
    :type quiet: bool
    :raises: subprocess.CalledProcessError
    """
    if not packages:
        log("Nothing to install", level=DEBUG)
        return
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, six.string_types):
    if isinstance(packages, str):
        cmd.append(packages)
    else:
        cmd.extend(packages)

@ -413,7 +478,7 @@ def apt_purge(packages, fatal=False):
    :raises: subprocess.CalledProcessError
    """
    cmd = ['apt-get', '--assume-yes', 'purge']
    if isinstance(packages, six.string_types):
    if isinstance(packages, str):
        cmd.append(packages)
    else:
        cmd.extend(packages)

@ -440,7 +505,7 @@ def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark."""
    log("Marking {} as {}".format(packages, mark))
    cmd = ['apt-mark', mark]
    if isinstance(packages, six.string_types):
    if isinstance(packages, str):
        cmd.append(packages)
    else:
        cmd.extend(packages)

@ -485,10 +550,7 @@ def import_key(key):
    if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
            '-----END PGP PUBLIC KEY BLOCK-----' in key):
        log("Writing provided PGP key in the binary format", level=DEBUG)
        if six.PY3:
            key_bytes = key.encode('utf-8')
        else:
            key_bytes = key
        key_bytes = key.encode('utf-8')
        key_name = _get_keyid_by_gpg_key(key_bytes)
        key_gpg = _dearmor_gpg_key(key_bytes)
        _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
@ -528,9 +590,8 @@ def _get_keyid_by_gpg_key(key_material):
                          stderr=subprocess.PIPE,
                          stdin=subprocess.PIPE)
    out, err = ps.communicate(input=key_material)
    if six.PY3:
        out = out.decode('utf-8')
        err = err.decode('utf-8')
    out = out.decode('utf-8')
    err = err.decode('utf-8')
    if 'gpg: no valid OpenPGP data found.' in err:
        raise GPGKeyError('Invalid GPG key material provided')
    # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)

@ -570,7 +631,7 @@ def _get_key_by_keyid(keyid):
    curl_cmd = ['curl', keyserver_url.format(keyid)]
    # use proxy server settings in order to retrieve the key
    return subprocess.check_output(curl_cmd,
                                   env=env_proxy_settings(['https']))
                                   env=env_proxy_settings(['https', 'no_proxy']))


def _dearmor_gpg_key(key_asc):
||||
@ -588,8 +649,7 @@ def _dearmor_gpg_key(key_asc):
|
||||
stdin=subprocess.PIPE)
|
||||
out, err = ps.communicate(input=key_asc)
|
||||
# no need to decode output as it is binary (invalid utf-8), only error
|
||||
if six.PY3:
|
||||
err = err.decode('utf-8')
|
||||
err = err.decode('utf-8')
|
||||
if 'gpg: no valid OpenPGP data found.' in err:
|
||||
raise GPGKeyError('Invalid GPG key material. Check your network setup'
|
||||
' (MTU, routing, DNS) and/or proxy server settings'
|
||||
@ -684,6 +744,7 @@ def add_source(source, key=None, fail_invalid=False):
        (r"^cloud-archive:(.*)$", _add_apt_repository),
        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
        (r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
        (r"^cloud:(.*)$", _add_cloud_pocket),
        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@ -693,7 +754,7 @@ def add_source(source, key=None, fail_invalid=False):
    ])
    if source is None:
        source = ''
    for r, fn in six.iteritems(_mapping):
    for r, fn in _mapping.items():
        m = re.match(r, source)
        if m:
            if key:
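The new OVN pattern must sit before the generic `cloud:(.*)-(.*)` pattern so the second capture group keeps the full `ovn-22.03` component; a quick demonstration of why the ordering matters:

import re

# The OVN-specific pattern keeps 'ovn-22.03' intact in group 2...
m = re.match(r"^cloud:(.*)-(ovn-.*)$", 'cloud:focal-ovn-22.03')
assert m.groups() == ('focal', 'ovn-22.03')

# ...whereas the generic pattern splits at the last hyphen.
m = re.match(r"^cloud:(.*)-(.*)$", 'cloud:focal-ovn-22.03')
assert m.groups() == ('focal-ovn', '22.03')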
@ -726,7 +787,7 @@ def _add_proposed():
    """
    release = get_distrib_codename()
    arch = platform.machine()
    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
    if arch not in ARCH_TO_PROPOSED_POCKET.keys():
        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
                                .format(arch))
    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
@ -747,6 +808,11 @@ def _add_apt_repository(spec):
    )


def __write_sources_list_d_actual_pocket(file, actual_pocket):
    with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
        apt.write(CLOUD_ARCHIVE.format(actual_pocket))


def _add_cloud_pocket(pocket):
    """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list

@ -766,8 +832,9 @@ def _add_cloud_pocket(pocket):
            'Unsupported cloud: source option %s' %
            pocket)
    actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    __write_sources_list_d_actual_pocket(
        'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
        actual_pocket)


def _add_cloud_staging(cloud_archive_release, openstack_release):
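A sketch of the filename choice this hunk introduces: OVN pockets now get their own sources file so they no longer clobber cloud-archive.list (the `target_file` helper is illustrative only):

def target_file(pocket):
    # Mirrors the expression passed to __write_sources_list_d_actual_pocket.
    return 'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn')

assert target_file('focal-ovn-22.03') == 'cloud-archive-ovn.list'
assert target_file('focal-wallaby') == 'cloud-archive.list'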
@ -913,9 +980,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),

    kwargs = {}
    if quiet:
        devnull = os.devnull if six.PY2 else subprocess.DEVNULL
        kwargs['stdout'] = devnull
        kwargs['stderr'] = devnull
        kwargs['stdout'] = subprocess.DEVNULL
        kwargs['stderr'] = subprocess.DEVNULL

    if not retry_message:
        retry_message = "Failed executing '{}'".format(" ".join(cmd))
@ -929,10 +995,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
        try:
            result = subprocess.check_call(cmd, env=env, **kwargs)
        except subprocess.CalledProcessError as e:
            retry_count = retry_count + 1
            if retry_count > max_retries:
                raise
            result = e.returncode
            if result not in retry_results:
                # a non-retriable exitcode was produced
                raise
            retry_count += 1
            if retry_count > max_retries:
                # a retriable exitcode was produced more than {max_retries} times
                raise
            log(retry_message)
            time.sleep(CMD_RETRY_DELAY)
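A minimal sketch of the retry pattern this hunk settles on, assuming the surrounding while-loop from the unchanged part of the function: fail immediately on non-retriable exit codes, otherwise retry up to max_retries times.

import subprocess
import time

def run_with_retries(cmd, max_retries=3, retry_exitcodes=(1,), delay=10):
    retry_count = 0
    result = None
    retry_results = (None,) + retry_exitcodes
    while result in retry_results:
        try:
            result = subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            result = e.returncode
            if result not in retry_results:
                raise  # non-retriable exit code: fail immediately
            retry_count += 1
            if retry_count > max_retries:
                raise  # retriable exit code, but retry budget exhausted
            time.sleep(delay)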
@ -957,9 +1027,8 @@ def _run_apt_command(cmd, fatal=False, quiet=False):
    else:
        kwargs = {}
        if quiet:
            devnull = os.devnull if six.PY2 else subprocess.DEVNULL
            kwargs['stdout'] = devnull
            kwargs['stderr'] = devnull
            kwargs['stdout'] = subprocess.DEVNULL
            kwargs['stderr'] = subprocess.DEVNULL
        subprocess.call(cmd, env=get_apt_dpkg_env(), **kwargs)
@ -989,7 +1058,7 @@ def get_installed_version(package):
        Version object
    """
    cache = apt_cache()
    dpkg_result = cache._dpkg_list([package]).get(package, {})
    dpkg_result = cache.dpkg_list([package]).get(package, {})
    current_ver = None
    installed_version = dpkg_result.get('version')
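A usage sketch, assuming the usual charmhelpers.fetch re-export of this helper ('openssl' is just an example name):

from charmhelpers.fetch import get_installed_version

# Returns a version object for an installed package, or None when the
# package is not installed.
ver = get_installed_version('openssl')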
@ -40,6 +40,9 @@ import os
import subprocess
import sys

from charmhelpers import deprecate
from charmhelpers.core.hookenv import log


class _container(dict):
    """Simple container for attributes."""
@ -79,7 +82,7 @@ class Cache(object):
        apt_result = self._apt_cache_show([package])[package]
        apt_result['name'] = apt_result.pop('package')
        pkg = Package(apt_result)
        dpkg_result = self._dpkg_list([package]).get(package, {})
        dpkg_result = self.dpkg_list([package]).get(package, {})
        current_ver = None
        installed_version = dpkg_result.get('version')
        if installed_version:
@ -88,9 +91,29 @@ class Cache(object):
            pkg.architecture = dpkg_result.get('architecture')
        return pkg

    @deprecate("use dpkg_list() instead.", "2022-05", log=log)
    def _dpkg_list(self, packages):
        return self.dpkg_list(packages)

    def dpkg_list(self, packages):
        """Get data from system dpkg database for package.

        Note that this method is also useful for querying package names
        containing wildcards, for example

          apt_cache().dpkg_list(['nvidia-vgpu-ubuntu-*'])

        may return

        {
            'nvidia-vgpu-ubuntu-470': {
                'name': 'nvidia-vgpu-ubuntu-470',
                'version': '470.68',
                'architecture': 'amd64',
                'description': 'NVIDIA vGPU driver - version 470.68'
            }
        }

        :param packages: Packages to get data from
        :type packages: List[str]
        :returns: Structured data about installed packages, keys like
@ -99,13 +122,12 @@ class Cache(object):
        :raises: subprocess.CalledProcessError
        """
        pkgs = {}
        cmd = ['dpkg-query', '--list']
        cmd = [
            'dpkg-query', '--show',
            '--showformat',
            r'${db:Status-Abbrev}\t${Package}\t${Version}\t${Architecture}\t${binary:Summary}\n'
        ]
        cmd.extend(packages)
        if locale.getlocale() == (None, None):
            # subprocess calls out to locale.getpreferredencoding(False) to
            # determine encoding. Workaround for Trusty where the
            # environment appears to not be set up correctly.
            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
        try:
            output = subprocess.check_output(cmd,
                                             stderr=subprocess.STDOUT,
@ -117,24 +139,17 @@ class Cache(object):
            if cp.returncode != 1:
                raise
            output = cp.output
        headings = []
        for line in output.splitlines():
            if line.startswith('||/'):
                headings = line.split()
                headings.pop(0)
            # only process lines for successfully installed packages
            if not (line.startswith('ii ') or line.startswith('hi ')):
                continue
            elif (line.startswith('|') or line.startswith('+') or
                    line.startswith('dpkg-query:')):
                continue
            else:
                data = line.split(None, 4)
                status = data.pop(0)
                if status not in ('ii', 'hi'):
                    continue
                pkg = {}
                pkg.update({k.lower(): v for k, v in zip(headings, data)})
                if 'name' in pkg:
                    pkgs.update({pkg['name']: pkg})
            status, name, version, arch, desc = line.split('\t', 4)
            pkgs[name] = {
                'name': name,
                'version': version,
                'architecture': arch,
                'description': desc,
            }
        return pkgs

    def _apt_cache_show(self, packages):
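One line of the new machine-readable dpkg-query output and how `split('\t', 4)` decomposes it into the dict fields above (the package values are sample data):

line = 'ii \tpython3-yaml\t6.0-3\tamd64\tYAML parser and emitter for Python3'
status, name, version, arch, desc = line.split('\t', 4)
# ${db:Status-Abbrev} renders installed packages as 'ii ' / 'hi ',
# which is exactly what the startswith() filter above keys on.
assert status.startswith('ii')
assert (name, version, arch) == ('python3-yaml', '6.0-3', 'amd64')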
@ -9,19 +9,13 @@ def get_platform():
    will be returned (which is the name of the module).
    This string is used to decide which platform module should be imported.
    """
    # linux_distribution is deprecated and will be removed in Python 3.7
    # Warnings *not* disabled, as we certainly need to fix this.
    if hasattr(platform, 'linux_distribution'):
        tuple_platform = platform.linux_distribution()
        current_platform = tuple_platform[0]
    else:
        current_platform = _get_platform_from_fs()
    current_platform = _get_current_platform()

    if "Ubuntu" in current_platform:
        return "ubuntu"
    elif "CentOS" in current_platform:
        return "centos"
    elif "debian" in current_platform:
    elif "debian" in current_platform or "Debian" in current_platform:
        # Stock Python does not detect Ubuntu and instead returns debian.
        # Or at least it does in some build environments like Travis CI
        return "ubuntu"
@ -36,6 +30,24 @@ def get_platform():
                           .format(current_platform))


def _get_current_platform():
    """Return the current platform information for the OS.

    Attempts to lookup linux distribution information from the platform
    module for releases of python < 3.7. For newer versions of python,
    the platform is determined from the /etc/os-release file.
    """
    # linux_distribution is deprecated and will be removed in Python 3.7
    # Warnings *not* disabled, as we certainly need to fix this.
    if hasattr(platform, 'linux_distribution'):
        tuple_platform = platform.linux_distribution()
        current_platform = tuple_platform[0]
    else:
        current_platform = _get_platform_from_fs()

    return current_platform


def _get_platform_from_fs():
    """Get Platform from /etc/os-release."""
    with open(os.path.join(os.sep, 'etc', 'os-release')) as fin:
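The diff is truncated here, so the body of _get_platform_from_fs() is not shown. A hypothetical stand-in sketch of the kind of lookup involved, reading the NAME= field per os-release(5) (not the actual charm-helpers implementation):

import os

def platform_name_from_fs():
    # Hypothetical helper: return the distribution NAME from /etc/os-release.
    with open(os.path.join(os.sep, 'etc', 'os-release')) as fin:
        for line in fin:
            if line.startswith('NAME='):
                return line.split('=', 1)[1].strip().strip('"')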