Add support for application version

Juju 2.0 adds support for displaying the version of the application deployed
by a charm in juju status. Insert the os_application_version_set function into
the existing assess_status function; assess_status is called after every hook
execution, and periodically thereafter, so any change in package version due
to normal system updates is also reflected in the status output.

This change also includes a resync of charm-helpers to pick up the hookenv and
contrib.openstack support for this feature.

Change-Id: I3cb8ac7b2f1b455a3c3086544c492500812363c6
parent ca8fe0b81f
commit 43c436f7a8
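For context, here is a minimal sketch (not part of this commit) of how a charm
might wire the new helper into its existing assess_status function. The
VERSION_PACKAGE value and the surrounding status logic are illustrative
assumptions only.

    # Hypothetical charm-side wiring; the package name is an example only.
    from charmhelpers.contrib.openstack.utils import os_application_version_set

    VERSION_PACKAGE = 'keystone'  # assumed package whose version is reported


    def assess_status(configs):
        """Assess unit status and report the application version."""
        # ... existing workload-status checks remain here ...
        # Report the installed package version (the helper falls back to the
        # OpenStack codename if the package version cannot be determined) so
        # that it appears in the `juju status` output.
        os_application_version_set(VERSION_PACKAGE)

Because assess_status runs after every hook and on the periodic update-status
interval, the reported version also tracks package upgrades applied outside of
charm hooks.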
@@ -1,7 +1,8 @@
 branch: lp:charm-helpers
 destination: hooks/charmhelpers
 include:
     - core
+    - osplatform
     - cli
     - fetch
     - contrib.openstack|inc=*
@@ -38,6 +38,7 @@ from charmhelpers.core.hookenv import (
 )
 
 from charmhelpers.core.host import service
+from charmhelpers.core import host
 
 # This module adds compatibility with the nrpe-external-master and plain nrpe
 # subordinate charms. To use it in your charm:
@@ -108,6 +109,13 @@ from charmhelpers.core.host import service
 #    def local_monitors_relation_changed():
 #        update_nrpe_config()
 #
+# 4.a If your charm is a subordinate charm set primary=False
+#
+#    from charmsupport.nrpe import NRPE
+#    (...)
+#    def update_nrpe_config():
+#        nrpe_compat = NRPE(primary=False)
+#
 # 5. ln -s hooks.py nrpe-external-master-relation-changed
 #    ln -s hooks.py local-monitors-relation-changed
@@ -220,9 +228,10 @@ class NRPE(object):
     nagios_exportdir = '/var/lib/nagios/export'
     nrpe_confdir = '/etc/nagios/nrpe.d'
 
-    def __init__(self, hostname=None):
+    def __init__(self, hostname=None, primary=True):
         super(NRPE, self).__init__()
         self.config = config()
+        self.primary = primary
         self.nagios_context = self.config['nagios_context']
         if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
             self.nagios_servicegroups = self.config['nagios_servicegroups']
@@ -238,6 +247,12 @@ class NRPE(object):
         else:
             self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
         self.checks = []
+        # Iff in an nrpe-external-master relation hook, set primary status
+        relation = relation_ids('nrpe-external-master')
+        if relation:
+            log("Setting charm primary status {}".format(primary))
+            for rid in relation_ids('nrpe-external-master'):
+                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
 
     def add_check(self, *args, **kwargs):
         self.checks.append(Check(*args, **kwargs))
@@ -332,16 +347,25 @@ def add_init_service_checks(nrpe, services, unit_name):
     :param str unit_name: Unit name to use in check description
     """
     for svc in services:
+        # Don't add a check for these services from neutron-gateway
+        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+            next
+
         upstart_init = '/etc/init/%s.conf' % svc
         sysv_init = '/etc/init.d/%s' % svc
-        if os.path.exists(upstart_init):
-            # Don't add a check for these services from neutron-gateway
-            if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
-                nrpe.add_check(
-                    shortname=svc,
-                    description='process check {%s}' % unit_name,
-                    check_cmd='check_upstart_job %s' % svc
-                )
+
+        if host.init_is_systemd():
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_systemd.py %s' % svc
+            )
+        elif os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
         elif os.path.exists(sysv_init):
             cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
             cron_file = ('*/5 * * * * root '
@@ -83,6 +83,56 @@ class OpenStackAmuletUtils(AmuletUtils):
         if not found:
             return 'endpoint not found'
 
+    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
+        """Validate keystone v3 endpoint data.
+
+        Validate the v3 endpoint data which has changed from v2. The
+        ports are used to find the matching endpoint.
+
+        The new v3 endpoint data looks like:
+
+        [<Endpoint enabled=True,
+                   id=0432655fc2f74d1e9fa17bdaa6f6e60b,
+                   interface=admin,
+                   links={u'self': u'<RESTful URL of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=17f842a0dc084b928e476fafe67e4095,
+                   url=http://10.5.6.5:9312>,
+         <Endpoint enabled=True,
+                   id=6536cb6cb92f4f41bf22b079935c7707,
+                   interface=admin,
+                   links={u'self': u'<RESTful url of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
+                   url=http://10.5.6.6:35357/v3>,
+         ... ]
+        """
+        self.log.debug('Validating v3 endpoint data...')
+        self.log.debug('actual: {}'.format(repr(endpoints)))
+        found = []
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if ((admin_port in ep.url and ep.interface == 'admin') or
+                    (internal_port in ep.url and ep.interface == 'internal') or
+                    (public_port in ep.url and ep.interface == 'public')):
+                found.append(ep.interface)
+                # note we ignore the links member.
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'region_id': ep.region_id,
+                          'interface': self.not_null,
+                          'url': ep.url,
+                          'service_id': ep.service_id, }
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if len(found) != 3:
+            return 'Unexpected number of endpoints found'
+
     def validate_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.
@@ -100,6 +150,72 @@ class OpenStackAmuletUtils(AmuletUtils):
                 return "endpoint {} does not exist".format(k)
         return ret
 
+    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate the keystone v3 catalog endpoint data.
+
+        Validate a list of dictinaries that make up the keystone v3 service
+        catalogue.
+
+        It is in the form of:
+
+
+        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
+                        u'interface': u'admin',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:35357/v3'},
+                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
+                        u'interface': u'public',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'},
+                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
+                        u'interface': u'internal',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'}],
+         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
+                           u'interface': u'public',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
+                           u'interface': u'internal',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
+                           u'interface': u'admin',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9312'}]}
+
+        Note, that an added complication is that the order of admin, public,
+        internal against 'interface' in each region.
+
+        Thus, the function sorts the expected and actual lists using the
+        interface key as a sort key, prior to the comparison.
+        """
+        self.log.debug('Validating v3 service catalog endpoint data...')
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for k, v in six.iteritems(expected):
+            if k in actual:
+                l_expected = sorted(v, key=lambda x: x['interface'])
+                l_actual = sorted(actual[k], key=lambda x: x['interface'])
+                if len(l_actual) != len(l_expected):
+                    return ("endpoint {} has differing number of interfaces "
+                            " - expected({}), actual({})"
+                            .format(k, len(l_expected), len(l_actual)))
+                for i_expected, i_actual in zip(l_expected, l_actual):
+                    self.log.debug("checking interface {}"
+                                   .format(i_expected['interface']))
+                    ret = self._validate_dict_data(i_expected, i_actual)
+                    if ret:
+                        return self.endpoint_error(k, ret)
+            else:
+                return "endpoint {} does not exist".format(k)
+        return ret
+
     def validate_tenant_data(self, expected, actual):
         """Validate tenant data.
@@ -928,7 +1044,8 @@ class OpenStackAmuletUtils(AmuletUtils):
                                                    retry_delay=5,
                                                    socket_timeout=1)
             connection = pika.BlockingConnection(parameters)
-            assert connection.server_properties['product'] == 'RabbitMQ'
+            assert connection.is_open is True
+            assert connection.is_closing is False
             self.log.debug('Connect OK')
             return connection
         except Exception as e:
@@ -245,6 +245,10 @@ def neutron_plugins():
             'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
         plugins['plumgrid']['server_packages'].remove(
             'neutron-plugin-plumgrid')
+    if release >= 'mitaka':
+        plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
+        plugins['nsx']['server_packages'].append('python-vmware-nsx')
+        plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
     return plugins
@@ -51,7 +51,8 @@ from charmhelpers.core.hookenv import (
     relation_set,
     service_name,
     status_set,
-    hook_name
+    hook_name,
+    application_version_set,
 )
 
 from charmhelpers.contrib.storage.linux.lvm import (
@@ -1889,3 +1890,29 @@ def config_flags_parser(config_flags):
             flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
 
     return flags
+
+
+def os_application_version_set(package):
+    '''Set version of application for Juju 2.0 and later'''
+    import apt_pkg as apt
+    cache = apt_cache()
+    application_version = None
+    application_codename = os_release(package)
+
+    try:
+        pkg = cache[package]
+        if not pkg.current_ver:
+            juju_log('Package {} is not currently installed.'.format(package),
+                     DEBUG)
+        else:
+            application_version = apt.upstream_version(pkg.current_ver.ver_str)
+    except:
+        juju_log('Package {} has no installation candidate.'.format(package),
+                 DEBUG)
+
+    # NOTE(jamespage) if not able to figure out package version, fallback to
+    # openstack codename version detection.
+    if not application_version:
+        application_version_set(application_codename)
+    else:
+        application_version_set(application_version)
@@ -87,6 +87,7 @@ clog to syslog = {use_syslog}
 DEFAULT_PGS_PER_OSD_TARGET = 100
 DEFAULT_POOL_WEIGHT = 10.0
 LEGACY_PG_COUNT = 200
+DEFAULT_MINIMUM_PGS = 2
 
 
 def validator(value, valid_type, valid_range=None):
@@ -266,6 +267,11 @@ class Pool(object):
         target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
         num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
 
+        # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
+        #       reasonable data distribution in minimal OSD configurations
+        if num_pg < DEFAULT_MINIMUM_PGS:
+            num_pg = DEFAULT_MINIMUM_PGS
+
         # The CRUSH algorithm has a slight optimization for placement groups
         # with powers of 2 so find the nearest power of 2. If the nearest
         # power of 2 is more than 25% below the original value, the next
@@ -843,6 +843,20 @@ def translate_exc(from_exc, to_exc):
     return inner_translate_exc1
 
 
+def application_version_set(version):
+    """Charm authors may trigger this command from any hook to output what
+    version of the application is running. This could be a package version,
+    for instance postgres version 9.5. It could also be a build number or
+    version control revision identifier, for instance git sha 6fb7ba68. """
+
+    cmd = ['application-version-set']
+    cmd.append(version)
+    try:
+        subprocess.check_call(cmd)
+    except OSError:
+        log("Application Version: {}".format(version))
+
+
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def is_leader():
     """Does the current unit hold the juju leadership
@@ -30,13 +30,29 @@ import subprocess
 import hashlib
 import functools
 import itertools
-from contextlib import contextmanager
-from collections import OrderedDict
 
 import six
 
+from contextlib import contextmanager
+from collections import OrderedDict
 from .hookenv import log
 from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.host_factory.ubuntu import (
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.host_factory.centos import (
+        service_available,
+        add_new_group,
+        lsb_release,
+        cmp_pkgrevno,
+    )  # flake8: noqa -- ignore F401 for this import
 
 
 def service_start(service_name):
@@ -144,8 +160,11 @@ def service_running(service_name):
             return False
         else:
             # This works for upstart scripts where the 'service' command
-            # returns a consistent string to represent running 'start/running'
-            if "start/running" in output:
+            # returns a consistent string to represent running
+            # 'start/running'
+            if ("start/running" in output or
+                    "is running" in output or
+                    "up and running" in output):
                 return True
     elif os.path.exists(_INIT_D_CONF.format(service_name)):
         # Check System V scripts init script return codes
@@ -153,18 +172,6 @@ def service_running(service_name):
         return False
 
 
-def service_available(service_name):
-    """Determine whether a system service is available"""
-    try:
-        subprocess.check_output(
-            ['service', service_name, 'status'],
-            stderr=subprocess.STDOUT).decode('UTF-8')
-    except subprocess.CalledProcessError as e:
-        return b'unrecognized service' not in e.output
-    else:
-        return True
-
-
 SYSTEMD_SYSTEM = '/run/systemd/system'
 
 
@@ -173,8 +180,9 @@ def init_is_systemd():
     return os.path.isdir(SYSTEMD_SYSTEM)
 
 
-def adduser(username, password=None, shell='/bin/bash', system_user=False,
-            primary_group=None, secondary_groups=None, uid=None, home_dir=None):
+def adduser(username, password=None, shell='/bin/bash',
+            system_user=False, primary_group=None,
+            secondary_groups=None, uid=None, home_dir=None):
     """Add a user to the system.
 
     Will log but otherwise succeed if the user already exists.
@@ -286,17 +294,7 @@ def add_group(group_name, system_group=False, gid=None):
         log('group with gid {0} already exists!'.format(gid))
     except KeyError:
         log('creating group {0}'.format(group_name))
-        cmd = ['addgroup']
-        if gid:
-            cmd.extend(['--gid', str(gid)])
-        if system_group:
-            cmd.append('--system')
-        else:
-            cmd.extend([
-                '--group',
-            ])
-        cmd.append(group_name)
-        subprocess.check_call(cmd)
+        add_new_group(group_name, system_group, gid)
         group_info = grp.getgrnam(group_name)
     return group_info
@@ -541,16 +539,6 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
     return r
 
 
-def lsb_release():
-    """Return /etc/lsb-release in a dict"""
-    d = {}
-    with open('/etc/lsb-release', 'r') as lsb:
-        for l in lsb:
-            k, v = l.split('=')
-            d[k.strip()] = v.strip()
-    return d
-
-
 def pwgen(length=None):
     """Generate a random pasword."""
     if length is None:
@@ -674,25 +662,6 @@ def get_nic_hwaddr(nic):
     return hwaddr
 
 
-def cmp_pkgrevno(package, revno, pkgcache=None):
-    """Compare supplied revno with the revno of the installed package
-
-    *  1 => Installed revno is greater than supplied arg
-    *  0 => Installed revno is the same as supplied arg
-    * -1 => Installed revno is less than supplied arg
-
-    This function imports apt_cache function from charmhelpers.fetch if
-    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
-    you call this function, or pass an apt_pkg.Cache() instance.
-    """
-    import apt_pkg
-    if not pkgcache:
-        from charmhelpers.fetch import apt_cache
-        pkgcache = apt_cache()
-    pkg = pkgcache[package]
-    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
 @contextmanager
 def chdir(directory):
     """Change the current working directory to a different directory for a code
new file (0 lines): hooks/charmhelpers/core/host_factory/__init__.py

new file (56 lines): hooks/charmhelpers/core/host_factory/centos.py
import subprocess
import yum
import os


def service_available(service_name):
    # """Determine whether a system service is available."""
    if os.path.isdir('/run/systemd/system'):
        cmd = ['systemctl', 'is-enabled', service_name]
    else:
        cmd = ['service', service_name, 'is-enabled']
    return subprocess.call(cmd) == 0


def add_new_group(group_name, system_group=False, gid=None):
    cmd = ['groupadd']
    if gid:
        cmd.extend(['--gid', str(gid)])
    if system_group:
        cmd.append('-r')
    cmd.append(group_name)
    subprocess.check_call(cmd)


def lsb_release():
    """Return /etc/os-release in a dict."""
    d = {}
    with open('/etc/os-release', 'r') as lsb:
        for l in lsb:
            s = l.split('=')
            if len(s) != 2:
                continue
            d[s[0].strip()] = s[1].strip()
    return d


def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    *  1 => Installed revno is greater than supplied arg
    *  0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports YumBase function if the pkgcache argument
    is None.
    """
    if not pkgcache:
        y = yum.YumBase()
        packages = y.doPackageLists()
        pkgcache = {i.Name: i.version for i in packages['installed']}
    pkg = pkgcache[package]
    if pkg > revno:
        return 1
    if pkg < revno:
        return -1
    return 0
new file (56 lines): hooks/charmhelpers/core/host_factory/ubuntu.py
import subprocess


def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        return b'unrecognized service' not in e.output
    else:
        return True


def add_new_group(group_name, system_group=False, gid=None):
    cmd = ['addgroup']
    if gid:
        cmd.extend(['--gid', str(gid)])
    if system_group:
        cmd.append('--system')
    else:
        cmd.extend([
            '--group',
        ])
    cmd.append(group_name)
    subprocess.check_call(cmd)


def lsb_release():
    """Return /etc/lsb-release in a dict"""
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for l in lsb:
            k, v = l.split('=')
            d[k.strip()] = v.strip()
    return d


def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    *  1 => Installed revno is greater than supplied arg
    *  0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    """
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
@@ -15,15 +15,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+import re
+import subprocess
 
+from charmhelpers.osplatform import get_platform
 from charmhelpers.core.hookenv import (
     log,
     INFO
 )
 
-from subprocess import check_call, check_output
-import re
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
 
 
 def modprobe(module, persist=True):
@@ -32,11 +45,9 @@ def modprobe(module, persist=True):
 
     log('Loading kernel module %s' % module, level=INFO)
 
-    check_call(cmd)
+    subprocess.check_call(cmd)
     if persist:
-        with open('/etc/modules', 'r+') as modules:
-            if module not in modules.read():
-                modules.write(module)
+        persistent_modprobe(module)
 
 
 def rmmod(module, force=False):
@@ -46,21 +57,16 @@ def rmmod(module, force=False):
         cmd.append('-f')
     cmd.append(module)
     log('Removing kernel module %s' % module, level=INFO)
-    return check_call(cmd)
+    return subprocess.check_call(cmd)
 
 
 def lsmod():
     """Shows what kernel modules are currently loaded"""
-    return check_output(['lsmod'],
-                        universal_newlines=True)
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
 
 
 def is_module_loaded(module):
     """Checks if a kernel module is already loaded"""
     matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
     return len(matches) > 0
-
-
-def update_initramfs(version='all'):
-    """Updates an initramfs image"""
-    return check_call(["update-initramfs", "-k", version, "-u"])
new file (0 lines): hooks/charmhelpers/core/kernel_factory/__init__.py

new file (17 lines): hooks/charmhelpers/core/kernel_factory/centos.py
import subprocess
import os


def persistent_modprobe(module):
    """Load a kernel module and configure for auto-load on reboot."""
    if not os.path.exists('/etc/rc.modules'):
        open('/etc/rc.modules', 'a')
        os.chmod('/etc/rc.modules', 111)
    with open('/etc/rc.modules', 'r+') as modules:
        if module not in modules.read():
            modules.write('modprobe %s\n' % module)


def update_initramfs(version='all'):
    """Updates an initramfs image."""
    return subprocess.check_call(["dracut", "-f", version])

new file (13 lines): hooks/charmhelpers/core/kernel_factory/ubuntu.py
import subprocess


def persistent_modprobe(module):
    """Load a kernel module and configure for auto-load on reboot."""
    with open('/etc/modules', 'r+') as modules:
        if module not in modules.read():
            modules.write(module)


def update_initramfs(version='all'):
    """Updates an initramfs image."""
    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
@@ -13,18 +13,12 @@
 # limitations under the License.
 
 import importlib
-from tempfile import NamedTemporaryFile
-import time
+from charmhelpers.osplatform import get_platform
 from yaml import safe_load
-from charmhelpers.core.host import (
-    lsb_release
-)
-import subprocess
 from charmhelpers.core.hookenv import (
     config,
     log,
 )
-import os
 
 import six
 if six.PY3:
@@ -33,87 +27,6 @@ else:
     from urlparse import urlparse, urlunparse
 
 
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
-    # Folsom
-    'folsom': 'precise-updates/folsom',
-    'precise-folsom': 'precise-updates/folsom',
-    'precise-folsom/updates': 'precise-updates/folsom',
-    'precise-updates/folsom': 'precise-updates/folsom',
-    'folsom/proposed': 'precise-proposed/folsom',
-    'precise-folsom/proposed': 'precise-proposed/folsom',
-    'precise-proposed/folsom': 'precise-proposed/folsom',
-    # Grizzly
-    'grizzly': 'precise-updates/grizzly',
-    'precise-grizzly': 'precise-updates/grizzly',
-    'precise-grizzly/updates': 'precise-updates/grizzly',
-    'precise-updates/grizzly': 'precise-updates/grizzly',
-    'grizzly/proposed': 'precise-proposed/grizzly',
-    'precise-grizzly/proposed': 'precise-proposed/grizzly',
-    'precise-proposed/grizzly': 'precise-proposed/grizzly',
-    # Havana
-    'havana': 'precise-updates/havana',
-    'precise-havana': 'precise-updates/havana',
-    'precise-havana/updates': 'precise-updates/havana',
-    'precise-updates/havana': 'precise-updates/havana',
-    'havana/proposed': 'precise-proposed/havana',
-    'precise-havana/proposed': 'precise-proposed/havana',
-    'precise-proposed/havana': 'precise-proposed/havana',
-    # Icehouse
-    'icehouse': 'precise-updates/icehouse',
-    'precise-icehouse': 'precise-updates/icehouse',
-    'precise-icehouse/updates': 'precise-updates/icehouse',
-    'precise-updates/icehouse': 'precise-updates/icehouse',
-    'icehouse/proposed': 'precise-proposed/icehouse',
-    'precise-icehouse/proposed': 'precise-proposed/icehouse',
-    'precise-proposed/icehouse': 'precise-proposed/icehouse',
-    # Juno
-    'juno': 'trusty-updates/juno',
-    'trusty-juno': 'trusty-updates/juno',
-    'trusty-juno/updates': 'trusty-updates/juno',
-    'trusty-updates/juno': 'trusty-updates/juno',
-    'juno/proposed': 'trusty-proposed/juno',
-    'trusty-juno/proposed': 'trusty-proposed/juno',
-    'trusty-proposed/juno': 'trusty-proposed/juno',
-    # Kilo
-    'kilo': 'trusty-updates/kilo',
-    'trusty-kilo': 'trusty-updates/kilo',
-    'trusty-kilo/updates': 'trusty-updates/kilo',
-    'trusty-updates/kilo': 'trusty-updates/kilo',
-    'kilo/proposed': 'trusty-proposed/kilo',
-    'trusty-kilo/proposed': 'trusty-proposed/kilo',
-    'trusty-proposed/kilo': 'trusty-proposed/kilo',
-    # Liberty
-    'liberty': 'trusty-updates/liberty',
-    'trusty-liberty': 'trusty-updates/liberty',
-    'trusty-liberty/updates': 'trusty-updates/liberty',
-    'trusty-updates/liberty': 'trusty-updates/liberty',
-    'liberty/proposed': 'trusty-proposed/liberty',
-    'trusty-liberty/proposed': 'trusty-proposed/liberty',
-    'trusty-proposed/liberty': 'trusty-proposed/liberty',
-    # Mitaka
-    'mitaka': 'trusty-updates/mitaka',
-    'trusty-mitaka': 'trusty-updates/mitaka',
-    'trusty-mitaka/updates': 'trusty-updates/mitaka',
-    'trusty-updates/mitaka': 'trusty-updates/mitaka',
-    'mitaka/proposed': 'trusty-proposed/mitaka',
-    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
-    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-    # Newton
-    'newton': 'xenial-updates/newton',
-    'xenial-newton': 'xenial-updates/newton',
-    'xenial-newton/updates': 'xenial-updates/newton',
-    'xenial-updates/newton': 'xenial-updates/newton',
-    'newton/proposed': 'xenial-proposed/newton',
-    'xenial-newton/proposed': 'xenial-proposed/newton',
-    'xenial-proposed/newton': 'xenial-proposed/newton',
-}
-
-
 # The order of this list is very important. Handlers should be listed in from
 # least- to most-specific URL matching.
 FETCH_HANDLERS = (
@@ -122,10 +35,6 @@ FETCH_HANDLERS = (
     'charmhelpers.fetch.giturl.GitUrlFetchHandler',
 )
 
-APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
-
 
 class SourceConfigError(Exception):
     pass
@@ -163,180 +72,37 @@ class BaseFetchHandler(object):
         return urlunparse(parts)
 
 
-def filter_installed_packages(packages):
-    """Returns a list of packages that require installation"""
-    cache = apt_cache()
-    _pkgs = []
-    for package in packages:
-        try:
-            p = cache[package]
-            p.current_ver or _pkgs.append(package)
-        except KeyError:
-            log('Package {} has no installation candidate.'.format(package),
-                level='WARNING')
-            _pkgs.append(package)
-    return _pkgs
-
-
-def apt_cache(in_memory=True, progress=None):
-    """Build and return an apt cache"""
-    from apt import apt_pkg
-    apt_pkg.init()
-    if in_memory:
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
-    return apt_pkg.Cache(progress)
-
-
-def apt_install(packages, options=None, fatal=False):
-    """Install one or more packages"""
-    if options is None:
-        options = ['--option=Dpkg::Options::=--force-confold']
-
-    cmd = ['apt-get', '--assume-yes']
-    cmd.extend(options)
-    cmd.append('install')
-    if isinstance(packages, six.string_types):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-    log("Installing {} with options: {}".format(packages,
-                                                options))
-    _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
-    """Upgrade all packages"""
-    if options is None:
-        options = ['--option=Dpkg::Options::=--force-confold']
-
-    cmd = ['apt-get', '--assume-yes']
-    cmd.extend(options)
-    if dist:
-        cmd.append('dist-upgrade')
-    else:
-        cmd.append('upgrade')
-    log("Upgrading with options: {}".format(options))
-    _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
-    """Update local apt cache"""
-    cmd = ['apt-get', 'update']
-    _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
-    """Purge one or more packages"""
-    cmd = ['apt-get', '--assume-yes', 'purge']
-    if isinstance(packages, six.string_types):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-    log("Purging {}".format(packages))
-    _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
-    """Flag one or more packages using apt-mark"""
-    log("Marking {} as {}".format(packages, mark))
-    cmd = ['apt-mark', mark]
-    if isinstance(packages, six.string_types):
-        cmd.append(packages)
-    else:
-        cmd.extend(packages)
-
-    if fatal:
-        subprocess.check_call(cmd, universal_newlines=True)
-    else:
-        subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
-    return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
-    return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
-    """Add a package source to this system.
-
-    @param source: a URL or sources.list entry, as supported by
-    add-apt-repository(1). Examples::
-
-        ppa:charmers/example
-        deb https://stub:key@private.example.com/ubuntu trusty main
-
-    In addition:
-        'proposed:' may be used to enable the standard 'proposed'
-        pocket for the release.
-        'cloud:' may be used to activate official cloud archive pockets,
-        such as 'cloud:icehouse'
-        'distro' may be used as a noop
-
-    @param key: A key to be added to the system's APT keyring and used
-    to verify the signatures on packages. Ideally, this should be an
-    ASCII format GPG public key including the block headers. A GPG key
-    id may also be used, but be aware that only insecure protocols are
-    available to retrieve the actual public key from a public keyserver
-    placing your Juju environment at risk. ppa and cloud archive keys
-    are securely added automtically, so sould not be provided.
-    """
-    if source is None:
-        log('Source is not present. Skipping')
-        return
-
-    if (source.startswith('ppa:') or
-        source.startswith('http') or
-        source.startswith('deb ') or
-            source.startswith('cloud-archive:')):
-        subprocess.check_call(['add-apt-repository', '--yes', source])
-    elif source.startswith('cloud:'):
-        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
-                    fatal=True)
-        pocket = source.split(':')[-1]
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
-            raise SourceConfigError(
-                'Unsupported cloud: source option %s' %
-                pocket)
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
-    elif source == 'proposed':
-        release = lsb_release()['DISTRIB_CODENAME']
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
-            apt.write(PROPOSED_POCKET.format(release))
-    elif source == 'distro':
-        pass
-    else:
-        log("Unknown source: {!r}".format(source))
-
-    if key:
-        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
-            with NamedTemporaryFile('w+') as key_file:
-                key_file.write(key)
-                key_file.flush()
-                key_file.seek(0)
-                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
-        else:
-            # Note that hkp: is in no way a secure protocol. Using a
-            # GPG key id is pointless from a security POV unless you
-            # absolutely trust your network and DNS.
-            subprocess.check_call(['apt-key', 'adv', '--keyserver',
-                                   'hkp://keyserver.ubuntu.com:80', '--recv',
-                                   key])
+__platform__ = get_platform()
+module = "charmhelpers.fetch.%s" % __platform__
+fetch = importlib.import_module(module)
+
+filter_installed_packages = fetch.filter_installed_packages
+install = fetch.install
+upgrade = fetch.upgrade
+update = fetch.update
+purge = fetch.purge
+add_source = fetch.add_source
+
+if __platform__ == "ubuntu":
+    apt_cache = fetch.apt_cache
+    apt_install = fetch.install
+    apt_update = fetch.update
+    apt_upgrade = fetch.upgrade
+    apt_purge = fetch.purge
+    apt_mark = fetch.apt_mark
+    apt_hold = fetch.apt_hold
+    apt_unhold = fetch.apt_unhold
+elif __platform__ == "centos":
+    yum_search = fetch.yum_search
 
 
 def configure_sources(update=False,
                       sources_var='install_sources',
                       keys_var='install_keys'):
-    """
-    Configure multiple sources from charm configuration.
+    """Configure multiple sources from charm configuration.
 
     The lists are encoded as yaml fragments in the configuration.
-    The frament needs to be included as a string. Sources and their
+    The fragment needs to be included as a string. Sources and their
     corresponding keys are of the types supported by add_source().
 
     Example config:
@@ -368,12 +134,11 @@ def configure_sources(update=False,
         for source, key in zip(sources, keys):
             add_source(source, key)
     if update:
-        apt_update(fatal=True)
+        fetch.update(fatal=True)
 
 
 def install_remote(source, *args, **kwargs):
-    """
-    Install a file tree from a remote source
+    """Install a file tree from a remote source.
 
     The specified source should be a url of the form:
         scheme://[host]/path[#[option=value][&...]]
@@ -406,6 +171,7 @@ def install_remote(source, *args, **kwargs):
 
 
 def install_from_config(config_var_name):
+    """Install a file from config."""
     charm_config = config()
     source = charm_config[config_var_name]
     return install_remote(source)
@@ -428,40 +194,3 @@ def plugins(fetch_handlers=None):
             log("FetchHandler {} not found, skipping plugin".format(
                 handler_name))
     return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
-    """
-    Run an APT command, checking output and retrying if the fatal flag is set
-    to True.
-
-    :param: cmd: str: The apt command to run.
-    :param: fatal: bool: Whether the command's output should be checked and
-            retried.
-    """
-    env = os.environ.copy()
-
-    if 'DEBIAN_FRONTEND' not in env:
-        env['DEBIAN_FRONTEND'] = 'noninteractive'
-
-    if fatal:
-        retry_count = 0
-        result = None
-
-        # If the command is considered "fatal", we need to retry if the apt
-        # lock was not acquired.
-
-        while result is None or result == APT_NO_LOCK:
-            try:
-                result = subprocess.check_call(cmd, env=env)
-            except subprocess.CalledProcessError as e:
-                retry_count = retry_count + 1
-                if retry_count > APT_NO_LOCK_RETRY_COUNT:
-                    raise
-                result = e.returncode
-                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
-                    "".format(APT_NO_LOCK_RETRY_DELAY))
-                time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
-    else:
-        subprocess.call(cmd, env=env)
@@ -18,19 +18,20 @@ from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource,
     filter_installed_packages,
-    apt_install,
+    install,
 )
 from charmhelpers.core.host import mkdir
 
 
 if filter_installed_packages(['bzr']) != []:
-    apt_install(['bzr'])
+    install(['bzr'])
     if filter_installed_packages(['bzr']) != []:
         raise NotImplementedError('Unable to install bzr')
 
 
 class BzrUrlFetchHandler(BaseFetchHandler):
-    """Handler for bazaar branches via generic and lp URLs"""
+    """Handler for bazaar branches via generic and lp URLs."""
 
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
171
hooks/charmhelpers/fetch/centos.py
Normal file
171
hooks/charmhelpers/fetch/centos.py
Normal file
@ -0,0 +1,171 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import six
|
||||||
|
import yum
|
||||||
|
|
||||||
|
from tempfile import NamedTemporaryFile
|
||||||
|
from charmhelpers.core.hookenv import log
|
||||||
|
|
||||||
|
YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM.
|
||||||
|
YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
|
||||||
|
YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
|
||||||
|
|
||||||
|
|
||||||
|
def filter_installed_packages(packages):
|
||||||
|
"""Return a list of packages that require installation."""
|
||||||
|
yb = yum.YumBase()
|
||||||
|
package_list = yb.doPackageLists()
|
||||||
|
temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
|
||||||
|
|
||||||
|
_pkgs = [p for p in packages if not temp_cache.get(p, False)]
|
||||||
|
return _pkgs
|
||||||
|
|
||||||
|
|
||||||
|
def install(packages, options=None, fatal=False):
|
||||||
|
"""Install one or more packages."""
|
||||||
|
cmd = ['yum', '--assumeyes']
|
||||||
|
if options is not None:
|
||||||
|
cmd.extend(options)
|
||||||
|
cmd.append('install')
|
||||||
|
if isinstance(packages, six.string_types):
|
||||||
|
cmd.append(packages)
|
||||||
|
else:
|
||||||
|
cmd.extend(packages)
|
||||||
|
log("Installing {} with options: {}".format(packages,
|
||||||
|
options))
|
||||||
|
_run_yum_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade(options=None, fatal=False, dist=False):
|
||||||
|
"""Upgrade all packages."""
|
||||||
|
cmd = ['yum', '--assumeyes']
|
||||||
|
if options is not None:
|
||||||
|
cmd.extend(options)
|
||||||
|
cmd.append('upgrade')
|
||||||
|
log("Upgrading with options: {}".format(options))
|
||||||
|
_run_yum_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def update(fatal=False):
|
||||||
|
"""Update local yum cache."""
|
||||||
|
cmd = ['yum', '--assumeyes', 'update']
|
||||||
|
log("Update with fatal: {}".format(fatal))
|
||||||
|
_run_yum_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def purge(packages, fatal=False):
|
||||||
|
"""Purge one or more packages."""
|
||||||
|
    cmd = ['yum', '--assumeyes', 'remove']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    _run_yum_command(cmd, fatal)


def yum_search(packages):
    """Search for a package."""
    output = {}
    cmd = ['yum', 'search']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Searching for {}".format(packages))
    result = subprocess.check_output(cmd)
    for package in list(packages):
        output[package] = package in result
    return output


def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL with an rpm package

    @param key: A key to be added to the system's keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver,
    placing your Juju environment at risk.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if source.startswith('http'):
        directory = '/etc/yum.repos.d/'
        for filename in os.listdir(directory):
            with open(directory + filename, 'r') as rpm_file:
                if source in rpm_file.read():
                    break
        else:
            log("Add source: {!r}".format(source))
            # Write the repository definition into Charms.repo.
            with open(directory + 'Charms.repo', 'a') as rpm_file:
                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
                rpm_file.write('name=%s\n' % source[7:])
                rpm_file.write('baseurl=%s\n\n' % source)
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['rpm', '--import', key_file.name])
        else:
            subprocess.check_call(['rpm', '--import', key])


def _run_yum_command(cmd, fatal=False):
    """Run a YUM command.

    Checks the output and retries if the fatal flag is set to True.

    :param: cmd: str: The yum command to run.
    :param: fatal: bool: Whether the command's output should be checked and
        retried.
    """
    env = os.environ.copy()

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the yum
        # lock was not acquired.
        while result is None or result == YUM_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as e:
                retry_count = retry_count + 1
                if retry_count > YUM_NO_LOCK_RETRY_COUNT:
                    raise
                result = e.returncode
                log("Couldn't acquire YUM lock. Will retry in {} seconds."
                    "".format(YUM_NO_LOCK_RETRY_DELAY))
                time.sleep(YUM_NO_LOCK_RETRY_DELAY)
    else:
        subprocess.call(cmd, env=env)
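As a rough illustration of how a charm might drive these CentOS helpers, a minimal sketch follows. The repository URL and package names are purely illustrative, and it assumes the module's install() and update() helpers, which mirror the Ubuntu ones that appear later in this diff:

    from charmhelpers.fetch.centos import add_source, update, install

    # Register an illustrative yum repository and refresh metadata.
    add_source('http://mirror.example.com/openstack/el7')
    update(fatal=True)
    # Install packages, retrying while the yum lock is held elsewhere.
    install(['openstack-cinder', 'python-six'], fatal=True)
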
@ -18,17 +18,18 @@ from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource,
     filter_installed_packages,
-    apt_install,
+    install,
 )

 if filter_installed_packages(['git']) != []:
-    apt_install(['git'])
+    install(['git'])
     if filter_installed_packages(['git']) != []:
         raise NotImplementedError('Unable to install git')


 class GitUrlFetchHandler(BaseFetchHandler):
-    """Handler for git branches via generic and github URLs"""
+    """Handler for git branches via generic and github URLs."""

     def can_handle(self, source):
         url_parts = self.parse_url(source)
         # TODO (mattyw) no support for ssh git@ yet
316 hooks/charmhelpers/fetch/ubuntu.py Normal file
@ -0,0 +1,316 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import six
import time
import subprocess

from tempfile import NamedTemporaryFile
from charmhelpers.core.host import (
    lsb_release
)
from charmhelpers.core.hookenv import log
from charmhelpers.fetch import SourceConfigError

CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""

PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""

CLOUD_ARCHIVE_POCKETS = {
    # Folsom
    'folsom': 'precise-updates/folsom',
    'precise-folsom': 'precise-updates/folsom',
    'precise-folsom/updates': 'precise-updates/folsom',
    'precise-updates/folsom': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'precise-folsom/proposed': 'precise-proposed/folsom',
    'precise-proposed/folsom': 'precise-proposed/folsom',
    # Grizzly
    'grizzly': 'precise-updates/grizzly',
    'precise-grizzly': 'precise-updates/grizzly',
    'precise-grizzly/updates': 'precise-updates/grizzly',
    'precise-updates/grizzly': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly',
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
    # Havana
    'havana': 'precise-updates/havana',
    'precise-havana': 'precise-updates/havana',
    'precise-havana/updates': 'precise-updates/havana',
    'precise-updates/havana': 'precise-updates/havana',
    'havana/proposed': 'precise-proposed/havana',
    'precise-havana/proposed': 'precise-proposed/havana',
    'precise-proposed/havana': 'precise-proposed/havana',
    # Icehouse
    'icehouse': 'precise-updates/icehouse',
    'precise-icehouse': 'precise-updates/icehouse',
    'precise-icehouse/updates': 'precise-updates/icehouse',
    'precise-updates/icehouse': 'precise-updates/icehouse',
    'icehouse/proposed': 'precise-proposed/icehouse',
    'precise-icehouse/proposed': 'precise-proposed/icehouse',
    'precise-proposed/icehouse': 'precise-proposed/icehouse',
    # Juno
    'juno': 'trusty-updates/juno',
    'trusty-juno': 'trusty-updates/juno',
    'trusty-juno/updates': 'trusty-updates/juno',
    'trusty-updates/juno': 'trusty-updates/juno',
    'juno/proposed': 'trusty-proposed/juno',
    'trusty-juno/proposed': 'trusty-proposed/juno',
    'trusty-proposed/juno': 'trusty-proposed/juno',
    # Kilo
    'kilo': 'trusty-updates/kilo',
    'trusty-kilo': 'trusty-updates/kilo',
    'trusty-kilo/updates': 'trusty-updates/kilo',
    'trusty-updates/kilo': 'trusty-updates/kilo',
    'kilo/proposed': 'trusty-proposed/kilo',
    'trusty-kilo/proposed': 'trusty-proposed/kilo',
    'trusty-proposed/kilo': 'trusty-proposed/kilo',
    # Liberty
    'liberty': 'trusty-updates/liberty',
    'trusty-liberty': 'trusty-updates/liberty',
    'trusty-liberty/updates': 'trusty-updates/liberty',
    'trusty-updates/liberty': 'trusty-updates/liberty',
    'liberty/proposed': 'trusty-proposed/liberty',
    'trusty-liberty/proposed': 'trusty-proposed/liberty',
    'trusty-proposed/liberty': 'trusty-proposed/liberty',
    # Mitaka
    'mitaka': 'trusty-updates/mitaka',
    'trusty-mitaka': 'trusty-updates/mitaka',
    'trusty-mitaka/updates': 'trusty-updates/mitaka',
    'trusty-updates/mitaka': 'trusty-updates/mitaka',
    'mitaka/proposed': 'trusty-proposed/mitaka',
    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
    # Newton
    'newton': 'xenial-updates/newton',
    'xenial-newton': 'xenial-updates/newton',
    'xenial-newton/updates': 'xenial-updates/newton',
    'xenial-updates/newton': 'xenial-updates/newton',
    'newton/proposed': 'xenial-proposed/newton',
    'xenial-newton/proposed': 'xenial-proposed/newton',
    'xenial-proposed/newton': 'xenial-proposed/newton',
}

APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
def filter_installed_packages(packages):
    """Return a list of packages that require installation."""
    cache = apt_cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
            p.current_ver or _pkgs.append(package)
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs


def apt_cache(in_memory=True, progress=None):
    """Build and return an apt cache."""
    from apt import apt_pkg
    apt_pkg.init()
    if in_memory:
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache(progress)


def install(packages, options=None, fatal=False):
    """Install one or more packages."""
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    _run_apt_command(cmd, fatal)


def upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages."""
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    if dist:
        cmd.append('dist-upgrade')
    else:
        cmd.append('upgrade')
    log("Upgrading with options: {}".format(options))
    _run_apt_command(cmd, fatal)


def update(fatal=False):
    """Update local apt cache."""
    cmd = ['apt-get', 'update']
    _run_apt_command(cmd, fatal)


def purge(packages, fatal=False):
    """Purge one or more packages."""
    cmd = ['apt-get', '--assume-yes', 'purge']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    _run_apt_command(cmd, fatal)


def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark."""
    log("Marking {} as {}".format(packages, mark))
    cmd = ['apt-mark', mark]
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)

    if fatal:
        subprocess.check_call(cmd, universal_newlines=True)
    else:
        subprocess.call(cmd, universal_newlines=True)


def apt_hold(packages, fatal=False):
    return apt_mark(packages, 'hold', fatal=fatal)


def apt_unhold(packages, fatal=False):
    return apt_mark(packages, 'unhold', fatal=fatal)
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver,
    placing your Juju environment at risk. PPA and cloud archive keys
    are securely added automatically, so they should not be provided.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if (source.startswith('ppa:') or
            source.startswith('http') or
            source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        install(filter_installed_packages(['ubuntu-cloud-keyring']),
                fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    elif source == 'distro':
        pass
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
        else:
            # Note that hkp: is in no way a secure protocol. Using a
            # GPG key id is pointless from a security POV unless you
            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])
def _run_apt_command(cmd, fatal=False):
    """Run an APT command.

    Checks the output and retries if the fatal flag is set
    to True.

    :param: cmd: str: The apt command to run.
    :param: fatal: bool: Whether the command's output should be checked and
        retried.
    """
    env = os.environ.copy()

    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the apt
        # lock was not acquired.
        while result is None or result == APT_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as e:
                retry_count = retry_count + 1
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
                    raise
                result = e.returncode
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
                    "".format(APT_NO_LOCK_RETRY_DELAY))
                time.sleep(APT_NO_LOCK_RETRY_DELAY)
    else:
        subprocess.call(cmd, env=env)
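A rough sketch of how a charm typically drives the helpers in this new module; the source string and package names below are illustrative only, but every function shown is defined in the file above:

    from charmhelpers.fetch.ubuntu import (
        add_source,
        update,
        install,
        filter_installed_packages,
    )

    # Enable an Ubuntu Cloud Archive pocket, refresh the index, then
    # install only the packages that are not already present.
    add_source('cloud:trusty-mitaka')
    update(fatal=True)
    install(filter_installed_packages(['cinder-common', 'cinder-volume']),
            fatal=True)
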
19 hooks/charmhelpers/osplatform.py Normal file
@ -0,0 +1,19 @@
import platform


def get_platform():
    """Return the current OS platform.

    For example: if the current OS platform is Ubuntu, the string "ubuntu"
    is returned (which is the name of the corresponding platform module).
    This string is used to decide which platform module should be imported.
    """
    tuple_platform = platform.linux_distribution()
    current_platform = tuple_platform[0]
    if "Ubuntu" in current_platform:
        return "ubuntu"
    elif "CentOS" in current_platform:
        return "centos"
    else:
        raise RuntimeError("This module is not supported on {}."
                           .format(current_platform))
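To show why the returned string matches the module names above, here is a minimal sketch of dispatching on it. The exact wiring inside charmhelpers.fetch is not part of this diff, so treat this as an assumption about how a caller could use get_platform rather than a description of the library's internals:

    import importlib

    from charmhelpers.osplatform import get_platform

    # "ubuntu" -> charmhelpers.fetch.ubuntu, "centos" -> charmhelpers.fetch.centos
    _platform = get_platform()
    _fetch = importlib.import_module('charmhelpers.fetch.' + _platform)

    # The per-platform modules expose a common surface (install, update,
    # add_source, ...), so calling code can stay platform-agnostic.
    _fetch.install(['rsync'], fatal=True)
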
@ -103,6 +103,7 @@ from charmhelpers.contrib.openstack.utils import (
     pause_unit,
     resume_unit,
     is_unit_paused_set,
+    os_application_version_set,
 )

 from charmhelpers.core.decorators import (
@ -176,6 +177,8 @@ APACHE_SITE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
 APACHE_SITE_24_CONF = '/etc/apache2/sites-available/' \
     'openstack_https_frontend.conf'

+VERSION_PACKAGE = 'cinder-common'
+
 TEMPLATES = 'templates/'

 # The interface is said to be satisfied if any one of the interfaces in
@ -925,6 +928,7 @@ def assess_status(configs):
     @returns None - this function is executed for its side-effect
     """
     assess_status_func(configs)()
+    os_application_version_set(VERSION_PACKAGE)


 def assess_status_func(configs):
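To make the commit message concrete: the only charm-side change is the extra call at the end of assess_status, and the heavy lifting happens in charm-helpers. The sketch below shows the intended flow; the body of os_application_version_set is paraphrased from charmhelpers.contrib.openstack.utils and should be read as an approximation, not a verbatim copy:

    from charmhelpers.core.hookenv import application_version_set
    from charmhelpers.core.host import get_upstream_version

    VERSION_PACKAGE = 'cinder-common'


    def os_application_version_set(package):
        # Approximate behaviour: look up the installed package version and
        # publish it as the Juju 2.0 application version for the charm.
        version = get_upstream_version(package)
        if version:
            application_version_set(version)


    def assess_status(configs):
        # assess_status_func(configs) builds the existing status-check
        # closure defined elsewhere in cinder_utils.
        assess_status_func(configs)()
        os_application_version_set(VERSION_PACKAGE)  # new in this change

Because assess_status runs after every hook execution and again on each update-status hook, package upgrades applied by normal system updates are picked up without any extra plumbing.
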
@ -83,6 +83,56 @@ class OpenStackAmuletUtils(AmuletUtils):
         if not found:
             return 'endpoint not found'

+    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
+        """Validate keystone v3 endpoint data.
+
+        Validate the v3 endpoint data which has changed from v2. The
+        ports are used to find the matching endpoint.
+
+        The new v3 endpoint data looks like:
+
+        [<Endpoint enabled=True,
+                   id=0432655fc2f74d1e9fa17bdaa6f6e60b,
+                   interface=admin,
+                   links={u'self': u'<RESTful URL of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=17f842a0dc084b928e476fafe67e4095,
+                   url=http://10.5.6.5:9312>,
+         <Endpoint enabled=True,
+                   id=6536cb6cb92f4f41bf22b079935c7707,
+                   interface=admin,
+                   links={u'self': u'<RESTful url of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
+                   url=http://10.5.6.6:35357/v3>,
+         ... ]
+        """
+        self.log.debug('Validating v3 endpoint data...')
+        self.log.debug('actual: {}'.format(repr(endpoints)))
+        found = []
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if ((admin_port in ep.url and ep.interface == 'admin') or
+                    (internal_port in ep.url and ep.interface == 'internal') or
+                    (public_port in ep.url and ep.interface == 'public')):
+                found.append(ep.interface)
+                # note we ignore the links member.
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'region_id': ep.region_id,
+                          'interface': self.not_null,
+                          'url': ep.url,
+                          'service_id': ep.service_id, }
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if len(found) != 3:
+            return 'Unexpected number of endpoints found'
+
     def validate_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.
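As an illustration of how an amulet test might call the new v3 validator: the client object, port strings and expected values below are assumptions made for the example (based on typical usage of these utils), not part of this diff; 'u' stands for the OpenStackAmuletUtils instance a test normally creates:

    import amulet

    endpoints = keystone_v3_client.endpoints.list()  # assumed keystoneclient v3 session
    expected = {
        'id': u.not_null,
        'region': 'RegionOne',
        'region_id': 'RegionOne',
        'interface': u.not_null,
        'url': u.valid_url,
        'service_id': u.not_null,
    }
    ret = u.validate_v3_endpoint_data(endpoints, '35357', '5000', '5000',
                                      expected)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)
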
@ -100,6 +150,72 @@ class OpenStackAmuletUtils(AmuletUtils):
                 return "endpoint {} does not exist".format(k)
         return ret

+    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate the keystone v3 catalog endpoint data.
+
+        Validate a list of dictionaries that make up the keystone v3 service
+        catalogue.
+
+        It is in the form of:
+
+        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
+                        u'interface': u'admin',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:35357/v3'},
+                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
+                        u'interface': u'public',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'},
+                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
+                        u'interface': u'internal',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'}],
+         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
+                           u'interface': u'public',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
+                           u'interface': u'internal',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
+                           u'interface': u'admin',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9312'}]}
+
+        Note that an added complication is that the order of admin, public
+        and internal endpoints (the 'interface' key) is not guaranteed
+        within each region.
+
+        Thus, the function sorts the expected and actual lists using the
+        interface key as a sort key, prior to the comparison.
+        """
+        self.log.debug('Validating v3 service catalog endpoint data...')
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for k, v in six.iteritems(expected):
+            if k in actual:
+                l_expected = sorted(v, key=lambda x: x['interface'])
+                l_actual = sorted(actual[k], key=lambda x: x['interface'])
+                if len(l_actual) != len(l_expected):
+                    return ("endpoint {} has differing number of interfaces "
+                            " - expected({}), actual({})"
+                            .format(k, len(l_expected), len(l_actual)))
+                for i_expected, i_actual in zip(l_expected, l_actual):
+                    self.log.debug("checking interface {}"
+                                   .format(i_expected['interface']))
+                    ret = self._validate_dict_data(i_expected, i_actual)
+                    if ret:
+                        return self.endpoint_error(k, ret)
+            else:
+                return "endpoint {} does not exist".format(k)
+        return ret
+
     def validate_tenant_data(self, expected, actual):
         """Validate tenant data.
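A small self-contained illustration of the sorting trick the docstring describes; the endpoint values here are made up for the example:

    expected_identity = [
        {'interface': 'admin', 'url': 'http://10.5.5.224:35357/v3'},
        {'interface': 'public', 'url': 'http://10.5.5.224:5000/v3'},
        {'interface': 'internal', 'url': 'http://10.5.5.224:5000/v3'},
    ]
    # The catalogue may list the same endpoints in any order; sorting both
    # sides on 'interface' makes the pairwise comparison deterministic.
    actual_identity = list(reversed(expected_identity))
    assert (sorted(expected_identity, key=lambda x: x['interface']) ==
            sorted(actual_identity, key=lambda x: x['interface']))
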
@ -928,7 +1044,8 @@ class OpenStackAmuletUtils(AmuletUtils):
                 retry_delay=5,
                 socket_timeout=1)
             connection = pika.BlockingConnection(parameters)
-            assert connection.server_properties['product'] == 'RabbitMQ'
+            assert connection.is_open is True
+            assert connection.is_closing is False
             self.log.debug('Connect OK')
             return connection
         except Exception as e:
@ -63,6 +63,7 @@ TO_PATCH = [
     'is_elected_leader',
     'templating',
     'install_alternative',
+    'os_application_version_set',
     # fetch
     'apt_update',
     'apt_upgrade',
@ -977,6 +978,9 @@ class TestCinderUtils(CharmTestCase):
         cinder_utils.assess_status('test-config')
         asf.assert_called_once_with('test-config')
         callee.assert_called_once_with()
+        self.os_application_version_set.assert_called_with(
+            'cinder-common'
+        )

     @patch.object(cinder_utils, 'get_optional_interfaces')
     @patch.object(cinder_utils, 'check_optional_relations')