Sync charm-helpers

Change-Id: I56e04d87484bff31e7800642b4355cbeca7cd581
Chris MacNaughton 2019-07-12 15:03:32 +02:00 committed by Liam Young
parent 9364440075
commit 8527adaae0
7 changed files with 85 additions and 13 deletions

@@ -33,6 +33,7 @@ from charmhelpers.core.hookenv import (
hook_name,
local_unit,
log,
relation_get,
relation_ids,
relation_set,
relations_of_type,
@@ -260,11 +261,23 @@ class NRPE(object):
relation = relation_ids('nrpe-external-master')
if relation:
log("Setting charm primary status {}".format(primary))
for rid in relation_ids('nrpe-external-master'):
for rid in relation:
relation_set(relation_id=rid, relation_settings={'primary': self.primary})
self.remove_check_queue = set()
def add_check(self, *args, **kwargs):
shortname = None
if kwargs.get('shortname') is None:
if len(args) > 0:
shortname = args[0]
else:
shortname = kwargs['shortname']
self.checks.append(Check(*args, **kwargs))
try:
self.remove_check_queue.remove(shortname)
except KeyError:
pass
def remove_check(self, *args, **kwargs):
if kwargs.get('shortname') is None:
@@ -281,6 +294,7 @@ class NRPE(object):
check = Check(*args, **kwargs)
check.remove(self.hostname)
self.remove_check_queue.add(kwargs['shortname'])
def write(self):
try:
@@ -313,7 +327,24 @@ class NRPE(object):
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
for rid in monitor_ids:
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
reldata = relation_get(unit=local_unit(), rid=rid)
if 'monitors' in reldata:
# update the existing set of monitors with the new data
old_monitors = yaml.safe_load(reldata['monitors'])
old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
# remove keys that are in the remove_check_queue
old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
if k not in self.remove_check_queue}
# update/add nrpe_monitors
old_nrpe_monitors.update(nrpe_monitors)
old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
# write back to the relation
relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
else:
# write a brand new set of monitors, as no existing ones.
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
self.remove_check_queue.clear()
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
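
For context, a minimal usage sketch (not part of this sync) of the new queue-based check removal; the check names and command below are illustrative:

from charmhelpers.contrib.charmsupport.nrpe import NRPE

# Sketch only: 'load_custom' and 'old_disk_check' are hypothetical names.
nrpe = NRPE()
nrpe.add_check(
    shortname='load_custom',
    description='Custom load check',
    check_cmd='check_load -w 5,5,5 -c 10,10,10',
)
# Queue a stale check for removal; write() now prunes it from the
# 'monitors' data already published on the relation instead of only
# republishing the currently defined checks.
nrpe.remove_check(shortname='old_disk_check')
nrpe.write()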

@@ -120,6 +120,7 @@ OPENSTACK_RELEASES = (
'queens',
'rocky',
'stein',
'train',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
@@ -139,6 +140,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
])
@@ -159,6 +161,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2018.1', 'queens'),
('2018.2', 'rocky'),
('2019.1', 'stein'),
('2019.2', 'train'),
])
# The ugly duckling - must list releases oldest to newest
@@ -194,7 +197,9 @@ SWIFT_CODENAMES = OrderedDict([
('rocky',
['2.18.0', '2.19.0']),
('stein',
['2.20.0']),
['2.20.0', '2.21.0']),
('train',
['2.22.0']),
])
# >= Liberty version->codename mapping
@@ -208,6 +213,7 @@ PACKAGE_CODENAMES = {
('17', 'queens'),
('18', 'rocky'),
('19', 'stein'),
('20', 'train'),
]),
'neutron-common': OrderedDict([
('7', 'liberty'),
@@ -218,6 +224,7 @@ PACKAGE_CODENAMES = {
('12', 'queens'),
('13', 'rocky'),
('14', 'stein'),
('15', 'train'),
]),
'cinder-common': OrderedDict([
('7', 'liberty'),
@@ -228,6 +235,7 @@ PACKAGE_CODENAMES = {
('12', 'queens'),
('13', 'rocky'),
('14', 'stein'),
('15', 'train'),
]),
'keystone': OrderedDict([
('8', 'liberty'),
@@ -238,6 +246,7 @@ PACKAGE_CODENAMES = {
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
]),
'horizon-common': OrderedDict([
('8', 'liberty'),
@@ -248,6 +257,7 @@ PACKAGE_CODENAMES = {
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
]),
'ceilometer-common': OrderedDict([
('5', 'liberty'),
@@ -258,6 +268,7 @@ PACKAGE_CODENAMES = {
('10', 'queens'),
('11', 'rocky'),
('12', 'stein'),
('13', 'train'),
]),
'heat-common': OrderedDict([
('5', 'liberty'),
@@ -268,6 +279,7 @@ PACKAGE_CODENAMES = {
('10', 'queens'),
('11', 'rocky'),
('12', 'stein'),
('13', 'train'),
]),
'glance-common': OrderedDict([
('11', 'liberty'),
@@ -278,6 +290,7 @@ PACKAGE_CODENAMES = {
('16', 'queens'),
('17', 'rocky'),
('18', 'stein'),
('19', 'train'),
]),
'openstack-dashboard': OrderedDict([
('8', 'liberty'),
@@ -288,6 +301,7 @@ PACKAGE_CODENAMES = {
('13', 'queens'),
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
]),
}
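
As a hedged illustration (not part of this diff), the new 'train' entries feed the release comparison helpers defined in the same module:

from charmhelpers.contrib.openstack.utils import (
    CompareOpenStackReleases,
    os_release,
)

# Sketch: gate Train-only behaviour on the detected release; the package
# name 'keystone' is just an example.
if CompareOpenStackReleases(os_release('keystone')) >= 'train':
    pass  # Train-or-later code path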

@@ -1482,13 +1482,28 @@ def send_request_if_needed(request, relation='ceph'):
relation_set(relation_id=rid, broker_req=request.request)
def has_broker_rsp(rid=None, unit=None):
"""Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
:param rid: The relation to check (default of None means current relation)
:type rid: Union[str, None]
:param unit: The remote unit to check (default of None means current unit)
:type unit: Union[str, None]
:returns: True if broker key exists and is set to something 'truthy'
:rtype: bool
"""
rdata = relation_get(rid=rid, unit=unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
return True if broker_rsp else False
def is_broker_action_done(action, rid=None, unit=None):
"""Check whether broker action has completed yet.
@param action: name of action to be performed
@returns True if action complete otherwise False
"""
rdata = relation_get(rid, unit) or {}
rdata = relation_get(rid=rid, unit=unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return False
@@ -1510,7 +1525,7 @@ def mark_broker_action_done(action, rid=None, unit=None):
@param action: name of action to be performed
@returns None
"""
rdata = relation_get(rid, unit) or {}
rdata = relation_get(rid=rid, unit=unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
if not broker_rsp:
return
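
A minimal sketch (not part of this commit) of how a client hook might use the new has_broker_rsp() helper; configure_pools() is a hypothetical charm function:

from charmhelpers.contrib.storage.linux.ceph import has_broker_rsp

# Defaults inspect the relation currently in context; rid/unit can be
# passed explicitly when iterating relations.
if has_broker_rsp():
    configure_pools()  # hypothetical follow-up step in the charm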

@@ -110,17 +110,19 @@ def is_device_mounted(device):
return bool(re.search(r'MOUNTPOINT=".+"', out))
def mkfs_xfs(device, force=False):
def mkfs_xfs(device, force=False, inode_size=1024):
"""Format device with XFS filesystem.
By default this should fail if the device already has a filesystem on it.
:param device: Full path to device to format
:ptype device: tr
:param force: Force operation
:ptype: force: boolean"""
:ptype: force: boolean
:param inode_size: XFS inode size in bytes
:ptype inode_size: int"""
cmd = ['mkfs.xfs']
if force:
cmd.append("-f")
cmd += ['-i', 'size=1024', device]
cmd += ['-i', "size={}".format(inode_size), device]
check_call(cmd)
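
For illustration (device paths assumed), the new inode_size parameter keeps the previous 1024-byte default while allowing an override:

from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

mkfs_xfs('/dev/vdb', force=True)                  # same as before: -i size=1024
mkfs_xfs('/dev/vdc', force=True, inode_size=512)  # explicit smaller inodes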

@@ -173,6 +173,14 @@ CLOUD_ARCHIVE_POCKETS = {
'stein/proposed': 'bionic-proposed/stein',
'bionic-stein/proposed': 'bionic-proposed/stein',
'bionic-proposed/stein': 'bionic-proposed/stein',
# Train
'train': 'bionic-updates/train',
'bionic-train': 'bionic-updates/train',
'bionic-train/updates': 'bionic-updates/train',
'bionic-updates/train': 'bionic-updates/train',
'train/proposed': 'bionic-proposed/train',
'bionic-train/proposed': 'bionic-proposed/train',
'bionic-proposed/train': 'bionic-proposed/train',
}
@@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False):
for r, fn in six.iteritems(_mapping):
m = re.match(r, source)
if m:
# call the assoicated function with the captured groups
# raises SourceConfigError on error.
fn(*m.groups())
if key:
# Import key before adding the source which depends on it,
# as refreshing packages could fail otherwise.
try:
import_key(key)
except GPGKeyError as e:
raise SourceConfigError(str(e))
# call the associated function with the captured groups
# raises SourceConfigError on error.
fn(*m.groups())
break
else:
# nothing matched. log an error and maybe sys.exit
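
As a hedged usage sketch, the new Train aliases let a source such as 'cloud:bionic-train' resolve to the bionic-updates/train pocket:

from charmhelpers.fetch import add_source, apt_update

# 'cloud:train' and 'cloud:bionic-train' both map to bionic-updates/train
# via the aliases added above.
add_source('cloud:bionic-train')
apt_update(fatal=True)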

@@ -77,7 +77,7 @@ class HAClusterBasicDeployment(OpenStackAmuletDeployment):
def _add_services(self):
this_service = {'name': 'hacluster'}
other_services = [
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
self.get_percona_service_entry(),
{'name': 'keystone', 'units': NUM_UNITS},
]
super(HAClusterBasicDeployment, self)._add_services(this_service,

@@ -61,7 +61,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-stein --no-destroy
[testenv:func27-dfs]
# Charm Functional Test