Check peers before emitting identity data
The checks that keystone was performing before emitting identity data were applicable to any OpenStack API charm, so the check definitions have been moved to charmhelpers so that other charms can use them. The checks as they were are encapsulated in `check_api_unit_ready` *1. Bug 1818113 was caused by keystone emitting identity data as soon as the leader was ready, while ignoring the state of the peer units. This is now covered by a new check, `check_api_application_ready` *2, which performs all the local unit checks and then checks that all peers have reported as ready too. In addition, `check_api_unit_ready` is now used when setting the unit's workload status, and `check_api_application_ready` is used when setting the application workload status. *1 https://github.com/juju/charm-helpers/blob/master/charmhelpers/contrib/openstack/utils.py#L2289 *2 https://github.com/juju/charm-helpers/blob/master/charmhelpers/contrib/openstack/utils.py#L2330 Change-Id: I99830ab2c2482e8beef174424820907ce96fd5d7 Closes-Bug: #1818113
This commit is contained in:
parent
e01530959c
commit
53bcfd0a06
@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
# Common python helper functions used for OpenStack charms.
|
||||
from collections import OrderedDict
|
||||
from collections import OrderedDict, namedtuple
|
||||
from functools import wraps
|
||||
|
||||
import subprocess
|
||||
@ -36,15 +36,20 @@ from charmhelpers.contrib.network import ip
|
||||
from charmhelpers.core import unitdata
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
WL_STATES,
|
||||
action_fail,
|
||||
action_set,
|
||||
config,
|
||||
expected_peer_units,
|
||||
expected_related_units,
|
||||
log as juju_log,
|
||||
charm_dir,
|
||||
INFO,
|
||||
ERROR,
|
||||
metadata,
|
||||
related_units,
|
||||
relation_get,
|
||||
relation_id,
|
||||
relation_ids,
|
||||
relation_set,
|
||||
status_set,
|
||||
@ -53,6 +58,7 @@ from charmhelpers.core.hookenv import (
|
||||
cached,
|
||||
leader_set,
|
||||
leader_get,
|
||||
local_unit,
|
||||
)
|
||||
|
||||
from charmhelpers.core.strutils import (
|
||||
@ -108,6 +114,10 @@ from charmhelpers.contrib.openstack.policyd import (
|
||||
POLICYD_CONFIG_NAME,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.ha.utils import (
|
||||
expect_ha,
|
||||
)
|
||||
|
||||
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
||||
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
|
||||
|
||||
@ -1810,6 +1820,16 @@ def os_application_version_set(package):
|
||||
application_version_set(application_version)
|
||||
|
||||
|
||||
def os_application_status_set(check_function):
    """Set the application workload status from a check function.

    The supplied callable must return a ``(state, message)`` tuple; the
    result is forwarded to ``status_set`` with the application flag set.

    :param check_function: Callable returning app state and message.
    :type check_function: function
    """
    status_set(*check_function(), application_status=True)
|
||||
|
||||
|
||||
def enable_memcache(source=None, release=None, package=None):
|
||||
"""Determine if memcache should be enabled on the local unit
|
||||
|
||||
@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None):
|
||||
'WARN')
|
||||
pass
|
||||
return True in notifications
|
||||
|
||||
|
||||
@cached
def container_scoped_relations():
    """Get all the container scoped relations

    :returns: List of relation names
    :rtype: List
    """
    md = metadata()
    return [
        name
        for section in ('provides', 'requires', 'peers')
        for name in md.get(section, [])
        if md[section][name].get('scope') == 'container']
|
||||
|
||||
|
||||
def is_db_ready(use_current_context=False, rel_name=None):
    """Check remote database is ready to be used.

    Database relations are expected to provide a list of 'allowed' units to
    confirm that the database is ready for use by those units.

    If db relation has provided this information and local unit is a member,
    returns True otherwise False.

    :param use_current_context: Whether to limit checks to current hook
                                context.
    :type use_current_context: bool
    :param rel_name: Name of relation to check
    :type rel_name: string
    :returns: Whether remote db is ready.
    :rtype: bool
    :raises: Exception
    """
    key = 'allowed_units'
    rel_name = rel_name or 'shared-db'
    this_unit = local_unit()

    if use_current_context:
        # Only valid when called from within a hook of the db relation.
        if relation_id() not in relation_ids(rel_name):
            raise Exception("use_current_context=True but not in {} "
                            "rel hook contexts (currently in {})."
                            .format(rel_name, relation_id()))
        candidates = [(None, None)]
    else:
        candidates = [(r_id, u)
                      for r_id in relation_ids(rel_name)
                      for u in related_units(r_id)]

    for rid, unit in candidates:
        allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
        if not allowed_units:
            continue
        if this_unit in allowed_units.split():
            juju_log("This unit ({}) is in allowed unit list from {}".format(
                this_unit,
                unit), 'DEBUG')
            return True

    juju_log("This unit was not found in any allowed unit list")
    return False
|
||||
|
||||
|
||||
def is_expected_scale(peer_relation_name='cluster'):
    """Query juju goal-state to determine whether our peer- and dependency-
    relations are at the expected scale.

    Useful for deferring per unit per relation housekeeping work until we are
    ready to complete it successfully and without unnecessary repetition.

    Always returns True if version of juju used does not support goal-state.

    :param peer_relation_name: Name of peer relation
    :type peer_relation_name: string
    :returns: True or False
    :rtype: bool
    """
    def _first_rid(rel_type):
        # First relation id of the given type, or None if unrelated.
        return next(iter(relation_ids(reltype=rel_type)), None)

    Relation = namedtuple('Relation', 'rel_type rel_id')
    peer_rid = _first_rid(peer_relation_name)
    # Units with no peers should still have a peer relation.
    if not peer_rid:
        juju_log('Not at expected scale, no peer relation found', 'DEBUG')
        return False
    expected_relations = [
        Relation(rel_type='shared-db', rel_id=_first_rid('shared-db'))]
    if expect_ha():
        expected_relations.append(
            Relation(rel_type='ha', rel_id=_first_rid('ha')))
    juju_log(
        'Checking scale of {} relations'.format(
            ','.join([r.rel_type for r in expected_relations])),
        'DEBUG')
    try:
        if (len(related_units(relid=peer_rid)) <
                len(list(expected_peer_units()))):
            return False
        for rel in expected_relations:
            if not rel.rel_id:
                juju_log(
                    'Expected to find {} relation, but it is missing'.format(
                        rel.rel_type),
                    'DEBUG')
                return False
            # Goal state returns every unit even for container scoped
            # relations but the charm only ever has a relation with
            # the local unit.
            if rel.rel_type in container_scoped_relations():
                expected_count = 1
            else:
                expected_count = len(
                    list(expected_related_units(reltype=rel.rel_type)))
            if len(related_units(relid=rel.rel_id)) < expected_count:
                juju_log(
                    ('Not at expected scale, not enough units on {} '
                     'relation'.format(rel.rel_type)),
                    'DEBUG')
                return False
    except NotImplementedError:
        # This version of juju does not support goal-state; assume at scale.
        return True
    juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
    return True
|
||||
|
||||
|
||||
def get_peer_key(unit_name):
    """Get the peer key for this unit.

    The peer key is the key a unit uses to publish its status down the peer
    relation.

    :param unit_name: Name of unit
    :type unit_name: string
    :returns: Peer key for given unit
    :rtype: string
    """
    # Relation keys may not contain '/', so flatten the unit name.
    sanitised_name = unit_name.replace('/', '-')
    return 'unit-state-{}'.format(sanitised_name)
|
||||
|
||||
|
||||
UNIT_READY = 'READY'
|
||||
UNIT_NOTREADY = 'NOTREADY'
|
||||
UNIT_UNKNOWN = 'UNKNOWN'
|
||||
UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
|
||||
|
||||
|
||||
def inform_peers_unit_state(state, relation_name='cluster'):
    """Publish the state of this unit down the peer relation.

    :param state: State of unit to publish
    :type state: string
    :param relation_name: Name of relation to publish state on
    :type relation_name: string
    :raises: ValueError if state is not one of UNIT_STATES
    """
    if state not in UNIT_STATES:
        raise ValueError(
            "Setting invalid state {} for unit".format(state))
    # The key is derived from the local unit name; same payload on every
    # relation id of the peer relation.
    payload = {get_peer_key(local_unit()): state}
    for rid in relation_ids(relation_name):
        relation_set(relation_id=rid, relation_settings=payload)
|
||||
|
||||
|
||||
def get_peers_unit_state(relation_name='cluster'):
    """Get the state of all peers.

    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    :returns: Unit states keyed on unit name.
    :rtype: dict
    :raises: ValueError if a peer reports an unrecognised state
    """
    unit_states = {}
    for rid in relation_ids(relation_name):
        for unit in related_units(rid):
            settings = relation_get(unit=unit, rid=rid)
            # Peers that have never published a state default to UNKNOWN.
            state = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
            if state not in UNIT_STATES:
                raise ValueError(
                    "Unit in unknown state {}".format(state))
            unit_states[unit] = state
    return unit_states
|
||||
|
||||
|
||||
def are_peers_ready(relation_name='cluster'):
    """Check if all peers are ready.

    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    :returns: Whether all units are ready.
    :rtype: bool
    """
    states = get_peers_unit_state(relation_name).values()
    return all(state == UNIT_READY for state in states)
|
||||
|
||||
|
||||
def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
    """Inform peers if this unit is ready.

    The check function must return a ``(ready, message)`` tuple; a truthy
    ``ready`` publishes 'READY' to peers, anything else publishes 'NOTREADY'.

    :param check_unit_ready_func: Function to run to check readiness
    :type check_unit_ready_func: function
    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    """
    unit_ready, _ = check_unit_ready_func()
    state = UNIT_READY if unit_ready else UNIT_NOTREADY
    juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
    inform_peers_unit_state(state, relation_name)
|
||||
|
||||
|
||||
def check_api_unit_ready(check_db_ready=True):
    """Check if this unit is ready.

    :param check_db_ready: Include checks of database readiness.
    :type check_db_ready: bool
    :returns: Whether unit state is ready and status message
    :rtype: (bool, str)
    """
    state, message = get_api_unit_status(check_db_ready=check_db_ready)
    is_ready = state == WL_STATES.ACTIVE
    return is_ready, message
|
||||
|
||||
|
||||
def get_api_unit_status(check_db_ready=True):
    """Return a workload status and message for this unit.

    The checks are ordered so the first failing condition wins: database
    maintenance mode, then an administratively paused unit, then the
    various 'waiting' conditions; otherwise the unit is active.

    :param check_db_ready: Include checks of database readiness.
    :type check_db_ready: bool
    :returns: Workload state and message
    :rtype: (WL_STATES, str)
    """
    unit_state = WL_STATES.ACTIVE
    msg = 'Unit is ready'
    if is_db_maintenance_mode():
        unit_state = WL_STATES.MAINTENANCE
        msg = 'Database in maintenance mode.'
    elif is_unit_paused_set():
        unit_state = WL_STATES.BLOCKED
        msg = 'Unit paused.'
    elif check_db_ready and not is_db_ready():
        unit_state = WL_STATES.WAITING
        msg = 'Allowed_units list provided but this unit not present'
    elif not is_db_initialised():
        unit_state = WL_STATES.WAITING
        msg = 'Database not initialised'
    elif not is_expected_scale():
        unit_state = WL_STATES.WAITING
        msg = 'Charm and its dependencies not yet at expected scale'
    juju_log(msg, 'DEBUG')
    return unit_state, msg
|
||||
|
||||
|
||||
def check_api_application_ready():
    """Check if this application is ready.

    :returns: Whether application state is ready and status message
    :rtype: (bool, str)
    """
    state, message = get_api_application_status()
    is_ready = state == WL_STATES.ACTIVE
    return is_ready, message
|
||||
|
||||
|
||||
def get_api_application_status():
    """Return a workload status and message for this application.

    The application is only reported active when the local unit checks
    pass AND every peer has reported itself ready; otherwise the local
    unit's own state (or 'waiting' on peers) is returned.

    :returns: Workload state and message
    :rtype: (WL_STATES, str)
    """
    app_state, msg = get_api_unit_status()
    if app_state == WL_STATES.ACTIVE:
        if are_peers_ready():
            return WL_STATES.ACTIVE, 'Application Ready'
        else:
            return WL_STATES.WAITING, 'Some units are not ready'
    return app_state, msg
|
||||
|
@ -140,9 +140,16 @@ def vault_relation_complete(backend=None):
|
||||
:ptype backend: string
|
||||
:returns: whether the relation to vault is complete
|
||||
:rtype: bool"""
|
||||
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
|
||||
vault_kv()
|
||||
return vault_kv.complete
|
||||
try:
|
||||
import hvac
|
||||
except ImportError:
|
||||
return False
|
||||
try:
|
||||
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
|
||||
vault_kv()
|
||||
return vault_kv.complete
|
||||
except hvac.exceptions.InvalidRequest:
|
||||
return False
|
||||
|
||||
|
||||
# TODO: contrib a high level unwrap method to hvac that works
|
||||
|
@ -22,6 +22,7 @@
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import collections
|
||||
import errno
|
||||
import hashlib
|
||||
import math
|
||||
@ -93,6 +94,88 @@ LEGACY_PG_COUNT = 200
|
||||
DEFAULT_MINIMUM_PGS = 2
|
||||
|
||||
|
||||
class OsdPostUpgradeError(Exception):
    """Raised when an OSD post-upgrade operation fails."""
|
||||
|
||||
|
||||
class OSDSettingConflict(Exception):
    """Raised when clients request conflicting osd settings."""
|
||||
|
||||
|
||||
class OSDSettingNotAllowed(Exception):
    """Raised when a requested osd setting is not whitelisted."""
|
||||
|
||||
|
||||
OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed)
|
||||
|
||||
OSD_SETTING_WHITELIST = [
|
||||
'osd heartbeat grace',
|
||||
'osd heartbeat interval',
|
||||
]
|
||||
|
||||
|
||||
def _order_dict_by_key(rdict):
|
||||
"""Convert a dictionary into an OrderedDict sorted by key.
|
||||
|
||||
:param rdict: Dictionary to be ordered.
|
||||
:type rdict: dict
|
||||
:returns: Ordered Dictionary.
|
||||
:rtype: collections.OrderedDict
|
||||
"""
|
||||
return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
|
||||
|
||||
|
||||
def get_osd_settings(relation_name):
    """Consolidate requested osd settings from all clients.

    Consolidate requested osd settings from all clients. Check that the
    requested setting is on the whitelist and it does not conflict with
    any other requested settings.

    :returns: Dictionary of settings
    :rtype: dict

    :raises: OSDSettingNotAllowed
    :raises: OSDSettingConflict
    """
    osd_settings = {}
    for relid in relation_ids(relation_name):
        for unit in related_units(relid):
            raw = relation_get('osd-settings', unit, relid) or '{}'
            for key, value in json.loads(raw).items():
                if key not in OSD_SETTING_WHITELIST:
                    raise OSDSettingNotAllowed(
                        'Illegal settings "{}"'.format(key))
                if key in osd_settings and osd_settings[key] != value:
                    raise OSDSettingConflict(
                        'Conflicting settings for "{}"'.format(key))
                osd_settings[key] = value
    return _order_dict_by_key(osd_settings)
|
||||
|
||||
|
||||
def send_osd_settings():
    """Pass on requested OSD settings to osd units."""
    try:
        settings = get_osd_settings('client')
    except OSD_SETTING_EXCEPTIONS as e:
        # There is a problem with the settings, not passing them on. Update
        # status will notify the user.
        log(e, level=ERROR)
        return
    # sort_keys keeps the serialised payload stable across hook runs.
    payload = {'osd-settings': json.dumps(settings, sort_keys=True)}
    for relid in relation_ids('osd'):
        relation_set(relation_id=relid, relation_settings=payload)
|
||||
|
||||
|
||||
def validator(value, valid_type, valid_range=None):
|
||||
"""
|
||||
Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
|
||||
@ -1635,5 +1718,67 @@ class CephConfContext(object):
|
||||
continue
|
||||
|
||||
ceph_conf[key] = conf[key]
|
||||
|
||||
return ceph_conf
|
||||
|
||||
|
||||
class CephOSDConfContext(CephConfContext):
    """Ceph config (ceph.conf) context for OSD units.

    Consolidates settings from config-flags via CephConfContext with
    settings provided by the mons. The config-flag values are preserved in
    conf['osd'], settings from the mons which do not clash with config-flag
    settings are in conf['osd_from_client'] and finally settings which do
    clash are in conf['osd_from_client_conflict']. Rather than silently drop
    the conflicting settings they are provided in the context so they can be
    rendered commented out to give some visibility to the admin.
    """

    def __init__(self, permitted_sections=None):
        super(CephOSDConfContext, self).__init__(
            permitted_sections=permitted_sections)
        try:
            self.settings_from_mons = get_osd_settings('mon')
        except OSDSettingConflict:
            # Inconsistent requests from the mons cannot be applied safely;
            # fall back to an empty set of mon-provided settings.
            log(
                "OSD settings from mons are inconsistent, ignoring them",
                level=WARNING)
            self.settings_from_mons = {}

    def filter_osd_from_mon_settings(self):
        """Split mon-provided settings by conflict with config-flags.

        :returns: A tuple (
            ,config-flag values,
            ,client settings which do not conflict with config-flag values,
            ,client settings which conflict with config-flag values)
        :rtype: (OrderedDict, OrderedDict, OrderedDict)
        """
        ceph_conf = super(CephOSDConfContext, self).__call__()
        osd_section = ceph_conf.get('osd', {})
        conflicting = {}
        clear = {}
        for key, value in self.settings_from_mons.items():
            if key not in osd_section:
                clear[key] = value
            elif osd_section[key] != value:
                conflicting[key] = value
        return (ceph_conf,
                _order_dict_by_key(clear),
                _order_dict_by_key(conflicting))

    def __call__(self):
        """Construct OSD config context.

        Standard context with two additional special keys:
            osd_from_client_conflict: client settings which conflict with
                                      config-flag values
            osd_from_client: settings which do not conflict with config-flag
                             values

        :returns: OSD config context dict.
        :rtype: dict
        """
        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
        conf['osd_from_client_conflict'] = osd_conflict
        conf['osd_from_client'] = osd_clear
        return conf
|
||||
|
@ -32,6 +32,10 @@ def loopback_devices():
|
||||
|
||||
/dev/loop0: [0807]:961814 (/tmp/my.img)
|
||||
|
||||
or:
|
||||
|
||||
/dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
|
||||
|
||||
:returns: dict: a dict mapping {loopback_dev: backing_file}
|
||||
'''
|
||||
loopbacks = {}
|
||||
@ -39,9 +43,9 @@ def loopback_devices():
|
||||
output = check_output(cmd)
|
||||
if six.PY3:
|
||||
output = output.decode('utf-8')
|
||||
devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
|
||||
devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
|
||||
for dev, _, f in devs:
|
||||
loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
|
||||
loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
|
||||
return loopbacks
|
||||
|
||||
|
||||
|
@ -21,6 +21,7 @@
|
||||
from __future__ import print_function
|
||||
import copy
|
||||
from distutils.version import LooseVersion
|
||||
from enum import Enum
|
||||
from functools import wraps
|
||||
from collections import namedtuple
|
||||
import glob
|
||||
@ -57,6 +58,14 @@ RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
|
||||
'This may not be compatible with software you are '
|
||||
'running in your shell.')
|
||||
|
||||
|
||||
class WL_STATES(Enum):
    """Valid Juju workload states a charm may report via status-set."""

    ACTIVE = 'active'
    BLOCKED = 'blocked'
    MAINTENANCE = 'maintenance'
    WAITING = 'waiting'
|
||||
|
||||
|
||||
cache = {}
|
||||
|
||||
|
||||
@ -1088,22 +1097,31 @@ def function_tag():
|
||||
return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
|
||||
|
||||
|
||||
def status_set(workload_state, message):
|
||||
def status_set(workload_state, message, application_status=False):
|
||||
"""Set the workload state with a message
|
||||
|
||||
Use status-set to set the workload state with a message which is visible
|
||||
to the user via juju status. If the status-set command is not found then
|
||||
assume this is juju < 1.23 and juju-log the message unstead.
|
||||
assume this is juju < 1.23 and juju-log the message instead.
|
||||
|
||||
workload_state -- valid juju workload state.
|
||||
message -- status update message
|
||||
workload_state -- valid juju workload state. str or WL_STATES
|
||||
message -- status update message
|
||||
application_status -- Whether this is an application state set
|
||||
"""
|
||||
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
|
||||
if workload_state not in valid_states:
|
||||
# Extract the value if workload_state is an Enum
|
||||
try:
|
||||
workload_state = workload_state.value
|
||||
except AttributeError:
|
||||
pass
|
||||
workload_state = workload_state.lower()
|
||||
if workload_state not in [s.lower() for s in WL_STATES.__members__.keys()]:
|
||||
raise ValueError(
|
||||
'{!r} is not a valid workload state'.format(workload_state)
|
||||
)
|
||||
cmd = ['status-set', workload_state, message]
|
||||
cmd = ['status-set']
|
||||
if application_status:
|
||||
cmd.append('--application')
|
||||
cmd.extend([workload_state, message])
|
||||
try:
|
||||
ret = subprocess.call(cmd)
|
||||
if ret == 0:
|
||||
@ -1526,13 +1544,13 @@ def env_proxy_settings(selected_settings=None):
|
||||
"""Get proxy settings from process environment variables.
|
||||
|
||||
Get charm proxy settings from environment variables that correspond to
|
||||
juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
|
||||
see lp:1782236) in a format suitable for passing to an application that
|
||||
reacts to proxy settings passed as environment variables. Some applications
|
||||
support lowercase or uppercase notation (e.g. curl), some support only
|
||||
lowercase (e.g. wget), there are also subjectively rare cases of only
|
||||
uppercase notation support. no_proxy CIDR and wildcard support also varies
|
||||
between runtimes and applications as there is no enforced standard.
|
||||
juju-http-proxy, juju-https-proxy juju-no-proxy (available as of 2.4.2, see
|
||||
lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
|
||||
application that reacts to proxy settings passed as environment variables.
|
||||
Some applications support lowercase or uppercase notation (e.g. curl), some
|
||||
support only lowercase (e.g. wget), there are also subjectively rare cases
|
||||
of only uppercase notation support. no_proxy CIDR and wildcard support also
|
||||
varies between runtimes and applications as there is no enforced standard.
|
||||
|
||||
Some applications may connect to multiple destinations and expose config
|
||||
options that would affect only proxy settings for a specific destination
|
||||
|
@ -78,7 +78,10 @@ from charmhelpers.contrib.openstack.utils import (
|
||||
enable_memcache,
|
||||
series_upgrade_prepare,
|
||||
series_upgrade_complete,
|
||||
is_db_maintenance_mode,
|
||||
inform_peers_if_ready,
|
||||
check_api_unit_ready,
|
||||
check_api_application_ready,
|
||||
is_db_ready,
|
||||
)
|
||||
|
||||
from keystone_context import fernet_enabled
|
||||
@ -105,9 +108,7 @@ from keystone_utils import (
|
||||
TOKEN_FLUSH_CRON_FILE,
|
||||
setup_ipv6,
|
||||
send_notifications,
|
||||
is_db_ready,
|
||||
is_db_initialised,
|
||||
is_expected_scale,
|
||||
filter_null,
|
||||
is_service_present,
|
||||
delete_service_entry,
|
||||
@ -303,6 +304,7 @@ def config_changed_postupgrade():
|
||||
ha_joined(relation_id=r_id)
|
||||
|
||||
notify_middleware_with_release_version()
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-joined')
|
||||
@ -325,26 +327,15 @@ def db_joined():
|
||||
hostname=host)
|
||||
|
||||
|
||||
def update_all_identity_relation_units(check_db_ready=True):
|
||||
if is_db_maintenance_mode():
|
||||
log('Database maintenance mode, aborting hook.', level=INFO)
|
||||
return
|
||||
if is_unit_paused_set():
|
||||
return
|
||||
if check_db_ready and not is_db_ready():
|
||||
log('Allowed_units list provided and this unit not present',
|
||||
def update_all_identity_relation_units():
|
||||
unit_ready, _ = check_api_application_ready()
|
||||
if not unit_ready:
|
||||
log(
|
||||
("Keystone charm unit not ready - deferring identity-relation "
|
||||
"updates"),
|
||||
level=INFO)
|
||||
return
|
||||
|
||||
if not is_db_initialised():
|
||||
log("Database not yet initialised - deferring identity-relation "
|
||||
"updates", level=INFO)
|
||||
return
|
||||
if not is_expected_scale():
|
||||
log("Keystone charm and it's dependencies not yet at expected scale "
|
||||
"- deferring identity-relation updates", level=INFO)
|
||||
return
|
||||
|
||||
log('Firing identity_changed hook for all related services.')
|
||||
for rid in relation_ids('identity-service'):
|
||||
for unit in related_units(rid):
|
||||
@ -388,7 +379,7 @@ def leader_init_db_if_ready(use_current_context=False):
|
||||
|
||||
if is_db_initialised():
|
||||
log("Database already initialised - skipping db init", level=DEBUG)
|
||||
update_all_identity_relation_units(check_db_ready=False)
|
||||
update_all_identity_relation_units()
|
||||
return
|
||||
|
||||
# Bugs 1353135 & 1187508. Dbs can appear to be ready before the
|
||||
@ -406,8 +397,8 @@ def leader_init_db_if_ready(use_current_context=False):
|
||||
os_release('keystone')) >= 'liberty':
|
||||
CONFIGS.write(POLICY_JSON)
|
||||
# Ensure any existing service entries are updated in the
|
||||
# new database backend. Also avoid duplicate db ready check.
|
||||
update_all_identity_relation_units(check_db_ready=False)
|
||||
# new database backend.
|
||||
update_all_identity_relation_units()
|
||||
update_all_domain_backends()
|
||||
|
||||
|
||||
@ -423,6 +414,7 @@ def db_changed():
|
||||
os_release('keystone')) >= 'liberty':
|
||||
CONFIGS.write(POLICY_JSON)
|
||||
update_all_identity_relation_units()
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-departed',
|
||||
@ -558,6 +550,7 @@ def cluster_changed():
|
||||
update_all_identity_relation_units()
|
||||
|
||||
CONFIGS.write_all()
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
@hooks.hook('leader-elected')
|
||||
@ -569,6 +562,7 @@ def leader_elected():
|
||||
CONFIGS.write(TOKEN_FLUSH_CRON_FILE)
|
||||
|
||||
update_all_identity_relation_units()
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
@hooks.hook('leader-settings-changed')
|
||||
@ -595,6 +589,7 @@ def leader_settings_changed():
|
||||
key_write()
|
||||
|
||||
update_all_identity_relation_units()
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
@hooks.hook('ha-relation-joined')
|
||||
@ -618,6 +613,7 @@ def ha_changed():
|
||||
update_all_identity_relation_units()
|
||||
update_all_domain_backends()
|
||||
update_all_fid_backends()
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
@hooks.hook('identity-admin-relation-changed')
|
||||
@ -744,6 +740,7 @@ def upgrade_charm():
|
||||
os_release('keystone'),
|
||||
'keystone',
|
||||
restart_handler=lambda: service_restart('apache2'))
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
@hooks.hook('update-status')
|
||||
@ -884,6 +881,7 @@ def certs_changed(relation_id=None, unit=None):
|
||||
update_all_identity_relation_units()
|
||||
update_all_domain_backends()
|
||||
update_all_fid_backends()
|
||||
inform_peers_if_ready(check_api_unit_ready)
|
||||
|
||||
|
||||
def notify_middleware_with_release_version():
|
||||
|
@ -42,10 +42,6 @@ from charmhelpers.contrib.network.ip import (
|
||||
get_ipv6_addr
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.ha.utils import (
|
||||
expect_ha,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.ip import (
|
||||
resolve_address,
|
||||
PUBLIC,
|
||||
@ -63,6 +59,7 @@ from charmhelpers.contrib.openstack.utils import (
|
||||
resume_unit,
|
||||
make_assess_status_func,
|
||||
os_application_version_set,
|
||||
os_application_status_set,
|
||||
CompareOpenStackReleases,
|
||||
reset_os_release,
|
||||
snap_install_requested,
|
||||
@ -70,6 +67,8 @@ from charmhelpers.contrib.openstack.utils import (
|
||||
get_snaps_install_info_from_origin,
|
||||
enable_memcache,
|
||||
is_unit_paused_set,
|
||||
check_api_unit_ready,
|
||||
get_api_application_status,
|
||||
)
|
||||
|
||||
from charmhelpers.core.decorators import (
|
||||
@ -78,16 +77,12 @@ from charmhelpers.core.decorators import (
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
atexit,
|
||||
cached,
|
||||
config,
|
||||
expected_peer_units,
|
||||
expected_related_units,
|
||||
is_leader,
|
||||
leader_get,
|
||||
leader_set,
|
||||
log,
|
||||
local_unit,
|
||||
metadata,
|
||||
relation_get,
|
||||
relation_set,
|
||||
relation_id,
|
||||
@ -2296,6 +2291,9 @@ def check_extra_for_assess_status(configs):
|
||||
._decode_password_security_compliance_string(conf) is None)):
|
||||
return ('blocked',
|
||||
"'password-security-compliance' is invalid")
|
||||
unit_ready, msg = check_api_unit_ready()
|
||||
if not unit_ready:
|
||||
return ('blocked', msg)
|
||||
# return 'unknown' as the lowest priority to not clobber an existing
|
||||
# status.
|
||||
return 'unknown', ''
|
||||
@ -2316,6 +2314,8 @@ def assess_status(configs):
|
||||
"""
|
||||
assess_status_func(configs)()
|
||||
os_application_version_set(VERSION_PACKAGE)
|
||||
if is_leader():
|
||||
os_application_status_set(get_api_application_status)
|
||||
|
||||
|
||||
def assess_status_func(configs, exclude_ha_resource=False):
|
||||
@ -2574,63 +2574,6 @@ def fernet_keys_rotate_and_sync(log_func=log):
|
||||
level=INFO)
|
||||
|
||||
|
||||
@cached
|
||||
def container_scoped_relations():
|
||||
'''Get all the container scoped relations'''
|
||||
md = metadata()
|
||||
relations = []
|
||||
for relation_type in ('provides', 'requires', 'peers'):
|
||||
for relation in md.get(relation_type, []):
|
||||
if md[relation_type][relation].get('scope') == 'container':
|
||||
relations.append(relation)
|
||||
return relations
|
||||
|
||||
|
||||
def is_expected_scale():
|
||||
"""Query juju goal-state to determine whether our peer- and dependency-
|
||||
relations are at the expected scale.
|
||||
|
||||
Useful for deferring per unit per relation housekeeping work until we are
|
||||
ready to complete it successfully and without unnecessary repetiton.
|
||||
|
||||
Always returns True if version of juju used does not support goal-state.
|
||||
|
||||
:returns: True or False
|
||||
:rtype: bool
|
||||
"""
|
||||
peer_type = 'cluster'
|
||||
peer_rid = next((rid for rid in relation_ids(reltype=peer_type)), None)
|
||||
if not peer_rid:
|
||||
return False
|
||||
deps = [
|
||||
('shared-db',
|
||||
next((rid for rid in relation_ids(reltype='shared-db')), None)),
|
||||
]
|
||||
if expect_ha():
|
||||
deps.append(('ha',
|
||||
next((rid for rid in relation_ids(reltype='ha')), None)))
|
||||
try:
|
||||
if (len(related_units(relid=peer_rid)) <
|
||||
len(list(expected_peer_units()))):
|
||||
return False
|
||||
for dep in deps:
|
||||
if not dep[1]:
|
||||
return False
|
||||
# Goal state returns every unit even for container scoped
|
||||
# relations but the charm only ever has a relation with
|
||||
# the local unit.
|
||||
if dep[0] in container_scoped_relations():
|
||||
expected_count = 1
|
||||
else:
|
||||
expected_count = len(
|
||||
list(expected_related_units(reltype=dep[0])))
|
||||
if len(related_units(relid=dep[1])) < expected_count:
|
||||
return False
|
||||
except NotImplementedError:
|
||||
return True
|
||||
return True
|
||||
|
||||
|
||||
def assemble_endpoints(settings):
|
||||
"""
|
||||
Assemble multiple endpoints from relation data. service name
|
||||
|
@ -64,6 +64,8 @@ TO_PATCH = [
|
||||
# charmhelpers.contrib.openstack.utils
|
||||
'configure_installation_source',
|
||||
'snap_install_requested',
|
||||
'check_api_application_ready',
|
||||
'inform_peers_if_ready',
|
||||
# charmhelpers.contrib.openstack.ip
|
||||
'resolve_address',
|
||||
# charmhelpers.contrib.openstack.ha.utils
|
||||
@ -104,7 +106,6 @@ TO_PATCH = [
|
||||
'run_in_apache',
|
||||
# unitdata
|
||||
'unitdata',
|
||||
'is_db_maintenance_mode',
|
||||
]
|
||||
|
||||
|
||||
@ -198,7 +199,6 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
def test_db_changed_missing_relation_data(self, configs,
|
||||
mock_log):
|
||||
self.is_db_maintenance_mode_return_value = False
|
||||
configs.complete_contexts = MagicMock()
|
||||
configs.complete_contexts.return_value = []
|
||||
hooks.db_changed()
|
||||
@ -208,7 +208,6 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
|
||||
@patch.object(hooks, 'update_all_identity_relation_units')
|
||||
def _shared_db_test(self, configs, unit_name, mock_update_all):
|
||||
self.is_db_maintenance_mode_return_value = False
|
||||
self.relation_get.return_value = 'keystone/0 keystone/3'
|
||||
configs.complete_contexts = MagicMock()
|
||||
configs.complete_contexts.return_value = ['shared-db']
|
||||
@ -384,7 +383,6 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
.assert_called_once_with(ANY, "keystone", restart_handler=ANY))
|
||||
|
||||
@patch.object(hooks, 'maybe_do_policyd_overrides_on_config_changed')
|
||||
@patch.object(hooks, 'is_expected_scale')
|
||||
@patch.object(hooks, 'os_release')
|
||||
@patch.object(hooks, 'run_in_apache')
|
||||
@patch.object(hooks, 'is_db_initialised')
|
||||
@ -395,13 +393,12 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
mock_db_init,
|
||||
mock_run_in_apache,
|
||||
os_release,
|
||||
is_expected_scale,
|
||||
mock_maybe_do_policyd_overrides_on_config_changed
|
||||
):
|
||||
self.check_api_application_ready.return_value = (True, 'msg')
|
||||
os_release.return_value = 'ocata'
|
||||
self.enable_memcache.return_value = False
|
||||
mock_run_in_apache.return_value = False
|
||||
is_expected_scale.return_value = True
|
||||
|
||||
self.openstack_upgrade_available.return_value = True
|
||||
self.test_config.set('action-managed-upgrade', True)
|
||||
@ -678,7 +675,7 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
self.is_db_ready.assert_called_with(use_current_context=False)
|
||||
self.migrate_database.assert_called_with()
|
||||
mock_bootstrap_keystone.assert_called_once_with(configs=ANY)
|
||||
update.assert_called_with(check_db_ready=False)
|
||||
update.assert_called_with()
|
||||
|
||||
@patch.object(hooks, 'update_all_identity_relation_units')
|
||||
def test_leader_init_db_not_leader(self, update):
|
||||
@ -717,7 +714,6 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
self.assertFalse(self.migrate_database.called)
|
||||
self.assertFalse(update.called)
|
||||
|
||||
@patch.object(hooks, 'is_expected_scale')
|
||||
@patch.object(hooks, 'configure_https')
|
||||
@patch.object(hooks, 'admin_relation_changed')
|
||||
@patch.object(hooks, 'identity_credentials_changed')
|
||||
@ -729,12 +725,10 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
identity_changed,
|
||||
identity_credentials_changed,
|
||||
admin_relation_changed,
|
||||
configure_https,
|
||||
is_expected_scale):
|
||||
configure_https):
|
||||
""" Verify all identity relations are updated """
|
||||
self.is_db_maintenance_mode.return_value = False
|
||||
self.check_api_application_ready.return_value = (True, 'msg')
|
||||
is_db_initialized.return_value = True
|
||||
is_expected_scale.return_value = True
|
||||
self.relation_ids.return_value = ['identity-relation:0']
|
||||
self.related_units.return_value = ['unit/0']
|
||||
log_calls = [call('Firing identity_changed hook for all related '
|
||||
@ -743,7 +737,7 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
'services.'),
|
||||
call('Firing identity_credentials_changed hook for all '
|
||||
'related services.')]
|
||||
hooks.update_all_identity_relation_units(check_db_ready=False)
|
||||
hooks.update_all_identity_relation_units()
|
||||
identity_changed.assert_called_with(
|
||||
relation_id='identity-relation:0',
|
||||
remote_unit='unit/0')
|
||||
@ -757,56 +751,30 @@ class KeystoneRelationTests(CharmTestCase):
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
def test_update_all_db_not_ready(self, configs, configure_https):
|
||||
""" Verify update identity relations when DB is not ready """
|
||||
self.is_db_maintenance_mode.return_value = False
|
||||
self.is_db_ready.return_value = False
|
||||
hooks.update_all_identity_relation_units(check_db_ready=True)
|
||||
self.assertTrue(self.is_db_ready.called)
|
||||
self.log.assert_called_with('Allowed_units list provided and this '
|
||||
'unit not present', level='INFO')
|
||||
self.check_api_application_ready.return_value = (False, 'msg')
|
||||
hooks.update_all_identity_relation_units()
|
||||
self.log.assert_called_with(
|
||||
("Keystone charm unit not ready - deferring identity-relation "
|
||||
"updates"),
|
||||
level='INFO')
|
||||
self.assertFalse(self.relation_ids.called)
|
||||
|
||||
@patch.object(hooks, 'configure_https')
|
||||
@patch.object(hooks, 'is_db_initialised')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
def test_update_all_db_not_initializd(self, configs, is_db_initialized,
|
||||
configure_https):
|
||||
""" Verify update identity relations when DB is not initialized """
|
||||
self.is_db_maintenance_mode.return_value = False
|
||||
is_db_initialized.return_value = False
|
||||
hooks.update_all_identity_relation_units(check_db_ready=False)
|
||||
self.assertFalse(self.is_db_ready.called)
|
||||
self.log.assert_called_with('Database not yet initialised - '
|
||||
'deferring identity-relation updates',
|
||||
level='INFO')
|
||||
self.assertFalse(self.relation_ids.called)
|
||||
|
||||
@patch.object(hooks, 'is_expected_scale')
|
||||
@patch.object(hooks, 'configure_https')
|
||||
@patch.object(hooks, 'is_db_initialised')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
def test_update_all_leader(self, configs, is_db_initialized,
|
||||
configure_https, is_expected_scale):
|
||||
def test_update_all_leader(self, configs, configure_https):
|
||||
""" Verify update identity relations when the leader"""
|
||||
self.is_db_maintenance_mode.return_value = False
|
||||
self.is_elected_leader.return_value = True
|
||||
is_db_initialized.return_value = True
|
||||
is_expected_scale.return_value = True
|
||||
hooks.update_all_identity_relation_units(check_db_ready=False)
|
||||
self.check_api_application_ready.return_value = (True, 'msg')
|
||||
hooks.update_all_identity_relation_units()
|
||||
# Still updates relations
|
||||
self.assertTrue(self.relation_ids.called)
|
||||
|
||||
@patch.object(hooks, 'is_expected_scale')
|
||||
@patch.object(hooks, 'configure_https')
|
||||
@patch.object(hooks, 'is_db_initialised')
|
||||
@patch.object(hooks, 'CONFIGS')
|
||||
def test_update_all_not_leader(self, configs, is_db_initialized,
|
||||
configure_https, is_expected_scale):
|
||||
def test_update_all_not_leader(self, configs, configure_https):
|
||||
""" Verify update identity relations when not the leader"""
|
||||
self.is_db_maintenance_mode.return_value = False
|
||||
self.check_api_application_ready.return_value = (True, 'msg')
|
||||
self.is_elected_leader.return_value = False
|
||||
is_db_initialized.return_value = True
|
||||
is_expected_scale.return_value = True
|
||||
hooks.update_all_identity_relation_units(check_db_ready=False)
|
||||
hooks.update_all_identity_relation_units()
|
||||
self.assertFalse(self.ensure_initial_admin.called)
|
||||
# Still updates relations
|
||||
self.assertTrue(self.relation_ids.called)
|
||||
|
@ -73,6 +73,7 @@ TO_PATCH = [
|
||||
'time',
|
||||
'pwgen',
|
||||
'os_application_version_set',
|
||||
'os_application_status_set',
|
||||
'reset_os_release',
|
||||
]
|
||||
|
||||
@ -1116,7 +1117,9 @@ class TestKeystoneUtils(CharmTestCase):
|
||||
x = utils.get_file_stored_domain_id('/a/file')
|
||||
self.assertEqual(x, 'some_data')
|
||||
|
||||
def test_assess_status(self):
|
||||
@patch.object(utils, 'is_leader')
|
||||
def test_assess_status(self, is_leader):
|
||||
is_leader.return_value = True
|
||||
with patch.object(utils, 'assess_status_func') as asf:
|
||||
callee = MagicMock()
|
||||
asf.return_value = callee
|
||||
@ -1126,6 +1129,7 @@ class TestKeystoneUtils(CharmTestCase):
|
||||
self.os_application_version_set.assert_called_with(
|
||||
utils.VERSION_PACKAGE
|
||||
)
|
||||
self.assertTrue(self.os_application_status_set.called)
|
||||
|
||||
@patch.object(utils, 'determine_ports')
|
||||
@patch.object(utils, 'services')
|
||||
@ -1682,98 +1686,6 @@ class TestKeystoneUtils(CharmTestCase):
|
||||
mock_fernet_rotate.assert_called_once_with()
|
||||
mock_key_leader_set.assert_called_once_with()
|
||||
|
||||
@patch.object(utils, 'container_scoped_relations')
|
||||
@patch.object(utils, 'expected_related_units')
|
||||
@patch.object(utils, 'expected_peer_units')
|
||||
@patch.object(utils, 'related_units')
|
||||
@patch.object(utils, 'expect_ha')
|
||||
@patch.object(utils, 'relation_ids')
|
||||
def test_is_expected_scale(self, relation_ids, expect_ha, related_units,
|
||||
expected_peer_units, expected_related_units,
|
||||
container_scoped_relations):
|
||||
container_scoped_relations.return_value = ['ha']
|
||||
relation_ids.return_value = ['FAKE_RID']
|
||||
expect_ha.return_value = False
|
||||
related_units.return_value = ['unit/0', 'unit/1', 'unit/2']
|
||||
expected_peer_units.return_value = iter(related_units.return_value)
|
||||
expected_related_units.return_value = iter(related_units.return_value)
|
||||
self.assertTrue(utils.is_expected_scale())
|
||||
relation_ids.assert_has_calls([
|
||||
call(reltype='cluster'),
|
||||
call(reltype='shared-db')])
|
||||
related_units.assert_called_with(relid='FAKE_RID')
|
||||
|
||||
@patch.object(utils, 'container_scoped_relations')
|
||||
@patch.object(utils, 'expected_related_units')
|
||||
@patch.object(utils, 'expected_peer_units')
|
||||
@patch.object(utils, 'related_units')
|
||||
@patch.object(utils, 'expect_ha')
|
||||
@patch.object(utils, 'relation_ids')
|
||||
def test_is_expected_scale_ha(self, relation_ids, expect_ha, related_units,
|
||||
expected_peer_units, expected_related_units,
|
||||
container_scoped_relations):
|
||||
container_scoped_relations.return_value = ['ha']
|
||||
relation_ids.return_value = ['FAKE_RID']
|
||||
expect_ha.return_value = True
|
||||
related_units.return_value = ['unit/0', 'unit/1', 'unit/2']
|
||||
expected_peer_units.return_value = iter(related_units.return_value)
|
||||
expected_related_units.return_value = iter(related_units.return_value)
|
||||
self.assertTrue(utils.is_expected_scale())
|
||||
relation_ids.assert_has_calls([
|
||||
call(reltype='cluster'),
|
||||
call(reltype='shared-db'),
|
||||
call(reltype='ha')])
|
||||
related_units.assert_called_with(relid='FAKE_RID')
|
||||
|
||||
@patch.object(utils, 'expected_related_units')
|
||||
@patch.object(utils, 'expected_peer_units')
|
||||
@patch.object(utils, 'related_units')
|
||||
@patch.object(utils, 'expect_ha')
|
||||
@patch.object(utils, 'relation_ids')
|
||||
def test_not_is_expected_scale(self, relation_ids, expect_ha,
|
||||
related_units, expected_peer_units,
|
||||
expected_related_units):
|
||||
relation_ids.return_value = ['FAKE_RID']
|
||||
expect_ha.return_value = False
|
||||
related_units.return_value = ['unit/0', 'unit/1']
|
||||
expected_peer_units.return_value = iter(['unit/0', 'unit/1', 'unit/2'])
|
||||
expected_related_units.return_value = iter(
|
||||
['unit/0', 'unit/1', 'unit/2'])
|
||||
self.assertFalse(utils.is_expected_scale())
|
||||
relation_ids.assert_has_calls([
|
||||
call(reltype='cluster'),
|
||||
call(reltype='shared-db')])
|
||||
related_units.assert_called_with(relid='FAKE_RID')
|
||||
|
||||
@patch.object(utils, 'expected_related_units')
|
||||
@patch.object(utils, 'expected_peer_units')
|
||||
@patch.object(utils, 'related_units')
|
||||
@patch.object(utils, 'expect_ha')
|
||||
@patch.object(utils, 'relation_ids')
|
||||
def test_is_expected_scale_no_goal_state_support(self, relation_ids,
|
||||
expect_ha, related_units,
|
||||
expected_peer_units,
|
||||
expected_related_units):
|
||||
relation_ids.return_value = ['FAKE_RID']
|
||||
related_units.return_value = ['unit/0', 'unit/1', 'unit/2']
|
||||
expected_peer_units.side_effect = NotImplementedError
|
||||
self.assertTrue(utils.is_expected_scale())
|
||||
expected_related_units.assert_not_called()
|
||||
|
||||
@patch.object(utils, 'metadata')
|
||||
def test_container_scoped_relations(self, metadata):
|
||||
_metadata = {
|
||||
'provides': {
|
||||
'amqp': {'interface': 'rabbitmq'},
|
||||
'identity-service': {'interface': 'keystone'},
|
||||
'ha': {
|
||||
'interface': 'hacluster',
|
||||
'scope': 'container'}},
|
||||
'peers': {
|
||||
'cluster': {'interface': 'openstack-ha'}}}
|
||||
metadata.return_value = _metadata
|
||||
self.assertEqual(utils.container_scoped_relations(), ['ha'])
|
||||
|
||||
@patch.object(utils, 'resource_map')
|
||||
@patch.object(utils.os.path, 'isdir')
|
||||
def test_restart_map(self, osp_isdir, resource_map):
|
||||
|
Loading…
Reference in New Issue
Block a user