Merge "Add support for pacemaker-remotes"

This commit is contained in:
Zuul 2019-04-03 14:23:47 +00:00 committed by Gerrit Code Review
commit 438677fe7e
7 changed files with 610 additions and 34 deletions

View File

@@ -77,11 +77,16 @@ from utils import (
get_corosync_conf,
assert_charm_supports_ipv6,
get_cluster_nodes,
get_member_ready_nodes,
get_pcmkr_key,
parse_data,
configure_corosync,
configure_stonith,
configure_monitor_host,
configure_cluster_global,
configure_pacemaker_remote_resources,
configure_pacemaker_remote_stonith_resource,
configure_resources_on_remotes,
enable_lsb_services,
disable_lsb_services,
disable_upstart_services,
@@ -90,6 +95,7 @@ from utils import (
setup_maas_api,
setup_ocf_files,
set_unit_status,
set_cluster_symmetry,
ocf_file_exists,
kill_legacy_ocf_daemon_process,
try_pcmk_wait,
@@ -231,6 +237,7 @@ def hanode_relation_joined(relid=None):
'ha-relation-changed',
'peer-availability-relation-joined',
'peer-availability-relation-changed',
'pacemaker-remote-relation-changed',
'hanode-relation-changed')
def ha_relation_changed():
# Check that we are related to a principal and that
@@ -325,6 +332,8 @@ def ha_relation_changed():
# Only configure the cluster resources
# from the oldest peer unit.
if is_leader():
log('Setting cluster symmetry', level=INFO)
set_cluster_symmetry()
log('Deleting Resources: %s' % (delete_resources,), level=DEBUG)
for res_name in delete_resources:
if pcmk.crm_opt_exists(res_name):
@@ -456,6 +465,21 @@ def ha_relation_changed():
cmd = 'crm resource cleanup %s' % grp_name
pcmk.commit(cmd)
# All members of the cluster need to be registered before resources
# that reference them can be created.
if len(get_member_ready_nodes()) >= int(config('cluster_count')):
log('Configuring any remote nodes', level=INFO)
remote_resources = configure_pacemaker_remote_resources()
stonith_resource = configure_pacemaker_remote_stonith_resource()
resources.update(remote_resources)
resources.update(stonith_resource)
configure_resources_on_remotes(
resources=resources,
clones=clones,
groups=groups)
else:
log('Deferring configuration of any remote nodes', level=INFO)
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id, clustered="yes")
@@ -542,6 +566,16 @@ def series_upgrade_complete():
resume_unit()
@hooks.hook('pacemaker-remote-relation-joined')
def send_auth_key():
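"""Publish the key from get_pcmkr_key() on the pacemaker-remote relation."""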
key = get_pcmkr_key()
if key:
for rel_id in relation_ids('pacemaker-remote'):
relation_set(
relation_id=rel_id,
**{'pacemaker-key': key})
if __name__ == '__main__':
try:
hooks.execute(sys.argv)

View File

@@ -0,0 +1 @@
hooks.py

View File

@@ -0,0 +1 @@
hooks.py

View File

@@ -598,34 +598,50 @@ def configure_cluster_global():
pcmk.commit(cmd)
def configure_maas_stonith_resource(stonith_hostname):
def configure_maas_stonith_resource(stonith_hostnames):
"""Create stonith resource for the given hostname.
:param stonith_hostname: The hostname that the stonith management system
:param stonith_hostnames: The hostnames that the stonith management system
refers to the remote nodes as.
:type stonith_hostname: str
:type stonith_hostnames: list
"""
log('Checking for existing stonith resource', level=DEBUG)
stonith_res_name = 'st-{}'.format(stonith_hostname.split('.')[0])
if not pcmk.is_resource_present(stonith_res_name):
ctxt = {
'url': config('maas_url'),
'apikey': config('maas_credentials'),
'hostnames': stonith_hostname,
'stonith_resource_name': stonith_res_name}
if all(ctxt.values()):
hostnames = []
for host in stonith_hostnames:
hostnames.append(host)
if '.' in host:
hostnames.append(host.split('.')[0])
hostnames = list(set(hostnames))
ctxt = {
'url': config('maas_url'),
'apikey': config('maas_credentials'),
'hostnames': ' '.join(sorted(hostnames))}
if all(ctxt.values()):
maas_login_params = "url='{url}' apikey='{apikey}'".format(**ctxt)
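# Name the stonith resource after a short checksum of the MAAS connection
# details, so one resource covers all hostnames and a new name is generated
# if the endpoint or credentials change.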
maas_rsc_hash = pcmk.resource_checksum(
'st',
'stonith:external/maas',
res_params=maas_login_params)[:7]
ctxt['stonith_resource_name'] = 'st-maas-{}'.format(maas_rsc_hash)
ctxt['resource_params'] = (
"params url='{url}' apikey='{apikey}' hostnames='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25").format(**ctxt)
if pcmk.is_resource_present(ctxt['stonith_resource_name']):
pcmk.crm_update_resource(
ctxt['stonith_resource_name'],
'stonith:external/maas',
ctxt['resource_params'])
else:
cmd = (
"crm configure primitive {stonith_resource_name} "
"stonith:external/maas "
"params url='{url}' apikey='{apikey}' hostnames={hostnames} "
"op monitor interval=25 start-delay=25 "
"timeout=25").format(**ctxt)
"stonith:external/maas {resource_params}").format(**ctxt)
pcmk.commit(cmd, failure_is_fatal=True)
else:
raise ValueError("Missing configuration: {}".format(ctxt))
pcmk.commit(
"crm configure property stonith-enabled=true",
failure_is_fatal=True)
else:
raise ValueError("Missing configuration: {}".format(ctxt))
return {ctxt['stonith_resource_name']: 'stonith:external/maas'}
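For reference, a minimal standalone sketch of the hostname expansion above (hypothetical helper name, no charm dependencies): each FQDN contributes both itself and its short name, duplicates are dropped, and the result is sorted so the generated resource definition stays stable.
def expand_stonith_hostnames(stonith_hostnames):
    # Keep each name as given and, for FQDNs, also the short form.
    hostnames = []
    for host in stonith_hostnames:
        hostnames.append(host)
        if '.' in host:
            hostnames.append(host.split('.')[0])
    # Dedupe and sort for a stable 'hostnames=' resource parameter.
    return ' '.join(sorted(set(hostnames)))
assert expand_stonith_hostnames(['node1.maas', 'node2']) == 'node1 node1.maas node2'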
def get_ip_addr_from_resource_params(params):
@@ -638,6 +654,199 @@ def get_ip_addr_from_resource_params(params):
return res.group(1) if res else None
def need_resources_on_remotes():
"""Whether to run resources on remote nodes.
Check the 'enable-resources' setting across the remote units. If it is
absent or inconsistent then raise a ValueError.
:returns: Whether to run resources on remote nodes
:rtype: bool
:raises: ValueError
"""
responses = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
data = parse_data(relid, unit, 'enable-resources')
# parse_data returns {} if key is absent.
if type(data) is bool:
responses.append(data)
if len(set(responses)) == 1:
run_resources_on_remotes = responses[0]
else:
msg = "Inconsistent or absent enable-resources setting {}".format(
responses)
log(msg, level=WARNING)
raise ValueError(msg)
return run_resources_on_remotes
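The consistency rule in isolation, as a minimal sketch (hypothetical helper name): a single distinct answer wins, while mixed answers or no answers at all raise, matching the unit tests further down.
def _resolve_enable_resources(responses):
    # One consistent answer across all remote units is required.
    if len(set(responses)) == 1:
        return responses[0]
    raise ValueError(responses)
assert _resolve_enable_resources([True, True, True]) is True
assert _resolve_enable_resources([False, False]) is False
# _resolve_enable_resources([True, False]) and _resolve_enable_resources([])
# both raise ValueError.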
def set_cluster_symmetry():
"""Set the cluster symmetry.
By default the cluster is an opt-out cluster (equivalent to
symmetric-cluster=true), which means that any resource can run anywhere
unless a node explicitly opts out. When using pacemaker-remotes there may
be hundreds of nodes and, if they are not prepared to run resources, the
cluster should be switched to an opt-in cluster.
"""
try:
symmetric = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate the desired symmetric-cluster setting'
log(msg, level=WARNING)
return
log('Configuring symmetric-cluster: {}'.format(symmetric), level=DEBUG)
cmd = "crm configure property symmetric-cluster={}".format(
str(symmetric).lower())
pcmk.commit(cmd, failure_is_fatal=True)
def add_location_rules_for_local_nodes(res_name):
"""Add location rules for running resource on local nodes.
Add location rules allowing the given resource to run on local nodes (i.e.
not remote nodes).
:param res_name: Resource name to create location rules for.
:type res_name: str
"""
for node in pcmk.list_nodes():
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
if not pcmk.crm_opt_exists(loc_constraint_name):
cmd = 'crm -w -F configure location {} {} 0: {}'.format(
loc_constraint_name,
res_name,
node)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
def configure_pacemaker_remote(remote_hostname):
"""Create a resource corresponding to the pacemaker remote node.
:param remote_hostname: Remote hostname used for registering remote node.
:type remote_hostname: str
:returns: Name of resource for pacemaker remote node.
:rtype: str
"""
resource_name = remote_hostname.split('.')[0]
if not pcmk.is_resource_present(resource_name):
cmd = (
"crm configure primitive {} ocf:pacemaker:remote "
"params server={} reconnect_interval=60 "
"op monitor interval=30s").format(resource_name,
remote_hostname)
pcmk.commit(cmd, failure_is_fatal=True)
return resource_name
def cleanup_remote_nodes(remote_nodes):
"""Cleanup pacemaker remote resources
Remove all status records of the resource and
probe the node afterwards.
:param remote_nodes: List of resource names associated with remote nodes
:type remote_nodes: list
"""
for res_name in remote_nodes:
cmd = 'crm resource cleanup {}'.format(res_name)
# Resource cleanups seem to fail occasionally even on healthy nodes
# (Bug #1822962). Given that this cleanup task is just housekeeping,
# log the message if a failure occurs and move on.
if pcmk.commit(cmd, failure_is_fatal=False) == 0:
log(
'Cleanup of resource {} succeeded'.format(res_name),
level=DEBUG)
else:
log(
'Cleanup of resource {} failed'.format(res_name),
level=WARNING)
def configure_pacemaker_remote_stonith_resource():
"""Create a maas stonith resource for the pacemaker-remotes.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = []
stonith_resource = {}
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
stonith_hostname = parse_data(relid, unit, 'stonith-hostname')
if stonith_hostname:
hostnames.append(stonith_hostname)
if hostnames:
stonith_resource = configure_maas_stonith_resource(hostnames)
return stonith_resource
def configure_pacemaker_remote_resources():
"""Create resources corresponding to the pacemaker remote nodes.
Create resources, location constraints and stonith resources for pacemaker
remote nodes.
:returns: resource dict {res_name: res_type, ...}
:rtype: dict
"""
log('Checking for pacemaker-remote nodes', level=DEBUG)
resources = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
remote_hostname = parse_data(relid, unit, 'remote-hostname')
if remote_hostname:
resource_name = configure_pacemaker_remote(remote_hostname)
resources.append(resource_name)
cleanup_remote_nodes(resources)
return {name: 'ocf:pacemaker:remote' for name in resources}
def configure_resources_on_remotes(resources=None, clones=None, groups=None):
"""Add location rules as needed for resources, clones and groups
If remote nodes should not run resources then add location rules then add
location rules to enable them on local nodes.
:param resources: Resource definitions
:type resources: dict
:param clones: Clone definitions
:type clones: dict
:param groups: Group definitions
:type groups: dict
"""
clones = clones or {}
groups = groups or {}
try:
resources_on_remote = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate whether resources should run on remotes'
log(msg, level=WARNING)
return
if resources_on_remote:
msg = ('Resources are permitted to run on remotes, no need to create '
'location constraints')
log(msg, level=WARNING)
return
for res_name, res_type in resources.items():
if res_name not in list(clones.values()) + list(groups.values()):
add_location_rules_for_local_nodes(res_name)
for cl_name in clones:
add_location_rules_for_local_nodes(cl_name)
# Limit clone resources to running on at most X nodes, where X is the
# number of local nodes. Otherwise they will show as offline on the
# remote nodes.
node_count = len(pcmk.list_nodes())
cmd = ('crm_resource --resource {} --set-parameter clone-max '
'--meta --parameter-value {}').format(cl_name, node_count)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
for grp_name in groups:
add_location_rules_for_local_nodes(grp_name)
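A worked example of the selection logic above, using the resource set from the unit tests below:
resources = {
    'res_masakari_1e39e82_vip': 'ocf:heartbeat:IPaddr2',
    'res_masakari_flump': 'ocf:heartbeat:IPaddr2',
    'res_masakari_haproxy': 'lsb:haproxy'}
clones = {'cl_res_masakari_haproxy': 'res_masakari_haproxy'}
groups = {'grp_masakari_vips': 'res_masakari_1e39e82_vip'}
# Location rules are added for res_masakari_flump (not wrapped by a clone
# or group), for cl_res_masakari_haproxy and for grp_masakari_vips; the
# wrapped primitives are skipped because constraining the clone or group
# covers them. With three local nodes each clone also gets clone-max=3.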
def restart_corosync_on_change():
"""Simple decorator to restart corosync if any of its config changes"""
def wrap(f):

View File

@@ -18,6 +18,8 @@ requires:
peer-availability:
interface: juju-info
scope: container
pacemaker-remote:
interface: pacemaker-remote
provides:
ha:
interface: hacluster
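To make the new interface concrete, a hypothetical snapshot of the data exchanged over the pacemaker-remote relation, pieced together from the handlers above (send_auth_key publishes the key; parse_data reads each remote unit's settings and appears to JSON-decode the values, which is why the unit tests quote them):
# hacluster side, set by send_auth_key():
hacluster_unit_data = {'pacemaker-key': '<pacemaker authkey>'}
# each pacemaker-remote unit, read via parse_data():
remote_unit_data = {
    'remote-hostname': '"node1.maas"',
    'stonith-hostname': '"node1.maas"',
    'enable-resources': 'false',
}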

View File

@@ -39,6 +39,11 @@ class TestCorosyncConf(unittest.TestCase):
shutil.rmtree(self.tmpdir)
os.remove(self.tmpfile.name)
@mock.patch.object(hooks, 'get_member_ready_nodes')
@mock.patch.object(hooks, 'configure_resources_on_remotes')
@mock.patch.object(hooks, 'configure_pacemaker_remote_stonith_resource')
@mock.patch.object(hooks, 'configure_pacemaker_remote_resources')
@mock.patch.object(hooks, 'set_cluster_symmetry')
@mock.patch.object(hooks, 'write_maas_dns_address')
@mock.patch('pcmk.wait_for_pcmk')
@mock.patch('pcmk.crm_opt_exists')
@@ -61,7 +66,12 @@ class TestCorosyncConf(unittest.TestCase):
configure_stonith, configure_monitor_host,
configure_cluster_global, configure_corosync,
is_leader, crm_opt_exists,
wait_for_pcmk, write_maas_dns_address):
wait_for_pcmk, write_maas_dns_address,
set_cluster_symmetry,
configure_pacemaker_remote_resources,
configure_pacemaker_remote_stonith_resource,
configure_resources_on_remotes,
get_member_ready_nodes):
def fake_crm_opt_exists(res_name):
# res_ubuntu will take the "update resource" route
@@ -72,6 +82,8 @@ class TestCorosyncConf(unittest.TestCase):
is_leader.return_value = True
related_units.return_value = ['ha/0', 'ha/1', 'ha/2']
get_cluster_nodes.return_value = ['10.0.3.2', '10.0.3.3', '10.0.3.4']
get_member_ready_nodes.return_value = ['10.0.3.2', '10.0.3.3',
'10.0.3.4']
relation_ids.return_value = ['hanode:1']
get_corosync_conf.return_value = True
cfg = {'debug': False,
@@ -108,6 +120,8 @@ class TestCorosyncConf(unittest.TestCase):
configure_monitor_host.assert_called_with()
configure_cluster_global.assert_called_with()
configure_corosync.assert_called_with()
set_cluster_symmetry.assert_called_with()
configure_pacemaker_remote_resources.assert_called_with()
write_maas_dns_address.assert_not_called()
for kw, key in [('location', 'locations'),
@@ -131,6 +145,11 @@ class TestCorosyncConf(unittest.TestCase):
commit.assert_any_call(
'crm -w -F configure %s %s %s' % (kw, name, params))
@mock.patch.object(hooks, 'get_member_ready_nodes')
@mock.patch.object(hooks, 'configure_resources_on_remotes')
@mock.patch.object(hooks, 'configure_pacemaker_remote_stonith_resource')
@mock.patch.object(hooks, 'configure_pacemaker_remote_resources')
@mock.patch.object(hooks, 'set_cluster_symmetry')
@mock.patch.object(hooks, 'write_maas_dns_address')
@mock.patch.object(hooks, 'setup_maas_api')
@mock.patch.object(hooks, 'validate_dns_ha')
@@ -149,21 +168,22 @@ class TestCorosyncConf(unittest.TestCase):
@mock.patch('pcmk.commit')
@mock.patch.object(hooks, 'config')
@mock.patch.object(hooks, 'parse_data')
def test_ha_relation_changed_dns_ha(self, parse_data, config, commit,
get_corosync_conf, relation_ids,
relation_set, get_cluster_nodes,
related_units, configure_stonith,
configure_monitor_host,
configure_cluster_global,
configure_corosync, is_leader,
crm_opt_exists,
wait_for_pcmk, validate_dns_ha,
setup_maas_api, write_maas_dns_addr):
def test_ha_relation_changed_dns_ha(
self, parse_data, config, commit, get_corosync_conf, relation_ids,
relation_set, get_cluster_nodes, related_units, configure_stonith,
configure_monitor_host, configure_cluster_global,
configure_corosync, is_leader, crm_opt_exists, wait_for_pcmk,
validate_dns_ha, setup_maas_api, write_maas_dns_addr,
set_cluster_symmetry, configure_pacemaker_remote_resources,
configure_pacemaker_remote_stonith_resource,
configure_resources_on_remotes, get_member_ready_nodes):
validate_dns_ha.return_value = True
crm_opt_exists.return_value = False
is_leader.return_value = True
related_units.return_value = ['ha/0', 'ha/1', 'ha/2']
get_cluster_nodes.return_value = ['10.0.3.2', '10.0.3.3', '10.0.3.4']
get_member_ready_nodes.return_value = ['10.0.3.2', '10.0.3.3',
'10.0.3.4']
relation_ids.return_value = ['ha:1']
get_corosync_conf.return_value = True
cfg = {'debug': False,
@@ -363,3 +383,14 @@ class TestHooks(test_utils.CharmTestCase):
relation_id='hanode:1',
relation_settings={'private-address': '10.10.10.2'}
)
@mock.patch.object(hooks, 'get_pcmkr_key')
@mock.patch.object(hooks, 'relation_ids')
@mock.patch.object(hooks, 'relation_set')
def test_send_auth_key(self, relation_set, relation_ids, get_pcmkr_key):
relation_ids.return_value = ['relid1']
get_pcmkr_key.return_value = 'pcmkrkey'
hooks.send_auth_key()
relation_set.assert_called_once_with(
relation_id='relid1',
**{'pacemaker-key': 'pcmkrkey'})

View File

@@ -577,6 +577,209 @@ class UtilsTestCase(unittest.TestCase):
render_template.assert_has_calls(expect_render_calls)
mkdir.assert_called_once_with('/etc/corosync/uidgid.d')
@mock.patch.object(utils, 'relation_get')
@mock.patch.object(utils, 'related_units')
@mock.patch.object(utils, 'relation_ids')
def test_need_resources_on_remotes_all_false(self, relation_ids,
related_units, relation_get):
rdata = {
'pacemaker-remote:49': {
'pacemaker-remote/0': {'enable-resources': "false"},
'pacemaker-remote/1': {'enable-resources': "false"},
'pacemaker-remote/2': {'enable-resources': "false"}}}
relation_ids.side_effect = lambda x: rdata.keys()
related_units.side_effect = lambda x: rdata[x].keys()
relation_get.side_effect = lambda x, y, z: rdata[z][y].get(x)
self.assertFalse(utils.need_resources_on_remotes())
@mock.patch.object(utils, 'relation_get')
@mock.patch.object(utils, 'related_units')
@mock.patch.object(utils, 'relation_ids')
def test_need_resources_on_remotes_all_true(self, relation_ids,
related_units,
relation_get):
rdata = {
'pacemaker-remote:49': {
'pacemaker-remote/0': {'enable-resources': "true"},
'pacemaker-remote/1': {'enable-resources': "true"},
'pacemaker-remote/2': {'enable-resources': "true"}}}
relation_ids.side_effect = lambda x: rdata.keys()
related_units.side_effect = lambda x: rdata[x].keys()
relation_get.side_effect = lambda x, y, z: rdata[z][y].get(x)
self.assertTrue(utils.need_resources_on_remotes())
@mock.patch.object(utils, 'relation_get')
@mock.patch.object(utils, 'related_units')
@mock.patch.object(utils, 'relation_ids')
def test_need_resources_on_remotes_mix(self, relation_ids, related_units,
relation_get):
rdata = {
'pacemaker-remote:49': {
'pacemaker-remote/0': {'enable-resources': "true"},
'pacemaker-remote/1': {'enable-resources': "false"},
'pacemaker-remote/2': {'enable-resources': "true"}}}
relation_ids.side_effect = lambda x: rdata.keys()
related_units.side_effect = lambda x: rdata[x].keys()
relation_get.side_effect = lambda x, y, z: rdata[z][y].get(x)
with self.assertRaises(ValueError):
self.assertTrue(utils.need_resources_on_remotes())
@mock.patch.object(utils, 'relation_get')
@mock.patch.object(utils, 'related_units')
@mock.patch.object(utils, 'relation_ids')
def test_need_resources_on_remotes_missing(self, relation_ids,
related_units,
relation_get):
rdata = {
'pacemaker-remote:49': {
'pacemaker-remote/0': {},
'pacemaker-remote/1': {},
'pacemaker-remote/2': {}}}
relation_ids.side_effect = lambda x: rdata.keys()
related_units.side_effect = lambda x: rdata[x].keys()
relation_get.side_effect = lambda x, y, z: rdata[z][y].get(x, None)
with self.assertRaises(ValueError):
self.assertTrue(utils.need_resources_on_remotes())
@mock.patch.object(utils, 'need_resources_on_remotes')
@mock.patch('pcmk.commit')
def test_set_cluster_symmetry_true(self, commit,
need_resources_on_remotes):
need_resources_on_remotes.return_value = True
utils.set_cluster_symmetry()
commit.assert_called_once_with(
'crm configure property symmetric-cluster=true',
failure_is_fatal=True)
@mock.patch.object(utils, 'need_resources_on_remotes')
@mock.patch('pcmk.commit')
def test_set_cluster_symmetry_false(self, commit,
need_resources_on_remotes):
need_resources_on_remotes.return_value = False
utils.set_cluster_symmetry()
commit.assert_called_once_with(
'crm configure property symmetric-cluster=false',
failure_is_fatal=True)
@mock.patch.object(utils, 'need_resources_on_remotes')
@mock.patch('pcmk.commit')
def test_set_cluster_symmetry_unknown(self, commit,
need_resources_on_remotes):
need_resources_on_remotes.side_effect = ValueError()
utils.set_cluster_symmetry()
self.assertFalse(commit.called)
@mock.patch('pcmk.commit')
@mock.patch('pcmk.crm_opt_exists')
@mock.patch('pcmk.list_nodes')
def test_add_location_rules_for_local_nodes(self, list_nodes,
crm_opt_exists, commit):
existing_resources = ['loc-res1-node1']
list_nodes.return_value = ['node1', 'node2']
crm_opt_exists.side_effect = lambda x: x in existing_resources
utils.add_location_rules_for_local_nodes('res1')
commit.assert_called_once_with(
'crm -w -F configure location loc-res1-node2 res1 0: node2',
failure_is_fatal=True)
@mock.patch('pcmk.is_resource_present')
@mock.patch('pcmk.commit')
def test_configure_pacemaker_remote(self, commit, is_resource_present):
is_resource_present.return_value = False
self.assertEqual(
utils.configure_pacemaker_remote(
'juju-aa0ba5-zaza-ed2ce6f303f0-10'),
'juju-aa0ba5-zaza-ed2ce6f303f0-10')
commit.assert_called_once_with(
'crm configure primitive juju-aa0ba5-zaza-ed2ce6f303f0-10 '
'ocf:pacemaker:remote params '
'server=juju-aa0ba5-zaza-ed2ce6f303f0-10 '
'reconnect_interval=60 op monitor interval=30s',
failure_is_fatal=True)
@mock.patch('pcmk.is_resource_present')
@mock.patch('pcmk.commit')
def test_configure_pacemaker_remote_fqdn(self, commit,
is_resource_present):
is_resource_present.return_value = False
self.assertEqual(
utils.configure_pacemaker_remote(
'juju-aa0ba5-zaza-ed2ce6f303f0-10.maas'),
'juju-aa0ba5-zaza-ed2ce6f303f0-10')
commit.assert_called_once_with(
'crm configure primitive juju-aa0ba5-zaza-ed2ce6f303f0-10 '
'ocf:pacemaker:remote params '
'server=juju-aa0ba5-zaza-ed2ce6f303f0-10.maas '
'reconnect_interval=60 op monitor interval=30s',
failure_is_fatal=True)
@mock.patch('pcmk.is_resource_present')
@mock.patch('pcmk.commit')
def test_configure_pacemaker_remote_duplicate(self, commit,
is_resource_present):
is_resource_present.return_value = True
self.assertEqual(
utils.configure_pacemaker_remote(
'juju-aa0ba5-zaza-ed2ce6f303f0-10.maas'),
'juju-aa0ba5-zaza-ed2ce6f303f0-10')
self.assertFalse(commit.called)
@mock.patch('pcmk.commit')
def test_cleanup_remote_nodes(self, commit):
commit.return_value = 0
utils.cleanup_remote_nodes(['res-node1', 'res-node2'])
commit_calls = [
mock.call(
'crm resource cleanup res-node1',
failure_is_fatal=False),
mock.call(
'crm resource cleanup res-node2',
failure_is_fatal=False)]
commit.assert_has_calls(commit_calls)
@mock.patch.object(utils, 'relation_get')
@mock.patch.object(utils, 'related_units')
@mock.patch.object(utils, 'relation_ids')
@mock.patch.object(utils, 'add_location_rules_for_local_nodes')
@mock.patch.object(utils, 'configure_pacemaker_remote')
@mock.patch.object(utils, 'configure_maas_stonith_resource')
@mock.patch.object(utils, 'cleanup_remote_nodes')
def test_configure_pacemaker_remote_resources(
self,
cleanup_remote_nodes,
configure_maas_stonith_resource,
configure_pacemaker_remote,
add_location_rules_for_local_nodes,
relation_ids,
related_units,
relation_get):
rdata = {
'pacemaker-remote:49': {
'pacemaker-remote/0': {
'remote-hostname': '"node1"',
'stonith-hostname': '"st-node1"'},
'pacemaker-remote/1': {
'remote-hostname': '"node2"'},
'pacemaker-remote/2': {
'stonith-hostname': '"st-node3"'}}}
relation_ids.side_effect = lambda x: rdata.keys()
related_units.side_effect = lambda x: sorted(rdata[x].keys())
relation_get.side_effect = lambda x, y, z: rdata[z][y].get(x, None)
configure_pacemaker_remote.side_effect = lambda x: 'res-{}'.format(x)
utils.configure_pacemaker_remote_resources()
remote_calls = [
mock.call('node1'),
mock.call('node2')]
configure_pacemaker_remote.assert_has_calls(
remote_calls,
any_order=True)
cleanup_remote_nodes.assert_called_once_with(
['res-node1', 'res-node2'])
@mock.patch.object(utils, 'config')
@mock.patch('pcmk.commit')
@mock.patch('pcmk.is_resource_present')
@@ -587,12 +790,12 @@
'maas_credentials': 'apikey'}
is_resource_present.return_value = False
config.side_effect = lambda x: cfg.get(x)
utils.configure_maas_stonith_resource('node1')
utils.configure_maas_stonith_resource(['node1'])
cmd = (
"crm configure primitive st-node1 "
"crm configure primitive st-maas-3975c9d "
"stonith:external/maas "
"params url='http://maas/2.0' apikey='apikey' "
"hostnames=node1 "
"hostnames='node1' "
"op monitor interval=25 start-delay=25 "
"timeout=25")
commit_calls = [
@@ -606,7 +809,9 @@
@mock.patch.object(utils, 'config')
@mock.patch('pcmk.commit')
@mock.patch('pcmk.is_resource_present')
@mock.patch('pcmk.crm_update_resource')
def test_configure_maas_stonith_resource_duplicate(self,
crm_update_resource,
is_resource_present,
commit, config):
cfg = {
@@ -614,8 +819,15 @@
'maas_credentials': 'apikey'}
is_resource_present.return_value = True
config.side_effect = lambda x: cfg.get(x)
utils.configure_maas_stonith_resource('node1')
self.assertFalse(commit.called)
utils.configure_maas_stonith_resource(['node1'])
crm_update_resource.assert_called_once_with(
'st-maas-3975c9d',
'stonith:external/maas',
("params url='http://maas/2.0' apikey='apikey' hostnames='node1' "
"op monitor interval=25 start-delay=25 timeout=25"))
commit.assert_called_once_with(
'crm configure property stonith-enabled=true',
failure_is_fatal=True)
@mock.patch.object(utils, 'config')
@mock.patch('pcmk.commit')
@@ -675,3 +887,89 @@
def test_get_member_ready_nodes(self, get_node_flags):
utils.get_member_ready_nodes()
get_node_flags.assert_called_once_with('member_ready')
@mock.patch('pcmk.commit')
@mock.patch('pcmk.list_nodes')
@mock.patch.object(utils, 'add_location_rules_for_local_nodes')
@mock.patch.object(utils, 'need_resources_on_remotes')
def test_configure_resources_on_remotes(self, need_resources_on_remotes,
add_location_rules_for_local_nodes,
list_nodes, commit):
list_nodes.return_value = ['node1', 'node2', 'node3']
need_resources_on_remotes.return_value = False
clones = {
'cl_res_masakari_haproxy': u'res_masakari_haproxy'}
resources = {
'res_masakari_1e39e82_vip': u'ocf:heartbeat:IPaddr2',
'res_masakari_flump': u'ocf:heartbeat:IPaddr2',
'res_masakari_haproxy': u'lsb:haproxy'}
groups = {
'grp_masakari_vips': 'res_masakari_1e39e82_vip'}
utils.configure_resources_on_remotes(
resources=resources,
clones=clones,
groups=groups)
add_loc_calls = [
mock.call('cl_res_masakari_haproxy'),
mock.call('res_masakari_flump'),
mock.call('grp_masakari_vips')]
add_location_rules_for_local_nodes.assert_has_calls(
add_loc_calls,
any_order=True)
commit.assert_called_once_with(
'crm_resource --resource cl_res_masakari_haproxy '
'--set-parameter clone-max '
'--meta --parameter-value 3',
failure_is_fatal=True)
@mock.patch('pcmk.commit')
@mock.patch('pcmk.list_nodes')
@mock.patch.object(utils, 'add_location_rules_for_local_nodes')
@mock.patch.object(utils, 'need_resources_on_remotes')
def test_configure_resources_on_remotes_true(
self,
need_resources_on_remotes,
add_location_rules_for_local_nodes,
list_nodes,
commit):
list_nodes.return_value = ['node1', 'node2', 'node3']
need_resources_on_remotes.return_value = True
clones = {
'cl_res_masakari_haproxy': u'res_masakari_haproxy'}
resources = {
'res_masakari_1e39e82_vip': u'ocf:heartbeat:IPaddr2',
'res_masakari_flump': u'ocf:heartbeat:IPaddr2',
'res_masakari_haproxy': u'lsb:haproxy'}
groups = {
'grp_masakari_vips': 'res_masakari_1e39e82_vip'}
utils.configure_resources_on_remotes(
resources=resources,
clones=clones,
groups=groups)
self.assertFalse(commit.called)
@mock.patch('pcmk.commit')
@mock.patch('pcmk.list_nodes')
@mock.patch.object(utils, 'add_location_rules_for_local_nodes')
@mock.patch.object(utils, 'need_resources_on_remotes')
def test_configure_resources_on_remotes_unknown(
self,
need_resources_on_remotes,
add_location_rules_for_local_nodes,
list_nodes,
commit):
list_nodes.return_value = ['node1', 'node2', 'node3']
need_resources_on_remotes.side_effect = ValueError
clones = {
'cl_res_masakari_haproxy': u'res_masakari_haproxy'}
resources = {
'res_masakari_1e39e82_vip': u'ocf:heartbeat:IPaddr2',
'res_masakari_flump': u'ocf:heartbeat:IPaddr2',
'res_masakari_haproxy': u'lsb:haproxy'}
groups = {
'grp_masakari_vips': 'res_masakari_1e39e82_vip'}
utils.configure_resources_on_remotes(
resources=resources,
clones=clones,
groups=groups)
self.assertFalse(commit.called)