Merge "Use chelper generate_ha_relation_data for ha rel"
commit 26451cebfc
@@ -168,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                      'nrpe', 'openvswitch-odl', 'neutron-api-odl',
                      'odl-controller', 'cinder-backup', 'nexentaedge-data',
                      'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                     'cinder-nexentaedge', 'nexentaedge-mgmt']))
+                     'cinder-nexentaedge', 'nexentaedge-mgmt',
+                     'ceilometer-agent']))
 
         if self.openstack:
             for svc in services:
@@ -23,6 +23,7 @@
 Helpers for high availability.
 """
 
+import hashlib
 import json
 
 import re
@@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
-    WARNING,
 )
 
 from charmhelpers.core.host import (
@@ -124,13 +124,29 @@ def expect_ha():
     return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
 
 
-def generate_ha_relation_data(service):
+def generate_ha_relation_data(service, extra_settings=None):
     """ Generate relation data for ha relation
 
     Based on configuration options and unit interfaces, generate a json
     encoded dict of relation data items for the hacluster relation,
     providing configuration for DNS HA or VIP's + haproxy clone sets.
 
+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
+
     @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
     _haproxy_res = 'res_{}_haproxy'.format(service)
@@ -149,6 +165,13 @@ def generate_ha_relation_data(service):
         },
     }
 
+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
     if config('dns-ha'):
         update_hacluster_dns_ha(service, _relation_data)
     else:
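
Reviewer note: the extra_settings merge above is shallow, a single dict.update() per top-level key. A minimal sketch of the semantics (resource names hypothetical):

    relation_data = {'resources': {'res_ks_haproxy': 'lsb:haproxy'}}
    extra = {'resources': {'res_ks_foo': 'ocf:heartbeat:Dummy'},
             'colocations': {'col_ks_foo': 'inf: res_ks_foo grp_ks_vips'}}
    for k, v in extra.items():
        if relation_data.get(k):
            relation_data[k].update(v)   # merge into an existing section
        else:
            relation_data[k] = v         # add a new section wholesale
    # relation_data now holds both resources plus the new 'colocations' key.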
@@ -232,39 +255,67 @@ def update_hacluster_vip(service, relation_data):
     """
     cluster_config = get_hacluster_config()
     vip_group = []
+    vips_to_delete = []
     for vip in cluster_config['vip'].split():
         if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            res_vip = 'ocf:heartbeat:IPv6addr'
             vip_params = 'ipv6addr'
         else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            res_vip = 'ocf:heartbeat:IPaddr2'
             vip_params = 'ip'
 
-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+        iface = get_iface_for_address(vip)
+        netmask = get_netmask_for_address(vip)
+
+        fallback_params = False
+        if iface is None:
+            iface = config('vip_iface')
+            fallback_params = True
+        if netmask is None:
+            netmask = config('vip_cidr')
+            fallback_params = True
 
         if iface is not None:
+            # NOTE(jamespage): Delete old VIP resources
+            # Old style naming encoding iface in name
+            # does not work well in environments where
+            # interface/subnet wiring is not consistent
             vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vip_group:
-                if vip not in relation_data['resource_params'][vip_key]:
-                    vip_key = '{}_{}'.format(vip_key, vip_params)
-                else:
-                    log("Resource '%s' (vip='%s') already exists in "
-                        "vip group - skipping" % (vip_key, vip), WARNING)
-                    continue
+            if vip_key in vips_to_delete:
+                vip_key = '{}_{}'.format(vip_key, vip_params)
+            vips_to_delete.append(vip_key)
+
+            vip_key = 'res_{}_{}_vip'.format(
+                service,
+                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
+
+            relation_data['resources'][vip_key] = res_vip
+            # NOTE(jamespage):
+            # Use option provided vip params if these were used
+            # instead of auto-detected values
+            if fallback_params:
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                    'nic="{iface}"'.format(ip=vip_params,
+                                           vip=vip,
+                                           iface=iface,
+                                           netmask=netmask)
+                )
+            else:
+                # NOTE(jamespage):
+                # let heartbeat figure out which interface and
+                # netmask to configure, which works nicely
+                # when network interface naming is not
+                # consistent across units.
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}"'.format(ip=vip_params,
+                                                 vip=vip))
 
-            relation_data['resources'][vip_key] = res_neutron_vip
-            relation_data['resource_params'][vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}" '
-                'nic="{iface}"'.format(ip=vip_params,
-                                       vip=vip,
-                                       iface=iface,
-                                       netmask=netmask)
-            )
             vip_group.append(vip_key)
 
+    if vips_to_delete:
+        relation_data['delete_resources'] = vips_to_delete
+
     if len(vip_group) >= 1:
         relation_data['groups'] = {
             'grp_{}_vips'.format(service): ' '.join(vip_group)
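
Reviewer note: VIP resource keys are now derived from a short SHA-1 of the VIP itself rather than the interface name, so the key stays stable when NIC naming differs across units; the old iface-based name is queued in vips_to_delete for removal. A sketch of the naming (VIP value hypothetical):

    import hashlib

    vip = '10.10.10.10'
    vip_key = 'res_{}_{}_vip'.format(
        'ks', hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
    # vip_key is e.g. 'res_ks_<7-hex-chars>_vip', independent of which
    # interface happens to carry the VIP on any given unit.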
@@ -73,6 +73,8 @@ from charmhelpers.core.host import (
     service_running,
     service_pause,
     service_resume,
+    service_stop,
+    service_start,
     restart_on_change_helper,
 )
 from charmhelpers.fetch import (
@@ -299,7 +301,7 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
-    if src in ['distro', 'distro-proposed']:
+    if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
         except KeyError:
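
Reviewer note: 'proposed' now resolves like 'distro-proposed', looking the OpenStack codename up from the underlying Ubuntu series. Sketch (assuming a bionic unit, where the distro release ships Queens):

    from charmhelpers.contrib.openstack.utils import (
        get_os_codename_install_source,
    )

    # UBUNTU_OPENSTACK_RELEASE maps the Ubuntu series to the OpenStack
    # release it ships, e.g. bionic -> 'queens'.
    rel = get_os_codename_install_source('proposed')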
@@ -1303,6 +1305,65 @@ def is_unit_paused_set():
         return False
 
 
+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+    charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+      - None : no services were passed (an empty dict is returned)
+      - a list of strings
+      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+      - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
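
Reviewer note: pause_unit() and resume_unit() below are refactored to delegate to this new helper. A usage sketch (service names hypothetical):

    from charmhelpers.contrib.openstack.utils import manage_payload_services

    success, messages = manage_payload_services(
        'pause', services=['haproxy', 'apache2'])
    if not success:
        # messages collects one entry per service that failed, plus any
        # exception text raised by an optional charm_func.
        raise RuntimeError(', '.join(messages))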
@@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            stopped = service_pause(service)
-            if not stopped:
-                messages.append("{} didn't stop cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
     set_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            started = service_resume(service)
-            if not started:
-                messages.append("{} didn't start cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'resume',
+        services=services,
+        charm_func=charm_func)
     clear_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -36,8 +36,10 @@ def loopback_devices():
     '''
     loopbacks = {}
     cmd = ['losetup', '-a']
-    devs = [d.strip().split(' ') for d in
-            check_output(cmd).splitlines() if d != '']
+    output = check_output(cmd)
+    if six.PY3:
+        output = output.decode('utf-8')
+    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
     for dev, _, f in devs:
         loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
     return loopbacks
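
Reviewer note: under Python 3, check_output() returns bytes, so splitting and regex-matching against str patterns fails; hence the explicit decode. Equivalent standalone sketch:

    from subprocess import check_output

    output = check_output(['losetup', '-a'])
    if isinstance(output, bytes):  # same effect as the six.PY3 guard above
        output = output.decode('utf-8')
    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']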
@@ -128,13 +128,12 @@ from keystone_utils import (
 
 from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
-    get_hacluster_config,
     https,
     is_clustered,
 )
 
 from charmhelpers.contrib.openstack.ha.utils import (
-    update_dns_ha_resource_params,
+    generate_ha_relation_data,
     expect_ha,
 )
 
@@ -149,9 +148,6 @@ from charmhelpers.contrib.openstack.ip import (
 )
 
 from charmhelpers.contrib.network.ip import (
-    get_iface_for_address,
-    get_netmask_for_address,
-    is_ipv6,
     get_relation_ip,
 )
 from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
@@ -545,71 +541,8 @@ def leader_settings_changed():
 
 @hooks.hook('ha-relation-joined')
 def ha_joined(relation_id=None):
-    cluster_config = get_hacluster_config()
-    resources = {
-        'res_ks_haproxy': 'lsb:haproxy',
-    }
-    resource_params = {
-        'res_ks_haproxy': 'op monitor interval="5s"'
-    }
-
-    if config('dns-ha'):
-        update_dns_ha_resource_params(relation_id=relation_id,
-                                      resources=resources,
-                                      resource_params=resource_params)
-    else:
-        vip_group = []
-        for vip in cluster_config['vip'].split():
-            if is_ipv6(vip):
-                res_ks_vip = 'ocf:heartbeat:IPv6addr'
-                vip_params = 'ipv6addr'
-            else:
-                res_ks_vip = 'ocf:heartbeat:IPaddr2'
-                vip_params = 'ip'
-
-            iface = (get_iface_for_address(vip) or
-                     config('vip_iface'))
-            netmask = (get_netmask_for_address(vip) or
-                       config('vip_cidr'))
-
-            if iface is not None:
-                vip_key = 'res_ks_{}_vip'.format(iface)
-                if vip_key in vip_group:
-                    if vip not in resource_params[vip_key]:
-                        vip_key = '{}_{}'.format(vip_key, vip_params)
-                    else:
-                        log("Resource '{0}' (vip='{1}') already exists in "
-                            "vip group - skipping"
-                            .format(vip_key, vip), WARNING)
-                        continue
-
-                vip_group.append(vip_key)
-                resources[vip_key] = res_ks_vip
-                resource_params[vip_key] = (
-                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
-                    ' nic="{iface}"'.format(ip=vip_params,
-                                            vip=vip,
-                                            iface=iface,
-                                            netmask=netmask)
-                )
-
-        if len(vip_group) >= 1:
-            relation_set(relation_id=relation_id,
-                         groups={CLUSTER_RES: ' '.join(vip_group)})
-
-    init_services = {
-        'res_ks_haproxy': 'haproxy'
-    }
-    clones = {
-        'cl_ks_haproxy': 'res_ks_haproxy'
-    }
-    relation_set(relation_id=relation_id,
-                 init_services=init_services,
-                 corosync_bindiface=cluster_config['ha-bindiface'],
-                 corosync_mcastport=cluster_config['ha-mcastport'],
-                 resources=resources,
-                 resource_params=resource_params,
-                 clones=clones)
+    settings = generate_ha_relation_data('ks')
+    relation_set(relation_id=relation_id, **settings)
 
 
 @hooks.hook('ha-relation-changed')
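
Reviewer note: generate_ha_relation_data() hands back the hacluster settings json-encoded per top-level key (keys prefixed 'json_' in the charmhelpers code synced above), which relation_set() forwards as-is. Shape sketch (values illustrative):

    from charmhelpers.contrib.openstack.ha.utils import (
        generate_ha_relation_data,
    )
    from charmhelpers.core.hookenv import relation_set

    settings = generate_ha_relation_data('ks')
    # settings resembles (values illustrative):
    # {'json_resources': '{"res_ks_<hash>_vip":"ocf:heartbeat:IPaddr2",
    #                      "res_ks_haproxy":"lsb:haproxy"}',
    #  'json_resource_params': '...',
    #  'json_init_services': '{"res_ks_haproxy":"haproxy"}',
    #  'json_clones': '{"cl_ks_haproxy":"res_ks_haproxy"}'}
    relation_set(relation_id=None, **settings)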
@@ -66,11 +66,9 @@ TO_PATCH = [
     # charmhelpers.contrib.openstack.ip
     'resolve_address',
     # charmhelpers.contrib.openstack.ha.utils
-    'update_dns_ha_resource_params',
     'expect_ha',
     # charmhelpers.contrib.hahelpers.cluster_utils
     'is_elected_leader',
-    'get_hacluster_config',
     'is_clustered',
     'enable_memcache',
     # keystone_utils
@@ -95,9 +93,8 @@ TO_PATCH = [
     # other
     'check_call',
     'execd_preinstall',
+    'generate_ha_relation_data',
     # ip
-    'get_iface_for_address',
-    'get_netmask_for_address',
     'is_service_present',
     'delete_service_entry',
     'os_release',
@@ -472,165 +469,10 @@ class KeystoneRelationTests(CharmTestCase):
         self.assertTrue(update.called)
 
     def test_ha_joined(self):
-        self.get_hacluster_config.return_value = {
-            'vip': '10.10.10.10',
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080'
-        }
-        self.get_iface_for_address.return_value = 'em1'
-        self.get_netmask_for_address.return_value = '255.255.255.0'
-        hooks.ha_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_ks_haproxy': 'haproxy'},
-            'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPaddr2',
-                          'res_ks_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_ks_em1_vip': 'params ip="10.10.10.10"'
-                                  ' cidr_netmask="255.255.255.0" nic="em1"',
-                'res_ks_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
-        }
-        self.relation_set.assert_called_with(**args)
-
-    def test_ha_joined_duplicate_vip_key(self):
-        self.get_hacluster_config.return_value = {
-            'vip': '10.10.10.10 10.10.10.10',
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080'
-        }
-        self.get_iface_for_address.return_value = 'em1'
-        self.get_netmask_for_address.return_value = '255.255.255.0'
-        hooks.ha_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_ks_haproxy': 'haproxy'},
-            'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPaddr2',
-                          'res_ks_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_ks_em1_vip': 'params ip="10.10.10.10"'
-                                  ' cidr_netmask="255.255.255.0" nic="em1"',
-                'res_ks_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
-        }
-        self.relation_set.assert_called_with(**args)
-
-    def test_ha_joined_dual_stack_vips(self):
-        self.get_hacluster_config.return_value = {
-            'vip': '10.10.10.10 2001:db8::abc',
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080'
-        }
-        self.get_iface_for_address.return_value = 'em1'
-        self.get_netmask_for_address.return_value = '255.255.255.0'
-        hooks.ha_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_ks_haproxy': 'haproxy'},
-            'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPaddr2',
-                          'res_ks_em1_vip_ipv6addr': 'ocf:heartbeat:IPv6addr',
-                          'res_ks_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_ks_em1_vip': 'params ip="10.10.10.10"'
-                                  ' cidr_netmask="255.255.255.0" nic="em1"',
-                'res_ks_em1_vip_ipv6addr': 'params ipv6addr="2001:db8::abc"'
-                                           ' cidr_netmask="255.255.255.0" nic="em1"',
-                'res_ks_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
-        }
-        self.relation_set.assert_called_with(**args)
-
-    def test_ha_joined_no_bound_ip(self):
-        self.get_hacluster_config.return_value = {
-            'vip': '10.10.10.10',
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080'
-        }
-        self.test_config.set('vip_iface', 'eth120')
-        self.test_config.set('vip_cidr', '21')
-        self.get_iface_for_address.return_value = None
-        self.get_netmask_for_address.return_value = None
-        hooks.ha_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_ks_haproxy': 'haproxy'},
-            'resources': {'res_ks_eth120_vip': 'ocf:heartbeat:IPaddr2',
-                          'res_ks_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_ks_eth120_vip': 'params ip="10.10.10.10"'
-                                     ' cidr_netmask="21" nic="eth120"',
-                'res_ks_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
-        }
-        self.relation_set.assert_called_with(**args)
-
-    def test_ha_joined_with_ipv6(self):
-        self.test_config.set('prefer-ipv6', True)
-        self.get_hacluster_config.return_value = {
-            'vip': '2001:db8:1::1',
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080'
-        }
-        self.get_iface_for_address.return_value = 'em1'
-        self.get_netmask_for_address.return_value = '64'
-        hooks.ha_joined()
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_ks_haproxy': 'haproxy'},
-            'resources': {'res_ks_em1_vip': 'ocf:heartbeat:IPv6addr',
-                          'res_ks_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_ks_em1_vip': 'params ipv6addr="2001:db8:1::1"'
-                                  ' cidr_netmask="64" nic="em1"',
-                'res_ks_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
-        }
-        self.relation_set.assert_called_with(**args)
-
-    def test_ha_joined_dns_ha(self):
-        def _fake_update(resources, resource_params, relation_id=None):
-            resources.update({'res_keystone_public_hostname': 'ocf:maas:dns'})
-            resource_params.update({'res_keystone_public_hostname':
-                                    'params fqdn="keystone.maas" '
-                                    'ip_address="10.0.0.1"'})
-
-        self.test_config.set('dns-ha', True)
-        self.get_hacluster_config.return_value = {
-            'vip': None,
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080',
-            'os-admin-hostname': None,
-            'os-internal-hostname': None,
-            'os-public-hostname': 'keystone.maas',
-        }
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_ks_haproxy': 'haproxy'},
-            'resources': {'res_keystone_public_hostname': 'ocf:maas:dns',
-                          'res_ks_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_keystone_public_hostname': 'params fqdn="keystone.maas" '
-                                                'ip_address="10.0.0.1"',
-                'res_ks_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_ks_haproxy': 'res_ks_haproxy'}
-        }
-        self.update_dns_ha_resource_params.side_effect = _fake_update
-
-        hooks.ha_joined()
-        self.assertTrue(self.update_dns_ha_resource_params.called)
-        self.relation_set.assert_called_with(**args)
+        self.generate_ha_relation_data.return_value = {'rel_data': 'data'}
+        hooks.ha_joined(relation_id='rid:23')
+        self.relation_set.assert_called_once_with(
+            relation_id='rid:23', rel_data='data')
 
     @patch('keystone_utils.log')
     @patch.object(hooks, 'CONFIGS')