Use charmhelpers' generate_ha_relation_data for the ha relation

Use the generate_ha_relation_data helper from charmhelpers to generate
the data sent down the relation to the hacluster charm. This results in
two changes in behaviour:

1) The charm no longer specifies a NIC name to bind the VIP, because
   Pacemaker VIP resources can automatically detect and configure the
   correct iface and netmask parameters from the unit's local network
   configuration.

2) The original iface-named VIP resource is stopped and deleted before
   the new short-hash-named VIP resource is created.

Change-Id: I116b1ffd02803b71ecbb3e6612ee392698b6eaa4
commit 8c7bc961a9
parent ade222568d
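
For illustration of 1) and 2): the helper now keys each VIP resource on a
short hash of the VIP address instead of the local interface name, which is
why the old iface-named resource has to be deleted. A minimal sketch of the
naming scheme (the function name is hypothetical; the hashing mirrors the
update_hacluster_vip() change in the diff below):

    import hashlib

    def short_hash_vip_key(service, vip):
        # New-style name: 'res_<service>_<sha1(vip)[:7]>_vip'.
        # Stable across units because it depends only on the VIP itself,
        # never on local interface naming.
        return 'res_{}_{}_vip'.format(
            service, hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])

    # e.g. short_hash_vip_key('heat', '10.5.105.3') yields a name like
    # 'res_heat_xxxxxxx_vip', replacing an old iface-named resource
    # such as 'res_heat_eth0_vip'.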
@@ -168,7 +168,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                 'nrpe', 'openvswitch-odl', 'neutron-api-odl',
                 'odl-controller', 'cinder-backup', 'nexentaedge-data',
                 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                'cinder-nexentaedge', 'nexentaedge-mgmt']))
+                'cinder-nexentaedge', 'nexentaedge-mgmt',
+                'ceilometer-agent']))

         if self.openstack:
             for svc in services:
@@ -23,6 +23,7 @@
 Helpers for high availability.
 """

+import hashlib
 import json

 import re
@@ -35,7 +36,6 @@ from charmhelpers.core.hookenv import (
     config,
     status_set,
     DEBUG,
-    WARNING,
 )

 from charmhelpers.core.host import (
@@ -124,13 +124,29 @@ def expect_ha():
     return len(ha_related_units) > 0 or config('vip') or config('dns-ha')


-def generate_ha_relation_data(service):
+def generate_ha_relation_data(service, extra_settings=None):
     """ Generate relation data for ha relation

     Based on configuration options and unit interfaces, generate a json
     encoded dict of relation data items for the hacluster relation,
     providing configuration for DNS HA or VIP's + haproxy clone sets.

+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
+
+    @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
     @returns dict: json encoded data for use with relation_set
     """
     _haproxy_res = 'res_{}_haproxy'.format(service)
@@ -149,6 +165,13 @@ def generate_ha_relation_data(service):
         },
     }

+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
     if config('dns-ha'):
         update_hacluster_dns_ha(service, _relation_data)
     else:
@@ -232,30 +255,45 @@ def update_hacluster_vip(service, relation_data):
     """
     cluster_config = get_hacluster_config()
     vip_group = []
+    vips_to_delete = []
     for vip in cluster_config['vip'].split():
         if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+            res_vip = 'ocf:heartbeat:IPv6addr'
             vip_params = 'ipv6addr'
         else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+            res_vip = 'ocf:heartbeat:IPaddr2'
             vip_params = 'ip'

-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+        iface = get_iface_for_address(vip)
+        netmask = get_netmask_for_address(vip)
+
+        fallback_params = False
+        if iface is None:
+            iface = config('vip_iface')
+            fallback_params = True
+        if netmask is None:
+            netmask = config('vip_cidr')
+            fallback_params = True

         if iface is not None:
+            # NOTE(jamespage): Delete old VIP resources
+            # Old style naming encoding iface in name
+            # does not work well in environments where
+            # interface/subnet wiring is not consistent
             vip_key = 'res_{}_{}_vip'.format(service, iface)
-            if vip_key in vip_group:
-                if vip not in relation_data['resource_params'][vip_key]:
-                    vip_key = '{}_{}'.format(vip_key, vip_params)
-                else:
-                    log("Resource '%s' (vip='%s') already exists in "
-                        "vip group - skipping" % (vip_key, vip), WARNING)
-                    continue
+            if vip_key in vips_to_delete:
+                vip_key = '{}_{}'.format(vip_key, vip_params)
+            vips_to_delete.append(vip_key)

-            relation_data['resources'][vip_key] = res_neutron_vip
+            vip_key = 'res_{}_{}_vip'.format(
+                service,
+                hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
+
+            relation_data['resources'][vip_key] = res_vip
+            # NOTE(jamespage):
+            # Use option provided vip params if these were used
+            # instead of auto-detected values
+            if fallback_params:
                 relation_data['resource_params'][vip_key] = (
                     'params {ip}="{vip}" cidr_netmask="{netmask}" '
                     'nic="{iface}"'.format(ip=vip_params,
@@ -263,9 +301,29 @@ def update_hacluster_vip(service, relation_data):
                                            iface=iface,
                                            netmask=netmask)
                 )
+            else:
+                # NOTE(jamespage):
+                # let heartbeat figure out which interface and
+                # netmask to configure, which works nicely
+                # when network interface naming is not
+                # consistent across units.
+                relation_data['resource_params'][vip_key] = (
+                    'params {ip}="{vip}"'.format(ip=vip_params,
+                                                 vip=vip))
+
             vip_group.append(vip_key)

+    if vips_to_delete:
+        try:
+            relation_data['delete_resources'].extend(vips_to_delete)
+        except KeyError:
+            relation_data['delete_resources'] = vips_to_delete
+
     if len(vip_group) >= 1:
+        key = 'grp_{}_vips'.format(service)
+        try:
+            relation_data['groups'][key] = ' '.join(vip_group)
+        except KeyError:
             relation_data['groups'] = {
-                'grp_{}_vips'.format(service): ' '.join(vip_group)
+                key: ' '.join(vip_group)
             }
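
Taken together, the two hunks above mean that for a single VIP whose iface
and netmask are auto-detected (no vip_iface/vip_cidr fallback), the helper
builds relation data shaped roughly as below before generate_ha_relation_data
json-encodes it for relation_set; the hash and addresses are illustrative
only:

    {
        'resources': {'res_heat_xxxxxxx_vip': 'ocf:heartbeat:IPaddr2'},
        'resource_params': {'res_heat_xxxxxxx_vip': 'params ip="10.5.105.3"'},
        'delete_resources': ['res_heat_eth0_vip'],
        'groups': {'grp_heat_vips': 'res_heat_xxxxxxx_vip'},
    }

Note there is no nic= or cidr_netmask= in resource_params: Pacemaker works
those out itself unless the operator forced values via config.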
@@ -73,6 +73,8 @@ from charmhelpers.core.host import (
     service_running,
     service_pause,
     service_resume,
+    service_stop,
+    service_start,
     restart_on_change_helper,
 )
 from charmhelpers.fetch import (
@@ -299,7 +301,7 @@ def get_os_codename_install_source(src):
     rel = ''
     if src is None:
         return rel
-    if src in ['distro', 'distro-proposed']:
+    if src in ['distro', 'distro-proposed', 'proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
         except KeyError:
@@ -1303,6 +1305,65 @@ def is_unit_paused_set():
     return False


+def manage_payload_services(action, services=None, charm_func=None):
+    """Run an action against all services.
+
+    An optional charm_func() can be called. It should raise an Exception to
+    indicate that the function failed. If it was successful it should return
+    None or an optional message.
+
+    The signature for charm_func is:
+    charm_func() -> message: str
+
+    charm_func() is executed after any services are stopped, if supplied.
+
+    The services object can either be:
+    - None : no services were passed (an empty dict is returned)
+    - a list of strings
+    - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+    - An array of [{'service': service_name, ...}, ...]
+
+    :param action: Action to run: pause, resume, start or stop.
+    :type action: str
+    :param services: See above
+    :type services: See above
+    :param charm_func: function to run for custom charm pausing.
+    :type charm_func: f()
+    :returns: Status boolean and list of messages
+    :rtype: (bool, [])
+    :raises: RuntimeError
+    """
+    actions = {
+        'pause': service_pause,
+        'resume': service_resume,
+        'start': service_start,
+        'stop': service_stop}
+    action = action.lower()
+    if action not in actions.keys():
+        raise RuntimeError(
+            "action: {} must be one of: {}".format(action,
+                                                   ', '.join(actions.keys())))
+    services = _extract_services_list_helper(services)
+    messages = []
+    success = True
+    if services:
+        for service in services.keys():
+            rc = actions[action](service)
+            if not rc:
+                success = False
+                messages.append("{} didn't {} cleanly.".format(service,
+                                                               action))
+    if charm_func:
+        try:
+            message = charm_func()
+            if message:
+                messages.append(message)
+        except Exception as e:
+            success = False
+            messages.append(str(e))
+    return success, messages
+
+
 def pause_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
     """Pause a unit by stopping the services and setting 'unit-paused'
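
The two hunks below turn pause_unit and resume_unit into thin wrappers over
this new helper. A hedged sketch of calling it directly, e.g. from a charm
action (the service names here are hypothetical):

    success, messages = manage_payload_services(
        'stop', services=['haproxy', 'heat-api'])
    if not success:
        raise RuntimeError('; '.join(messages))

The (bool, [messages]) return shape matches the docstring above; an unknown
action raises RuntimeError before any service is touched.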
@@ -1333,20 +1394,10 @@ def pause_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            stopped = service_pause(service)
-            if not stopped:
-                messages.append("{} didn't stop cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'pause',
+        services=services,
+        charm_func=charm_func)
     set_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -1385,20 +1436,10 @@ def resume_unit(assess_status_func, services=None, ports=None,
     @returns None
     @raises Exception(message) on an error for action_fail().
     """
-    services = _extract_services_list_helper(services)
-    messages = []
-    if services:
-        for service in services.keys():
-            started = service_resume(service)
-            if not started:
-                messages.append("{} didn't start cleanly.".format(service))
-    if charm_func:
-        try:
-            message = charm_func()
-            if message:
-                messages.append(message)
-        except Exception as e:
-            message.append(str(e))
+    _, messages = manage_payload_services(
+        'resume',
+        services=services,
+        charm_func=charm_func)
     clear_unit_paused()
     if assess_status_func:
         message = assess_status_func()
@@ -36,8 +36,10 @@ def loopback_devices():
     '''
     loopbacks = {}
     cmd = ['losetup', '-a']
-    devs = [d.strip().split(' ') for d in
-            check_output(cmd).splitlines() if d != '']
+    output = check_output(cmd)
+    if six.PY3:
+        output = output.decode('utf-8')
+    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
     for dev, _, f in devs:
         loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
     return loopbacks
@@ -52,13 +52,9 @@ from charmhelpers.fetch import (

 from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
-    get_hacluster_config,
 )

 from charmhelpers.contrib.network.ip import (
-    get_iface_for_address,
-    get_netmask_for_address,
-    is_ipv6,
     get_relation_ip,
 )

@@ -71,7 +67,7 @@ from charmhelpers.contrib.openstack.utils import (
 )

 from charmhelpers.contrib.openstack.ha.utils import (
-    update_dns_ha_resource_params,
+    generate_ha_relation_data,
 )

 from charmhelpers.contrib.openstack.ip import (
@@ -357,73 +353,8 @@ def cluster_changed():

 @hooks.hook('ha-relation-joined')
 def ha_joined(relation_id=None):
-    cluster_config = get_hacluster_config()
-
-    resources = {
-        'res_heat_haproxy': 'lsb:haproxy'
-    }
-
-    resource_params = {
-        'res_heat_haproxy': 'op monitor interval="5s"'
-    }
-
-    if config('dns-ha'):
-        update_dns_ha_resource_params(relation_id=relation_id,
-                                      resources=resources,
-                                      resource_params=resource_params)
-    else:
-        vip_group = []
-        for vip in cluster_config['vip'].split():
-            if is_ipv6(vip):
-                res_heat_vip = 'ocf:heartbeat:IPv6addr'
-                vip_params = 'ipv6addr'
-            else:
-                res_heat_vip = 'ocf:heartbeat:IPaddr2'
-                vip_params = 'ip'
-
-            iface = (get_iface_for_address(vip) or
-                     config('vip_iface'))
-            netmask = (get_netmask_for_address(vip) or
-                       config('vip_cidr'))
-
-            if iface is not None:
-                vip_key = 'res_heat_{}_vip'.format(iface)
-                if vip_key in vip_group:
-                    if vip not in resource_params[vip_key]:
-                        vip_key = '{}_{}'.format(vip_key, vip_params)
-                    else:
-                        log("Resource '{}' (vip='{}') already exists in "
-                            "vip group - skipping".format(vip_key, vip),
-                            WARNING)
-                        continue
-
-                resources[vip_key] = res_heat_vip
-                resource_params[vip_key] = (
-                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
-                    ' nic="{iface}"'.format(ip=vip_params,
-                                            vip=vip,
-                                            iface=iface,
-                                            netmask=netmask)
-                )
-                vip_group.append(vip_key)
-
-        if len(vip_group) >= 1:
-            relation_set(relation_id=relation_id,
-                         groups={'grp_heat_vips': ' '.join(vip_group)})
-
-    init_services = {
-        'res_heat_haproxy': 'haproxy'
-    }
-    clones = {
-        'cl_heat_haproxy': 'res_heat_haproxy'
-    }
-    relation_set(relation_id=relation_id,
-                 init_services=init_services,
-                 corosync_bindiface=cluster_config['ha-bindiface'],
-                 corosync_mcastport=cluster_config['ha-mcastport'],
-                 resources=resources,
-                 resource_params=resource_params,
-                 clones=clones)
+    settings = generate_ha_relation_data('heat')
+    relation_set(relation_id=relation_id, **settings)


 @hooks.hook('ha-relation-changed')
@@ -63,7 +63,7 @@ TO_PATCH = [
     'charm_dir',
     'sync_db_with_multi_ipv6_addresses',
     # charmhelpers.contrib.openstack.ha.utils
-    'update_dns_ha_resource_params',
+    'generate_ha_relation_data',
     # charmhelpers.contrib.hahelpers.cluster_utils
     # heat_utils
     'restart_map',
@@ -79,10 +79,7 @@ TO_PATCH = [
     'relation_ids',
     'relation_get',
     'local_unit',
-    'get_hacluster_config',
-    'get_iface_for_address',
     'get_relation_ip',
-    'get_netmask_for_address',
 ]


@@ -327,64 +324,8 @@ class HeatRelationTests(CharmTestCase):
         relations.db_changed()
         self.assertFalse(self.migrate_database.called)

-    @patch.object(relations, 'CONFIGS')
-    def test_ha_joined(self, configs):
-        self.get_hacluster_config.return_value = {
-            'ha-bindiface': 'eth0',
-            'ha-mcastport': '5959',
-            'vip': '10.5.105.3'
-        }
-        self.get_iface_for_address.return_value = 'eth0'
-        self.get_netmask_for_address.return_value = '255.255.255.0'
-        relations.ha_joined()
-        expected = {
-            'relation_id': None,
-            'init_services': {'res_heat_haproxy': 'haproxy'},
-            'corosync_bindiface': 'eth0',
-            'corosync_mcastport': '5959',
-            'resources': {
-                'res_heat_haproxy': 'lsb:haproxy',
-                'res_heat_eth0_vip': 'ocf:heartbeat:IPaddr2'},
-            'resource_params': {
-                'res_heat_haproxy': 'op monitor interval="5s"',
-                'res_heat_eth0_vip': ('params ip="10.5.105.3" '
-                                      'cidr_netmask="255.255.255.0" '
-                                      'nic="eth0"')},
-            'clones': {'cl_heat_haproxy': 'res_heat_haproxy'}
-        }
-        self.relation_set.assert_called_with(**expected)
-
-    def test_ha_joined_dns_ha(self):
-        def _fake_update(resources, resource_params, relation_id=None):
-            resources.update({'res_heat_public_hostname': 'ocf:maas:dns'})
-            resource_params.update({'res_heat_public_hostname':
-                                    'params fqdn="keystone.maas" '
-                                    'ip_address="10.0.0.1"'})
-
-        self.test_config.set('dns-ha', True)
-        self.get_hacluster_config.return_value = {
-            'vip': None,
-            'ha-bindiface': 'em0',
-            'ha-mcastport': '8080',
-            'os-admin-hostname': None,
-            'os-internal-hostname': None,
-            'os-public-hostname': 'keystone.maas',
-        }
-        args = {
-            'relation_id': None,
-            'corosync_bindiface': 'em0',
-            'corosync_mcastport': '8080',
-            'init_services': {'res_heat_haproxy': 'haproxy'},
-            'resources': {'res_heat_public_hostname': 'ocf:maas:dns',
-                          'res_heat_haproxy': 'lsb:haproxy'},
-            'resource_params': {
-                'res_heat_public_hostname': 'params fqdn="keystone.maas" '
-                                            'ip_address="10.0.0.1"',
-                'res_heat_haproxy': 'op monitor interval="5s"'},
-            'clones': {'cl_heat_haproxy': 'res_heat_haproxy'}
-        }
-        self.update_dns_ha_resource_params.side_effect = _fake_update
-
-        relations.ha_joined()
-        self.assertTrue(self.update_dns_ha_resource_params.called)
-        self.relation_set.assert_called_with(**args)
+    def test_ha_relation_joined(self):
+        self.generate_ha_relation_data.return_value = {'rel_data': 'data'}
+        relations.ha_joined(relation_id='rid:23')
+        self.relation_set.assert_called_once_with(
+            relation_id='rid:23', rel_data='data')