Implement DNS high availability. Pass the correct information to
hacluster to register a DNS entry with MAAS 2.0 or greater rather
than using a virtual IP.

Charm-helpers sync to bring in DNS HA helpers

Change-Id: I543095a210e668f30c3617990b3ee668a3143dd9
David Ames 2016-06-15 11:08:31 -07:00
parent 89cee399db
commit 31ccb187c6
13 changed files with 463 additions and 69 deletions

.gitignore

@@ -5,3 +5,4 @@ bin
tags
*.sw[nop]
*.pyc
+.unit-state.db


@@ -39,6 +39,40 @@ installed in each nova node, and be related with Ceilometer service:
Ceilometer provides an API service that can be used to retrieve
Openstack metrics.
HA/Clustering
-------------
There are two mutually exclusive high availability options: using virtual
IP(s) or DNS. In both cases, a relationship to hacluster is required which
provides the corosync back end HA functionality.
To use virtual IP(s) the clustered nodes must be on the same subnet such that
the VIP is a valid IP on the subnet for one of the node's interfaces and each
node has an interface in said subnet. The VIP becomes a highly-available API
endpoint.
At a minimum, the config option 'vip' must be set in order to use virtual IP
HA. If multiple networks are being used, a VIP should be provided for each
network, separated by spaces. Optionally, vip_iface or vip_cidr may be
specified.
To use DNS high availability there are several prerequisites. However, DNS HA
does not require the clustered nodes to be on the same subnet.
Currently the DNS HA feature is only available for MAAS 2.0 or greater
environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must
have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s)
must be pre-registered in MAAS before use with DNS HA.
At a minimum, the config option 'dns-ha' must be set to true and at least one
of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
be set in order to use DNS HA. One or more of the above hostnames may be set.
The charm will throw an exception in the following circumstances:
If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
If both 'vip' and 'dns-ha' are set as they are mutually exclusive
If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are
set
Network Space support
---------------------


@@ -41,10 +41,11 @@ from charmhelpers.core.hookenv import (
relation_get,
config as config_get,
INFO,
-ERROR,
+DEBUG,
WARNING,
unit_get,
-is_leader as juju_is_leader
+is_leader as juju_is_leader,
+status_set,
)
from charmhelpers.core.decorators import (
retry_on_exception,

@@ -60,6 +61,10 @@ class HAIncompleteConfig(Exception):
pass

+class HAIncorrectConfig(Exception):
+pass

class CRMResourceNotFound(Exception):
pass
@@ -274,27 +279,71 @@ def get_hacluster_config(exclude_keys=None):
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
-ha-bindiface, ha-mcastport, vip
+ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+os-admin-hostname, os-public-hostname
param: exclude_keys: list of setting key(s) to be excluded.
returns: dict: A dict containing settings keyed by setting name.
-raises: HAIncompleteConfig if settings are missing.
+raises: HAIncompleteConfig if settings are missing or incorrect.
'''
-settings = ['ha-bindiface', 'ha-mcastport', 'vip']
+settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+'os-admin-hostname', 'os-public-hostname']
conf = {}
for setting in settings:
if exclude_keys and setting in exclude_keys:
continue
conf[setting] = config_get(setting)
-missing = []
-[missing.append(s) for s, v in six.iteritems(conf) if v is None]
-if missing:
-log('Insufficient config data to configure hacluster.', level=ERROR)
-raise HAIncompleteConfig
+if not valid_hacluster_config():
+raise HAIncorrectConfig('Insufficient or incorrect config data to '
+'configure hacluster.')
return conf
def valid_hacluster_config():
'''
Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
must be set.
Note: ha-bindiface and ha-mcastport both have defaults and will always
be set. We only care that either vip or dns-ha is set.
:returns: boolean: valid config returns true.
raises: HAIncorrectConfig if settings conflict.
raises: HAIncompleteConfig if settings are missing.
'''
vip = config_get('vip')
dns = config_get('dns-ha')
if not(bool(vip) ^ bool(dns)):
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
'use high availability')
status_set('blocked', msg)
raise HAIncorrectConfig(msg)
# If dns-ha then one of os-*-hostname must be set
if dns:
dns_settings = ['os-internal-hostname', 'os-admin-hostname',
'os-public-hostname']
# At this point it is unknown if one or all of the possible
# network spaces are in HA. Validate at least one is set which is
# the minimum required.
for setting in dns_settings:
if config_get(setting):
log('DNS HA: At least one hostname is set {}: {}'
''.format(setting, config_get(setting)),
level=DEBUG)
return True
msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
'DNS HA')
status_set('blocked', msg)
raise HAIncompleteConfig(msg)
log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
return True
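
The rule above amounts to an exclusive-or between 'vip' and 'dns-ha', plus a hostname check when DNS HA is chosen. A minimal standalone sketch of the same decision, using a plain dict of hypothetical config values rather than the charm-helpers API:

    # Sketch only: mirrors the validation rule with hypothetical values.
    def sketch_valid_ha(cfg):
        vip, dns = cfg.get('vip'), cfg.get('dns-ha')
        if bool(vip) == bool(dns):
            raise ValueError('set either vip or dns-ha, but not both or neither')
        if dns and not any(cfg.get(h) for h in ('os-internal-hostname',
                                                'os-admin-hostname',
                                                'os-public-hostname')):
            raise ValueError('dns-ha requires at least one os-*-hostname')
        return True

    # e.g. sketch_valid_ha({'dns-ha': True, 'os-public-hostname': 'ceilometer.maas'})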
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS


@@ -43,9 +43,6 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.openstack = openstack
self.source = source
self.stable = stable
-# Note(coreycb): this needs to be changed when new next branches come
-# out.
-self.current_next = "trusty"
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
@@ -72,38 +69,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.log.info('OpenStackAmuletDeployment: determine branch locations')

-# Charms outside the lp:~openstack-charmers namespace
-base_charms = ['mysql', 'mongodb', 'nrpe']
-# Force these charms to current series even when using an older series.
-# ie. Use trusty/nrpe even when series is precise, as the P charm
-# does not possess the necessary external master config and hooks.
-force_series_current = ['nrpe']
-if self.series in ['precise', 'trusty']:
-base_series = self.series
-else:
-base_series = self.current_next
+# Charms outside the ~openstack-charmers
+base_charms = {
+'mysql': ['precise', 'trusty'],
+'mongodb': ['precise', 'trusty'],
+'nrpe': ['precise', 'trusty'],
+}

for svc in other_services:
-if svc['name'] in force_series_current:
-base_series = self.current_next
# If a location has been explicitly set, use it
if svc.get('location'):
continue
-if self.stable:
-temp = 'lp:charms/{}/{}'
-svc['location'] = temp.format(base_series,
-svc['name'])
-else:
if svc['name'] in base_charms:
-temp = 'lp:charms/{}/{}'
-svc['location'] = temp.format(base_series,
-svc['name'])
+# NOTE: not all charms have support for all series we
+# want/need to test against, so fix to most recent
+# that each base charm supports
+target_series = self.series
+if self.series not in base_charms[svc['name']]:
+target_series = base_charms[svc['name']][-1]
+svc['location'] = 'cs:{}/{}'.format(target_series,
+svc['name'])
+elif self.stable:
+svc['location'] = 'cs:{}/{}'.format(self.series,
+svc['name'])
else:
-temp = 'lp:~openstack-charmers/charms/{}/{}/next'
-svc['location'] = temp.format(self.current_next,
-svc['name'])
+svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
+self.series,
+svc['name']
+)

return other_services


@@ -0,0 +1,111 @@
# Copyright 2014-2016 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2016 Canonical Ltd.
#
# Authors:
# Openstack Charmers <
#
"""
Helpers for high availability.
"""
import re
from charmhelpers.core.hookenv import (
log,
relation_set,
charm_name,
config,
status_set,
DEBUG,
)
from charmhelpers.contrib.openstack.ip import (
resolve_address,
)
class DNSHAException(Exception):
"""Raised when an error occurs setting up DNS HA
"""
pass
def update_dns_ha_resource_params(resources, resource_params,
relation_id=None,
crm_ocf='ocf:maas:dns'):
""" Check for os-*-hostname settings and update resource dictionaries for
the HA relation.
@param resources: Pointer to dictionary of resources.
Usually instantiated in ha_joined().
@param resource_params: Pointer to dictionary of resource parameters.
Usually instantiated in ha_joined()
@param relation_id: Relation ID of the ha relation
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
settings = ['os-admin-hostname', 'os-internal-hostname',
'os-public-hostname']
# Check which DNS settings are set and update dictionaries
hostname_group = []
for setting in settings:
hostname = config(setting)
if hostname is None:
log('DNS HA: Hostname setting {} is None. Ignoring.'
''.format(setting),
DEBUG)
continue
m = re.search('os-(.+?)-hostname', setting)
if m:
networkspace = m.group(1)
else:
msg = ('Unexpected DNS hostname setting: {}. '
'Cannot determine network space name'
''.format(setting))
status_set('blocked', msg)
raise DNSHAException(msg)
hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname),
DEBUG)
continue
hostname_group.append(hostname_key)
resources[hostname_key] = crm_ocf
resource_params[hostname_key] = (
'params fqdn="{}" ip_address="{}" '
''.format(hostname, resolve_address(endpoint_type=networkspace,
override=False)))
if len(hostname_group) >= 1:
log('DNS HA: Hostname group is set with {} as members. '
'Informing the ha relation'.format(' '.join(hostname_group)),
DEBUG)
relation_set(relation_id=relation_id, groups={
'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
else:
msg = 'DNS HA: Hostname group has no members.'
status_set('blocked', msg)
raise DNSHAException(msg)
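
To make the helper's effect concrete: for a charm named 'ceilometer' with only os-public-hostname set to 'ceilometer.maas' (hypothetical values, consistent with the unit test added later in this change), the dictionaries end up roughly as follows:

    # Illustrative outcome only; hostname and address are hypothetical.
    resources = {
        'res_ceilometer_public_hostname': 'ocf:maas:dns',
    }
    resource_params = {
        'res_ceilometer_public_hostname':
            'params fqdn="ceilometer.maas" ip_address="10.0.0.1" ',
    }
    # ...and the ha relation is then updated with
    # groups={'grp_ceilometer_hostnames': 'res_ceilometer_public_hostname'}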


@@ -109,7 +109,7 @@ def _get_address_override(endpoint_type=PUBLIC):
return addr_override.format(service_name=service_name())

-def resolve_address(endpoint_type=PUBLIC):
+def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on

@@ -119,7 +119,10 @@ def resolve_address(endpoint_type=PUBLIC):
split if one is configured, or a Juju 2.0 extra-binding has been used.
:param endpoint_type: Network endpoint type
+:param override: Accept hostname overrides or not
"""
+resolved_address = None
+if override:
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
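
The new override flag is what lets DNS HA pin the DNS record to a real address: the ha/utils.py helper above calls resolve_address(..., override=False) so the ocf:maas:dns resource is given the unit's IP rather than any configured os-*-hostname. A hypothetical illustration:

    # Hypothetical: os-public-hostname='ceilometer.example.com',
    # unit public address 10.0.0.1.
    # resolve_address(endpoint_type=PUBLIC)                 -> 'ceilometer.example.com'
    # resolve_address(endpoint_type=PUBLIC, override=False) -> '10.0.0.1'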


@@ -51,6 +51,7 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
relation_set,
+service_name,
status_set,
hook_name
)
@@ -207,6 +208,27 @@ PACKAGE_CODENAMES = {
]),
}
GIT_DEFAULT_REPOS = {
'requirements': 'git://github.com/openstack/requirements',
'cinder': 'git://github.com/openstack/cinder',
'glance': 'git://github.com/openstack/glance',
'horizon': 'git://github.com/openstack/horizon',
'keystone': 'git://github.com/openstack/keystone',
'neutron': 'git://github.com/openstack/neutron',
'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
'nova': 'git://github.com/openstack/nova',
}
GIT_DEFAULT_BRANCHES = {
'icehouse': 'icehouse-eol',
'kilo': 'stable/kilo',
'liberty': 'stable/liberty',
'mitaka': 'stable/mitaka',
'master': 'master',
}
DEFAULT_LOOPBACK_SIZE = '5G'
@@ -703,6 +725,53 @@ def git_install_requested():
requirements_dir = None
def git_default_repos(projects):
"""
Returns default repos if a default openstack-origin-git value is specified.
"""
service = service_name()
for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
if projects == default:
# add the requirements repo first
repo = {
'name': 'requirements',
'repository': GIT_DEFAULT_REPOS['requirements'],
'branch': branch,
}
repos = [repo]
# neutron and nova charms require some additional repos
if service == 'neutron':
for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']:
repo = {
'name': svc,
'repository': GIT_DEFAULT_REPOS[svc],
'branch': branch,
}
repos.append(repo)
elif service == 'nova':
repo = {
'name': 'neutron',
'repository': GIT_DEFAULT_REPOS['neutron'],
'branch': branch,
}
repos.append(repo)
# finally add the current service's repo
repo = {
'name': service,
'repository': GIT_DEFAULT_REPOS[service],
'branch': branch,
}
repos.append(repo)
return yaml.dump(dict(repositories=repos))
return projects
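
For illustration, with a hypothetical 'glance' unit whose openstack-origin-git is simply 'mitaka', git_default_repos('mitaka') would return YAML describing roughly this structure (branch names taken from GIT_DEFAULT_BRANCHES above):

    # Sketch of the repositories list that gets yaml.dump()ed for a
    # hypothetical 'glance' service; 'requirements' always comes first.
    repositories = [
        {'name': 'requirements',
         'repository': 'git://github.com/openstack/requirements',
         'branch': 'stable/mitaka'},
        {'name': 'glance',
         'repository': 'git://github.com/openstack/glance',
         'branch': 'stable/mitaka'},
    ]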
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.


@@ -1231,7 +1231,7 @@ class CephConfContext(object):
permitted = self.permitted_sections
if permitted:
-diff = set(conf.keys()).symmetric_difference(set(permitted))
+diff = set(conf.keys()).difference(set(permitted))
if diff:
log("Config-flags contains invalid keys '%s' - they will be "
"ignored" % (', '.join(diff)), level=WARNING)


@@ -176,7 +176,7 @@ def init_is_systemd():

def adduser(username, password=None, shell='/bin/bash', system_user=False,
-primary_group=None, secondary_groups=None):
+primary_group=None, secondary_groups=None, uid=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.

@@ -187,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
+:param int uid: UID for user being created
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
+if uid:
+user_info = pwd.getpwuid(int(uid))
+log('user with uid {0} already exists!'.format(uid))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
+if uid:
+cmd.extend(['--uid', str(uid)])
if system_user or password is None:
cmd.append('--system')
else:
@@ -230,14 +236,58 @@ def user_exists(username):
return user_exists

-def add_group(group_name, system_group=False):
-"""Add a group to the system"""
def uid_exists(uid):
"""Check if a uid exists"""
try:
pwd.getpwuid(uid)
uid_exists = True
except KeyError:
uid_exists = False
return uid_exists
def group_exists(groupname):
"""Check if a group exists"""
try:
grp.getgrnam(groupname)
group_exists = True
except KeyError:
group_exists = False
return group_exists
def gid_exists(gid):
"""Check if a gid exists"""
try:
grp.getgrgid(gid)
gid_exists = True
except KeyError:
gid_exists = False
return gid_exists
def add_group(group_name, system_group=False, gid=None):
"""Add a group to the system
Will log but otherwise succeed if the group already exists.
:param str group_name: group to create
:param bool system_group: Create system group
:param int gid: GID for user being created
:returns: The password database entry struct, as returned by `grp.getgrnam`
"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
cmd = ['addgroup']
if gid:
cmd.extend(['--gid', str(gid)])
if system_group:
cmd.append('--system')
else:
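
A short usage sketch for the new uid/gid handling (names and ids are hypothetical, not part of this change):

    # Hypothetical: create a system group and user with fixed ids, idempotently.
    if not group_exists('ceilodata'):
        add_group('ceilodata', system_group=True, gid=1051)
    if not user_exists('ceilodata'):
        adduser('ceilodata', system_user=True, uid=1051,
                primary_group='ceilodata')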


@@ -125,7 +125,37 @@ options:
create the following public endpoints for ceilometer:
https://ceilometer.example.com:8777/
os-internal-hostname:
type: string
default:
description: |
The hostname or address of the internal endpoints created for ceilometer
in the keystone identity provider.
This value will be used for internal endpoints. For example, an
os-internal-hostname set to 'ceilometer.internal.example.com' with ssl
enabled will create the following internal endpoints for ceilometer:
https://ceilometer.internal.example.com:8777/
os-admin-hostname:
type: string
default:
description: |
The hostname or address of the admin endpoints created for ceilometer
in the keystone identity provider.
This value will be used for admin endpoints. For example, an
os-admin-hostname set to 'ceilometer.admin.example.com' with ssl enabled
will create the following admin endpoints for ceilometer:
https://ceilometer.admin.example.com:8777/
# HA configuration settings
dns-ha:
type: boolean
default: False
description: |
Use DNS HA with MAAS 2.0. Note: if this is set, do not set the vip
settings below.
vip:
type: string
default:


@@ -30,6 +30,9 @@ from charmhelpers.contrib.openstack.utils import (
pausable_restart_on_change as restart_on_change,
is_unit_paused_set,
)
from charmhelpers.contrib.openstack.ha.utils import (
update_dns_ha_resource_params,
)
from ceilometer_utils import (
get_packages,
CEILOMETER_DB,
@@ -205,7 +208,7 @@ def cluster_changed():
@hooks.hook('ha-relation-joined')
-def ha_joined():
+def ha_joined(relation_id=None):
cluster_config = get_hacluster_config()
resources = {
@@ -229,6 +232,11 @@ def ha_joined():
(amqp_ssl_port))
resource_params['res_ceilometer_agent_central'] = params
if config('dns-ha'):
update_dns_ha_resource_params(relation_id=relation_id,
resources=resources,
resource_params=resource_params)
else:
vip_group = []
for vip in cluster_config['vip'].split():
res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
@@ -240,7 +248,8 @@ def ha_joined():
resources[vip_key] = res_ceilometer_vip
resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"'
-' nic="{iface}"'.format(ip=vip_params,
+' nic="{iface}"'
+''.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=get_netmask_for_address(vip))
@@ -256,7 +265,8 @@ def ha_joined():
clones = {
'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
}
-relation_set(init_services=init_services,
+relation_set(relation_id=relation_id,
+init_services=init_services,
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,


@@ -48,6 +48,7 @@ TO_PATCH = [
'peer_store',
'configure_https',
'status_set',
+'update_dns_ha_resource_params',
]
@@ -299,7 +300,8 @@ class CeilometerHooksTest(CharmTestCase):
}
exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
call1 = call(groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
-call2 = call(init_services={'res_ceilometer_haproxy': 'haproxy'},
+call2 = call(relation_id=None,
+init_services={'res_ceilometer_haproxy': 'haproxy'},
corosync_bindiface='bnd0',
corosync_mcastport=5802,
resources=exp_resources,
@@ -346,7 +348,8 @@ class CeilometerHooksTest(CharmTestCase):
}
exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
call1 = call(groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
-call2 = call(init_services={'res_ceilometer_haproxy': 'haproxy'},
+call2 = call(relation_id=None,
+init_services={'res_ceilometer_haproxy': 'haproxy'},
corosync_bindiface='bnd0',
corosync_mcastport=5802,
resources=exp_resources,
@@ -354,6 +357,47 @@ class CeilometerHooksTest(CharmTestCase):
clones=exp_clones)
self.relation_set.assert_has_calls([call1, call2], any_order=False)
@patch.object(hooks, 'get_hacluster_config')
def test_ha_joined_dns_ha(self, mock_cluster_config):
def _fake_update(resources, resource_params, relation_id=None):
resources.update({'res_ceilometer_public_hostname':
'ocf:maas:dns'})
resource_params.update({'res_ceilometer_public_hostname':
'params fqdn="ceilometer.maas" '
'ip_address="10.0.0.1"'})
self.test_config.set('dns-ha', True)
mock_cluster_config.return_value = {
'vip': None,
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'os-admin-hostname': None,
'os-internal-hostname': None,
'os-public-hostname': 'ceilometer.maas',
}
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_ceilometer_haproxy': 'haproxy'},
'resources': {'res_ceilometer_public_hostname': 'ocf:maas:dns',
'res_ceilometer_haproxy': 'lsb:haproxy',
'res_ceilometer_agent_central':
'ocf:openstack:ceilometer-agent-central'},
'resource_params': {
'res_ceilometer_public_hostname':
'params fqdn="ceilometer.maas" '
'ip_address="10.0.0.1"',
'res_ceilometer_haproxy': 'op monitor interval="5s"',
'res_ceilometer_agent_central': 'op monitor interval="30s"'},
'clones': {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
}
self.update_dns_ha_resource_params.side_effect = _fake_update
hooks.ha_joined()
self.assertTrue(self.update_dns_ha_resource_params.called)
self.relation_set.assert_called_with(**args)
@patch('charmhelpers.core.hookenv.config')
@patch.object(hooks, 'keystone_joined')
def test_ha_changed_not_clustered(self, mock_keystone_joined, mock_config):