Implement DNS high availability. Pass the correct information to
hacluster to register a DNS entry with MAAS 2.0 or greater rather
than using a virtual IP.

Charm-helpers sync to bring in DNS HA helpers

Change-Id: Icc9bfb16e07ca6cf5475182b1974b09949cd89b6
This commit is contained in:
David Ames 2016-06-14 16:05:36 -07:00 committed by James Page
parent 74e10c82d4
commit 3a2adbf106
7 changed files with 166 additions and 66 deletions

View File

@ -58,27 +58,41 @@ have already deployed Ceph using the ceph charm:
This configuration can also be used to support Glance in HA/Scale-out This configuration can also be used to support Glance in HA/Scale-out
deployments. deployments.
Glance HA/Scale-out HA/Clustering
=================== ===================
The Glance charm can also be used in a HA/scale-out configuration using There are two mutually exclusive high availability options: using virtual
the hacluster charm: IP(s) or DNS. In both cases, a relationship to hacluster is required which
provides the corosync back end HA functionality.
juju deploy -n 3 glance To use virtual IP(s) the clustered nodes must be on the same subnet such that
juju deploy hacluster haglance the VIP is a valid IP on the subnet for one of the node's interfaces and each
juju set glance vip=<virtual IP address to access glance over> node has an interface in said subnet. The VIP becomes a highly-available API
juju add-relation glance haglance endpoint.
juju add-relation glance mysql
juju add-relation glance keystone
juju add-relation glance nova-cloud-controller
juju add-relation glance ceph|swift-proxy
In this configuration, 3 service units host the Glance image service; At a minimum, the config option 'vip' must be set in order to use virtual IP
API requests are load balanced across all 3 service units via the HA. If multiple networks are being used, a VIP should be provided for each
configured virtual IP address (which is also registered into Keystone network, separated by spaces. Optionally, vip_iface or vip_cidr may be
as the endpoint for Glance). specified.
Note that Glance in this configuration must be used with either Ceph or To use DNS high availability there are several prerequisites. However, DNS HA
does not require the clustered nodes to be on the same subnet.
Currently the DNS HA feature is only available for MAAS 2.0 or greater
environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must
have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s)
must be pre-registered in MAAS before use with DNS HA.
At a minimum, the config option 'dns-ha' must be set to true and at least one
of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
be set in order to use DNS HA. One or more of the above hostnames may be set.
The charm will throw an exception in the following circumstances:
If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
If both 'vip' and 'dns-ha' are set as they are mutually exclusive
If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are
set
Note that Glance in HA configuration must be used with either Ceph or
Swift providing backing image storage. Swift providing backing image storage.
Glance metering Glance metering

View File

@ -280,14 +280,14 @@ def get_hacluster_config(exclude_keys=None):
for initiating a relation to hacluster: for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, os-internal-hostname, ha-bindiface, ha-mcastport, vip, os-internal-hostname,
os-admin-hostname, os-public-hostname os-admin-hostname, os-public-hostname, os-access-hostname
param: exclude_keys: list of setting key(s) to be excluded. param: exclude_keys: list of setting key(s) to be excluded.
returns: dict: A dict containing settings keyed by setting name. returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing or incorrect. raises: HAIncompleteConfig if settings are missing or incorrect.
''' '''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname', settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
'os-admin-hostname', 'os-public-hostname'] 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
conf = {} conf = {}
for setting in settings: for setting in settings:
if exclude_keys and setting in exclude_keys: if exclude_keys and setting in exclude_keys:
@ -324,7 +324,7 @@ def valid_hacluster_config():
# If dns-ha then one of os-*-hostname must be set # If dns-ha then one of os-*-hostname must be set
if dns: if dns:
dns_settings = ['os-internal-hostname', 'os-admin-hostname', dns_settings = ['os-internal-hostname', 'os-admin-hostname',
'os-public-hostname'] 'os-public-hostname', 'os-access-hostname']
# At this point it is unknown if one or all of the possible # At this point it is unknown if one or all of the possible
# network spaces are in HA. Validate at least one is set which is # network spaces are in HA. Validate at least one is set which is
# the minimum required. # the minimum required.

View File

@ -36,6 +36,10 @@ from charmhelpers.core.hookenv import (
DEBUG, DEBUG,
) )
from charmhelpers.core.host import (
lsb_release
)
from charmhelpers.contrib.openstack.ip import ( from charmhelpers.contrib.openstack.ip import (
resolve_address, resolve_address,
) )
@ -63,8 +67,11 @@ def update_dns_ha_resource_params(resources, resource_params,
DNS HA DNS HA
""" """
# Validate the charm environment for DNS HA
assert_charm_supports_dns_ha()
settings = ['os-admin-hostname', 'os-internal-hostname', settings = ['os-admin-hostname', 'os-internal-hostname',
'os-public-hostname'] 'os-public-hostname', 'os-access-hostname']
# Check which DNS settings are set and update dictionaries # Check which DNS settings are set and update dictionaries
hostname_group = [] hostname_group = []
@ -109,3 +116,15 @@ def update_dns_ha_resource_params(resources, resource_params,
msg = 'DNS HA: Hostname group has no members.' msg = 'DNS HA: Hostname group has no members.'
status_set('blocked', msg) status_set('blocked', msg)
raise DNSHAException(msg) raise DNSHAException(msg)
def assert_charm_supports_dns_ha():
"""Validate prerequisites for DNS HA
The MAAS client is only available on Xenial or greater
"""
if lsb_release().get('DISTRIB_RELEASE') < '16.04':
msg = ('DNS HA is only supported on 16.04 and greater '
'versions of Ubuntu.')
status_set('blocked', msg)
raise DNSHAException(msg)
return True

View File

@ -725,15 +725,14 @@ def git_install_requested():
requirements_dir = None requirements_dir = None
def git_default_repos(projects_yaml): def git_default_repos(projects):
""" """
Returns default repos if a default openstack-origin-git value is specified. Returns default repos if a default openstack-origin-git value is specified.
""" """
service = service_name() service = service_name()
core_project = service
for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
if projects_yaml == default: if projects == default:
# add the requirements repo first # add the requirements repo first
repo = { repo = {
@ -743,41 +742,34 @@ def git_default_repos(projects_yaml):
} }
repos = [repo] repos = [repo]
# neutron-* and nova-* charms require some additional repos # neutron and nova charms require some additional repos
if service in ['neutron-api', 'neutron-gateway', if service == 'neutron':
'neutron-openvswitch']: for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']:
core_project = 'neutron'
for project in ['neutron-fwaas', 'neutron-lbaas',
'neutron-vpnaas']:
repo = { repo = {
'name': project, 'name': svc,
'repository': GIT_DEFAULT_REPOS[project], 'repository': GIT_DEFAULT_REPOS[svc],
'branch': branch, 'branch': branch,
} }
repos.append(repo) repos.append(repo)
elif service == 'nova':
elif service in ['nova-cloud-controller', 'nova-compute']:
core_project = 'nova'
repo = { repo = {
'name': 'neutron', 'name': 'neutron',
'repository': GIT_DEFAULT_REPOS['neutron'], 'repository': GIT_DEFAULT_REPOS['neutron'],
'branch': branch, 'branch': branch,
} }
repos.append(repo) repos.append(repo)
elif service == 'openstack-dashboard':
core_project = 'horizon'
# finally add the current service's core project repo # finally add the current service's repo
repo = { repo = {
'name': core_project, 'name': service,
'repository': GIT_DEFAULT_REPOS[core_project], 'repository': GIT_DEFAULT_REPOS[service],
'branch': branch, 'branch': branch,
} }
repos.append(repo) repos.append(repo)
return yaml.dump(dict(repositories=repos)) return yaml.dump(dict(repositories=repos))
return projects_yaml return projects
def _git_yaml_load(projects_yaml): def _git_yaml_load(projects_yaml):

View File

@ -112,6 +112,12 @@ options:
effect (although it can be changed in ceph by manually configuring effect (although it can be changed in ceph by manually configuring
your ceph cluster). your ceph cluster).
# HA configuration settings # HA configuration settings
dns-ha:
type: boolean
default: False
description: |
Use DNS HA with MAAS 2.0. Note if this is set do not set vip
settings below.
vip: vip:
type: string type: string
default: default:
@ -232,6 +238,30 @@ options:
create a public endpoint for glance of: create a public endpoint for glance of:
. .
https://glance.example.com:9292/ https://glance.example.com:9292/
os-internal-hostname:
type: string
default:
description: |
The hostname or address of the internal endpoints created for glance
in the keystone identity provider.
.
This value will be used for internal endpoints. For example, an
os-internal-hostname set to 'glance.internal.example.com' with ssl enabled will
create an internal endpoint for glance of:
.
https://glance.internal.example.com:9292/
os-admin-hostname:
type: string
default:
description: |
The hostname or address of the admin endpoints created for glance
in the keystone identity provider.
.
This value will be used for admin endpoints. For example, an
os-admin-hostname set to 'glance.admin.example.com' with ssl enabled will
create an admin endpoint for glance of:
.
https://glance.admin.example.com:9292/
prefer-ipv6: prefer-ipv6:
type: boolean type: boolean
default: False default: False

View File

@ -60,6 +60,9 @@ from charmhelpers.contrib.hahelpers.cluster import (
is_elected_leader, is_elected_leader,
get_hacluster_config get_hacluster_config
) )
from charmhelpers.contrib.openstack.ha.utils import (
update_dns_ha_resource_params,
)
from charmhelpers.contrib.openstack.utils import ( from charmhelpers.contrib.openstack.utils import (
config_value_changed, config_value_changed,
configure_installation_source, configure_installation_source,
@ -437,35 +440,40 @@ def ha_relation_joined(relation_id=None):
'res_glance_haproxy': 'op monitor interval="5s"' 'res_glance_haproxy': 'op monitor interval="5s"'
} }
vip_group = [] if config('dns-ha'):
for vip in cluster_config['vip'].split(): update_dns_ha_resource_params(relation_id=relation_id,
if is_ipv6(vip): resources=resources,
res_ks_vip = 'ocf:heartbeat:IPv6addr' resource_params=resource_params)
vip_params = 'ipv6addr' else:
else: vip_group = []
res_ks_vip = 'ocf:heartbeat:IPaddr2' for vip in cluster_config['vip'].split():
vip_params = 'ip' if is_ipv6(vip):
res_ks_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_ks_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = (get_iface_for_address(vip) or iface = (get_iface_for_address(vip) or
config('vip_iface')) config('vip_iface'))
netmask = (get_netmask_for_address(vip) or netmask = (get_netmask_for_address(vip) or
config('vip_cidr')) config('vip_cidr'))
if iface is not None: if iface is not None:
vip_key = 'res_glance_{}_vip'.format(iface) vip_key = 'res_glance_{}_vip'.format(iface)
resources[vip_key] = res_ks_vip resources[vip_key] = res_ks_vip
resource_params[vip_key] = ( resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"' 'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(ip=vip_params, ' nic="{iface}"'.format(ip=vip_params,
vip=vip, vip=vip,
iface=iface, iface=iface,
netmask=netmask) netmask=netmask)
) )
vip_group.append(vip_key) vip_group.append(vip_key)
if len(vip_group) >= 1: if len(vip_group) >= 1:
relation_set(relation_id=relation_id, relation_set(relation_id=relation_id,
groups={'grp_glance_vips': ' '.join(vip_group)}) groups={'grp_glance_vips': ' '.join(vip_group)})
init_services = { init_services = {
'res_glance_haproxy': 'haproxy', 'res_glance_haproxy': 'haproxy',

View File

@ -55,6 +55,8 @@ TO_PATCH = [
'configure_installation_source', 'configure_installation_source',
'os_release', 'os_release',
'openstack_upgrade_available', 'openstack_upgrade_available',
# charmhelpers.contrib.openstack.ha.utils
'update_dns_ha_resource_params',
# charmhelpers.contrib.hahelpers.cluster_utils # charmhelpers.contrib.hahelpers.cluster_utils
'is_elected_leader', 'is_elected_leader',
# hooks.glance_utils # hooks.glance_utils
@ -765,6 +767,41 @@ class GlanceRelationTests(CharmTestCase):
} }
self.relation_set.assert_called_with(**args) self.relation_set.assert_called_with(**args)
def test_ha_joined_dns_ha(self):
def _fake_update(resources, resource_params, relation_id=None):
resources.update({'res_glance_public_hostname': 'ocf:maas:dns'})
resource_params.update({'res_glance_public_hostname':
'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"'})
self.test_config.set('dns-ha', True)
self.get_hacluster_config.return_value = {
'vip': None,
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'os-admin-hostname': None,
'os-internal-hostname': None,
'os-public-hostname': 'keystone.maas',
}
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_glance_haproxy': 'haproxy'},
'resources': {'res_glance_public_hostname': 'ocf:maas:dns',
'res_glance_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_glance_public_hostname': 'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"',
'res_glance_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_glance_haproxy': 'res_glance_haproxy'}
}
self.update_dns_ha_resource_params.side_effect = _fake_update
relations.ha_relation_joined()
self.assertTrue(self.update_dns_ha_resource_params.called)
self.relation_set.assert_called_with(**args)
def test_ha_relation_changed_not_clustered(self): def test_ha_relation_changed_not_clustered(self):
self.relation_get.return_value = False self.relation_get.return_value = False
relations.ha_relation_changed() relations.ha_relation_changed()