diff --git a/.gitignore b/.gitignore
index 25d8aecb..1ba85a76 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,6 @@ bin
 tags
 *.sw[nop]
 *.pyc
+.unit-state.db
+trusty/
+xenial/
diff --git a/README.md b/README.md
index a351f1a9..085fe5d7 100644
--- a/README.md
+++ b/README.md
@@ -23,6 +23,39 @@ This charm also supports scale out and high availability using the hacluster cha
     juju set neutron-api vip=
     juju add-relation neutron-hacluster neutron-api
 
+## HA/Clustering
+
+There are two mutually exclusive high availability options: using virtual
+IP(s) or DNS. In both cases, a relationship to hacluster is required, which
+provides the corosync back-end HA functionality.
+
+To use virtual IP(s), the clustered nodes must be on the same subnet, such
+that the VIP is a valid IP on the subnet for one of the node's interfaces
+and each node has an interface in said subnet. The VIP becomes a
+highly-available API endpoint.
+
+At a minimum, the config option 'vip' must be set in order to use virtual IP
+HA. If multiple networks are being used, a VIP should be provided for each
+network, separated by spaces. Optionally, vip_iface or vip_cidr may be
+specified.
+
+To use DNS high availability there are several prerequisites, although DNS
+HA does not require the clustered nodes to be on the same subnet. Currently
+the DNS HA feature is only available for MAAS 2.0 or greater environments,
+and MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must have
+static or "reserved" IP addresses registered in MAAS, and the DNS
+hostname(s) must be pre-registered in MAAS before use with DNS HA.
+
+At a minimum, the config option 'dns-ha' must be set to true and at least one
+of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
+be set in order to use DNS HA. One or more of the above hostnames may be set.
+
+The charm will throw an exception in the following circumstances:
+- If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
+- If both 'vip' and 'dns-ha' are set, as they are mutually exclusive
+- If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s)
+  are set
+
 # Restrictions
 
 This charm only supports deployment with OpenStack Icehouse or better.
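Reviewer's note: the three failure modes the README lists correspond to `valid_hacluster_config()` in the `cluster.py` hunk further down, whose docstring names `HAIncompleteConfig` as the exception raised for missing or incorrect settings. A minimal, self-contained sketch of those rules (the function name and signature here are illustrative, not the charm's actual API):

```python
class HAIncompleteConfig(Exception):
    """Raised when the HA-related charm config is unusable (see cluster.py)."""


def validate_ha_config(vip, dns_ha, hostnames):
    # vip: the 'vip' config value (string or None); dns_ha: the 'dns-ha'
    # boolean; hostnames: the configured os-*-hostname values.
    if vip and dns_ha:
        raise HAIncompleteConfig("'vip' and 'dns-ha' are mutually exclusive")
    if not vip and not dns_ha:
        raise HAIncompleteConfig(
            "related to hacluster but neither 'vip' nor 'dns-ha' is set")
    if dns_ha and not any(hostnames):
        raise HAIncompleteConfig(
            "'dns-ha' is set but no os-{admin,internal,public}-hostname is")
    return True


# A valid DNS HA configuration passes (hostname value from the unit test below):
validate_ha_config(vip=None, dns_ha=True, hostnames=['neutron-api.maas'])
```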
diff --git a/config.yaml b/config.yaml
index 4a36a756..c7388820 100755
--- a/config.yaml
+++ b/config.yaml
@@ -231,6 +231,12 @@ options:
       Number of floating IPs allowed per tenant. A negative value means
       unlimited.
   # HA configuration settings
+  dns-ha:
+    type: boolean
+    default: False
+    description: |
+      Use DNS HA with MAAS 2.0. Note: if this is set, do not set the vip
+      settings below.
   vip:
     type: string
     default:
@@ -301,6 +307,30 @@ options:
       will create the following endpoint for neutron-api:
       .
       https://neutron-api.example.com:9696/
+  os-internal-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the internal endpoints created for
+      neutron-api in the keystone identity provider.
+      .
+      This value will be used for internal endpoints. For example, an
+      os-internal-hostname set to 'neutron-api.internal.example.com' with ssl
+      enabled will create an internal endpoint for neutron-api:
+      .
+      https://neutron-api.internal.example.com:9696/
+  os-admin-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the admin endpoints created for neutron-api
+      in the keystone identity provider.
+      .
+      This value will be used for admin endpoints. For example, an
+      os-admin-hostname set to 'neutron-api.admin.example.com' with ssl
+      enabled will create an admin endpoint for neutron-api:
+      .
+      https://neutron-api.admin.example.com:9696/
   ssl_cert:
     type: string
     default:
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
index 92325a96..90e437aa 100644
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -280,14 +280,14 @@ def get_hacluster_config(exclude_keys=None):
     for initiating a relation to hacluster:
 
         ha-bindiface, ha-mcastport, vip, os-internal-hostname,
-        os-admin-hostname, os-public-hostname
+        os-admin-hostname, os-public-hostname, os-access-hostname
 
     param: exclude_keys: list of setting key(s) to be excluded.
     returns: dict: A dict containing settings keyed by setting name.
     raises: HAIncompleteConfig if settings are missing or incorrect.
     '''
     settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
-                'os-admin-hostname', 'os-public-hostname']
+                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
     conf = {}
     for setting in settings:
         if exclude_keys and setting in exclude_keys:
@@ -324,7 +324,7 @@ def valid_hacluster_config():
     # If dns-ha then one of os-*-hostname must be set
     if dns:
         dns_settings = ['os-internal-hostname', 'os-admin-hostname',
-                        'os-public-hostname']
+                        'os-public-hostname', 'os-access-hostname']
         # At this point it is unknown if one or all of the possible
         # network spaces are in HA. Validate at least one is set which is
         # the minimum required.
diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py
index 34064237..2a8a1291 100644
--- a/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/ha/utils.py
@@ -36,6 +36,10 @@ from charmhelpers.core.hookenv import (
     DEBUG,
 )
 
+from charmhelpers.core.host import (
+    lsb_release
+)
+
 from charmhelpers.contrib.openstack.ip import (
     resolve_address,
 )
@@ -63,8 +67,11 @@ def update_dns_ha_resource_params(resources, resource_params,
         DNS HA
     """
 
+    # Validate the charm environment for DNS HA
+    assert_charm_supports_dns_ha()
+
     settings = ['os-admin-hostname', 'os-internal-hostname',
-                'os-public-hostname']
+                'os-public-hostname', 'os-access-hostname']
 
     # Check which DNS settings are set and update dictionaries
     hostname_group = []
@@ -109,3 +116,15 @@ def update_dns_ha_resource_params(resources, resource_params,
         msg = 'DNS HA: Hostname group has no members.'
         status_set('blocked', msg)
         raise DNSHAException(msg)
+
+
+def assert_charm_supports_dns_ha():
+    """Validate prerequisites for DNS HA
+    The MAAS client is only available on Xenial or greater.
+    """
+    if lsb_release().get('DISTRIB_RELEASE') < '16.04':
+        msg = ('DNS HA is only supported on 16.04 and greater '
+               'versions of Ubuntu.')
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
+    return True
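Reviewer's note on the new `assert_charm_supports_dns_ha()` gate: `lsb_release()` in `charmhelpers.core.host` returns the parsed contents of /etc/lsb-release, so `DISTRIB_RELEASE` is a string such as '14.04', and `< '16.04'` is a lexical string comparison (which orders post-10.04 Ubuntu release strings correctly). A small illustrative sketch of its behaviour:

```python
# Illustrative only: how the string comparison in the gate above behaves for
# a few DISTRIB_RELEASE values.
for release in ['14.04', '15.10', '16.04', '16.10']:
    supported = not (release < '16.04')  # mirrors the check in the hunk above
    print('{}: DNS HA supported: {}'.format(release, supported))
# 14.04 and 15.10 are rejected (status 'blocked' plus DNSHAException);
# 16.04 and 16.10 pass.
```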
""" service = service_name() - core_project = service for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): - if projects_yaml == default: + if projects == default: # add the requirements repo first repo = { @@ -743,41 +742,34 @@ def git_default_repos(projects_yaml): } repos = [repo] - # neutron-* and nova-* charms require some additional repos - if service in ['neutron-api', 'neutron-gateway', - 'neutron-openvswitch']: - core_project = 'neutron' - for project in ['neutron-fwaas', 'neutron-lbaas', - 'neutron-vpnaas']: + # neutron and nova charms require some additional repos + if service == 'neutron': + for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']: repo = { - 'name': project, - 'repository': GIT_DEFAULT_REPOS[project], + 'name': svc, + 'repository': GIT_DEFAULT_REPOS[svc], 'branch': branch, } repos.append(repo) - - elif service in ['nova-cloud-controller', 'nova-compute']: - core_project = 'nova' + elif service == 'nova': repo = { 'name': 'neutron', 'repository': GIT_DEFAULT_REPOS['neutron'], 'branch': branch, } repos.append(repo) - elif service == 'openstack-dashboard': - core_project = 'horizon' - # finally add the current service's core project repo + # finally add the current service's repo repo = { - 'name': core_project, - 'repository': GIT_DEFAULT_REPOS[core_project], + 'name': service, + 'repository': GIT_DEFAULT_REPOS[service], 'branch': branch, } repos.append(repo) return yaml.dump(dict(repositories=repos)) - return projects_yaml + return projects def _git_yaml_load(projects_yaml): diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 3f3eded7..4d6db9aa 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -86,6 +86,10 @@ from charmhelpers.contrib.hahelpers.cluster import ( is_elected_leader, ) +from charmhelpers.contrib.openstack.ha.utils import ( + update_dns_ha_resource_params, +) + from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.ip import ( @@ -516,7 +520,7 @@ def cluster_changed(): @hooks.hook('ha-relation-joined') -def ha_joined(): +def ha_joined(relation_id=None): cluster_config = get_hacluster_config() resources = { 'res_neutron_haproxy': 'lsb:haproxy', @@ -524,34 +528,39 @@ def ha_joined(): resource_params = { 'res_neutron_haproxy': 'op monitor interval="5s"' } - vip_group = [] - for vip in cluster_config['vip'].split(): - if is_ipv6(vip): - res_neutron_vip = 'ocf:heartbeat:IPv6addr' - vip_params = 'ipv6addr' - else: - res_neutron_vip = 'ocf:heartbeat:IPaddr2' - vip_params = 'ip' + if config('dns-ha'): + update_dns_ha_resource_params(relation_id=relation_id, + resources=resources, + resource_params=resource_params) + else: + vip_group = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_neutron_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_neutron_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' - iface = (get_iface_for_address(vip) or - config('vip_iface')) - netmask = (get_netmask_for_address(vip) or - config('vip_cidr')) + iface = (get_iface_for_address(vip) or + config('vip_iface')) + netmask = (get_netmask_for_address(vip) or + config('vip_cidr')) - if iface is not None: - vip_key = 'res_neutron_{}_vip'.format(iface) - resources[vip_key] = res_neutron_vip - resource_params[vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}" ' - 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=netmask) - ) - vip_group.append(vip_key) + if iface is not None: + vip_key = 
diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py
index 3f3eded7..4d6db9aa 100755
--- a/hooks/neutron_api_hooks.py
+++ b/hooks/neutron_api_hooks.py
@@ -86,6 +86,10 @@ from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
 )
 
+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+)
+
 from charmhelpers.payload.execd import execd_preinstall
 
 from charmhelpers.contrib.openstack.ip import (
@@ -516,7 +520,7 @@ def cluster_changed():
 
 
 @hooks.hook('ha-relation-joined')
-def ha_joined():
+def ha_joined(relation_id=None):
     cluster_config = get_hacluster_config()
     resources = {
         'res_neutron_haproxy': 'lsb:haproxy',
@@ -524,34 +528,39 @@ def ha_joined():
     resource_params = {
         'res_neutron_haproxy': 'op monitor interval="5s"'
     }
-    vip_group = []
-    for vip in cluster_config['vip'].split():
-        if is_ipv6(vip):
-            res_neutron_vip = 'ocf:heartbeat:IPv6addr'
-            vip_params = 'ipv6addr'
-        else:
-            res_neutron_vip = 'ocf:heartbeat:IPaddr2'
-            vip_params = 'ip'
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+    else:
+        vip_group = []
+        for vip in cluster_config['vip'].split():
+            if is_ipv6(vip):
+                res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+                vip_params = 'ipv6addr'
+            else:
+                res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+                vip_params = 'ip'
 
-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+            iface = (get_iface_for_address(vip) or
+                     config('vip_iface'))
+            netmask = (get_netmask_for_address(vip) or
+                       config('vip_cidr'))
 
-        if iface is not None:
-            vip_key = 'res_neutron_{}_vip'.format(iface)
-            resources[vip_key] = res_neutron_vip
-            resource_params[vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}" '
-                'nic="{iface}"'.format(ip=vip_params,
-                                       vip=vip,
-                                       iface=iface,
-                                       netmask=netmask)
-            )
-            vip_group.append(vip_key)
+            if iface is not None:
+                vip_key = 'res_neutron_{}_vip'.format(iface)
+                resources[vip_key] = res_neutron_vip
+                resource_params[vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
+                    'nic="{iface}"'.format(ip=vip_params,
+                                           vip=vip,
+                                           iface=iface,
+                                           netmask=netmask)
+                )
+                vip_group.append(vip_key)
 
-    if len(vip_group) >= 1:
-        relation_set(groups={'grp_neutron_vips': ' '.join(vip_group)})
+        if len(vip_group) >= 1:
+            relation_set(groups={'grp_neutron_vips': ' '.join(vip_group)})
 
     init_services = {
         'res_neutron_haproxy': 'haproxy'
@@ -559,7 +568,8 @@ def ha_joined():
     clones = {
         'cl_nova_haproxy': 'res_neutron_haproxy'
     }
-    relation_set(init_services=init_services,
+    relation_set(relation_id=relation_id,
+                 init_services=init_services,
                  corosync_bindiface=cluster_config['ha-bindiface'],
                  corosync_mcastport=cluster_config['ha-mcastport'],
                  resources=resources,
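Reviewer's note summarizing the two branches of `ha_joined()`: both modes send the haproxy resource, but DNS HA delegates resource construction to `update_dns_ha_resource_params()` while VIP mode builds an IPaddr2/IPv6addr resource per address. Roughly, the payloads sent to hacluster look like this (the VIP address and netmask are made up; the DNS values match the unit test below):

```python
# Illustrative only: resources/resource_params as sent over the ha relation.

# VIP mode ('vip' set; eth0 found for the address):
vip_mode = {
    'resources': {'res_neutron_haproxy': 'lsb:haproxy',
                  'res_neutron_eth0_vip': 'ocf:heartbeat:IPaddr2'},
    'resource_params': {
        'res_neutron_haproxy': 'op monitor interval="5s"',
        'res_neutron_eth0_vip': ('params ip="10.0.0.10" '
                                 'cidr_netmask="255.255.255.0" nic="eth0"')},
}

# DNS HA mode ('dns-ha' true; os-public-hostname set):
dns_mode = {
    'resources': {'res_neutron_haproxy': 'lsb:haproxy',
                  'res_neutron_public_hostname': 'ocf:maas:dns'},
    'resource_params': {
        'res_neutron_haproxy': 'op monitor interval="5s"',
        'res_neutron_public_hostname': ('params fqdn="neutron-api.maas" '
                                        'ip_address="10.0.0.1"')},
}
```

In both cases `relation_set()` now also forwards `relation_id`, so the hook can be re-driven for a specific ha relation.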
diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py
index 58324c9c..ea512967 100644
--- a/unit_tests/test_neutron_api_hooks.py
+++ b/unit_tests/test_neutron_api_hooks.py
@@ -75,6 +75,7 @@ TO_PATCH = [
     'force_etcd_restart',
     'status_set',
     'network_get_primary_address',
+    'update_dns_ha_resource_params',
 ]
 
 NEUTRON_CONF_DIR = "/etc/neutron"
@@ -731,6 +732,7 @@ class NeutronAPIHooksTests(CharmTestCase):
         self.get_iface_for_address.return_value = 'eth0'
         self.get_netmask_for_address.return_value = '255.255.255.0'
         _relation_data = {
+            'relation_id': None,
             'init_services': {'res_neutron_haproxy': 'haproxy'},
             'corosync_bindiface': _ha_config['ha-bindiface'],
             'corosync_mcastport': _ha_config['ha-mcastport'],
@@ -763,6 +765,7 @@ class NeutronAPIHooksTests(CharmTestCase):
         self.get_iface_for_address.return_value = None
         self.get_netmask_for_address.return_value = None
         _relation_data = {
+            'relation_id': None,
             'init_services': {'res_neutron_haproxy': 'haproxy'},
             'corosync_bindiface': _ha_config['ha-bindiface'],
             'corosync_mcastport': _ha_config['ha-mcastport'],
@@ -799,6 +802,7 @@ class NeutronAPIHooksTests(CharmTestCase):
         self.get_iface_for_address.return_value = 'eth0'
         self.get_netmask_for_address.return_value = 'ffff.ffff.ffff.ffff'
         _relation_data = {
+            'relation_id': None,
             'init_services': {'res_neutron_haproxy': 'haproxy'},
             'corosync_bindiface': _ha_config['ha-bindiface'],
             'corosync_mcastport': _ha_config['ha-mcastport'],
@@ -817,6 +821,43 @@ class NeutronAPIHooksTests(CharmTestCase):
             **_relation_data
         )
 
+    @patch.object(hooks, 'get_hacluster_config')
+    def test_ha_joined_dns_ha(self, _get_hacluster_config):
+        def _fake_update(resources, resource_params, relation_id=None):
+            resources.update({'res_neutron_public_hostname':
+                              'ocf:maas:dns'})
+            resource_params.update({'res_neutron_public_hostname':
+                                    'params fqdn="neutron-api.maas" '
+                                    'ip_address="10.0.0.1"'})
+
+        self.test_config.set('dns-ha', True)
+        _get_hacluster_config.return_value = {
+            'vip': None,
+            'ha-bindiface': 'em0',
+            'ha-mcastport': '8080',
+            'os-admin-hostname': None,
+            'os-internal-hostname': None,
+            'os-public-hostname': 'neutron-api.maas',
+        }
+        args = {
+            'relation_id': None,
+            'corosync_bindiface': 'em0',
+            'corosync_mcastport': '8080',
+            'init_services': {'res_neutron_haproxy': 'haproxy'},
+            'resources': {'res_neutron_public_hostname': 'ocf:maas:dns',
+                          'res_neutron_haproxy': 'lsb:haproxy'},
+            'resource_params': {
+                'res_neutron_public_hostname':
+                    'params fqdn="neutron-api.maas" ip_address="10.0.0.1"',
+                'res_neutron_haproxy': 'op monitor interval="5s"'},
+            'clones': {'cl_nova_haproxy': 'res_neutron_haproxy'}
+        }
+        self.update_dns_ha_resource_params.side_effect = _fake_update
+
+        hooks.ha_joined()
+        self.assertTrue(self.update_dns_ha_resource_params.called)
+        self.relation_set.assert_called_with(**args)
+
     def test_ha_changed(self):
         self.test_relation.set({
             'clustered': 'true',
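Reviewer's note: the new test stubs `update_dns_ha_resource_params` with a `side_effect` function so the mocked helper still mutates the dicts `ha_joined()` passes in. The pattern in isolation (using the `mock` library the suite already depends on; `unittest.mock` on Python 3):

```python
from mock import MagicMock  # or: from unittest.mock import MagicMock

def _fake_update(resources, resource_params, relation_id=None):
    # Mutate the caller's dict, as the real helper would.
    resources['res_neutron_public_hostname'] = 'ocf:maas:dns'

stub = MagicMock(side_effect=_fake_update)
resources = {'res_neutron_haproxy': 'lsb:haproxy'}
stub(resources=resources, resource_params={}, relation_id=None)
assert 'res_neutron_public_hostname' in resources  # side effect applied
```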