diff --git a/.gitignore b/.gitignore
index a37185b..8791980 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,5 @@ tags
 precise/
 trusty/
 xenial/
+.unit-state.db
+tests/cirros-*-disk.img
diff --git a/README.md b/README.md
index 16f8709..e09a2f8 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,40 @@ required domains, roles and users in the cloud for Heat stacks:
 
 This is only required for >= OpenStack Kilo.
 
+HA/Clustering
+-------------
+
+There are two mutually exclusive high availability options: using virtual
+IP(s) or DNS. In both cases, a relationship to hacluster is required, which
+provides the corosync back end for HA functionality.
+
+To use virtual IP(s), the clustered nodes must be on the same subnet, such
+that the VIP is a valid IP on the subnet for one of the node's interfaces
+and each node has an interface in said subnet. The VIP becomes a
+highly-available API endpoint.
+
+At a minimum, the config option 'vip' must be set in order to use virtual IP
+HA. If multiple networks are being used, a VIP should be provided for each
+network, separated by spaces. Optionally, vip_iface or vip_cidr may be
+specified.
+
+To use DNS high availability there are several prerequisites, although DNS
+HA does not require the clustered nodes to be on the same subnet.
+Currently the DNS HA feature is only available for MAAS 2.0 or greater
+environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must
+have static or "reserved" IP addresses registered in MAAS, and the DNS
+hostname(s) must be pre-registered in MAAS before use with DNS HA.
+
+At a minimum, the config option 'dns-ha' must be set to true and at least one
+of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
+be set in order to use DNS HA; any or all of these hostnames may be set.
+
+The charm will throw an exception in the following circumstances:
+- If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
+- If both 'vip' and 'dns-ha' are set, as they are mutually exclusive
+- If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s)
+  are set
+
 Network Space support
 ---------------------
 
diff --git a/config.yaml b/config.yaml
index dbe4c4a..3ee4f74 100644
--- a/config.yaml
+++ b/config.yaml
@@ -125,7 +125,31 @@ options:
       os-public-hostname set to 'heat.example.com' with ssl enabled will
-      create the following public endpoints for ceilometer:
+      create the following public endpoints for heat:
       .
-      https://ceilometer.example.com:8777/
+      https://heat.example.com:8004/
+  os-internal-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the internal endpoints created for heat
+      in the keystone identity provider.
+      .
+      This value will be used for internal endpoints. For example, an
+      os-internal-hostname set to 'heat.internal.example.com' with ssl
+      enabled will create the following internal endpoints for heat:
+      .
+      https://heat.internal.example.com:8004/
+  os-admin-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the admin endpoints created for heat
+      in the keystone identity provider.
+      .
+      This value will be used for admin endpoints. For example, an
+      os-admin-hostname set to 'heat.admin.example.com' with ssl enabled
+      will create the following admin endpoints for heat:
+      .
+      https://heat.admin.example.com:8004/
   action-managed-upgrade:
     type: boolean
     default: False
@@ -149,6 +173,12 @@ options:
       disabled and a non-temporary address must be configured/available
       on your network interface.
   # HA configuration settings
+  dns-ha:
+    type: boolean
+    default: False
+    description: |
+      Use DNS HA with MAAS 2.0. Note that if this option is set, none of
+      the vip settings below should be set.
   vip:
     type: string
     default:
diff --git a/hooks/heat_relations.py b/hooks/heat_relations.py
index fd99591..c1e9a4b 100755
--- a/hooks/heat_relations.py
+++ b/hooks/heat_relations.py
@@ -62,6 +62,10 @@ from charmhelpers.contrib.openstack.utils import (
     sync_db_with_multi_ipv6_addresses,
 )
 
+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+)
+
 from charmhelpers.contrib.openstack.ip import (
     canonical_url,
     ADMIN,
@@ -311,35 +315,40 @@ def ha_joined(relation_id=None):
         'res_heat_haproxy': 'op monitor interval="5s"'
     }
 
-    vip_group = []
-    for vip in cluster_config['vip'].split():
-        if is_ipv6(vip):
-            res_heat_vip = 'ocf:heartbeat:IPv6addr'
-            vip_params = 'ipv6addr'
-        else:
-            res_heat_vip = 'ocf:heartbeat:IPaddr2'
-            vip_params = 'ip'
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+    else:
+        vip_group = []
+        for vip in cluster_config['vip'].split():
+            if is_ipv6(vip):
+                res_heat_vip = 'ocf:heartbeat:IPv6addr'
+                vip_params = 'ipv6addr'
+            else:
+                res_heat_vip = 'ocf:heartbeat:IPaddr2'
+                vip_params = 'ip'
 
-        iface = (get_iface_for_address(vip) or
-                 config('vip_iface'))
-        netmask = (get_netmask_for_address(vip) or
-                   config('vip_cidr'))
+            iface = (get_iface_for_address(vip) or
+                     config('vip_iface'))
+            netmask = (get_netmask_for_address(vip) or
+                       config('vip_cidr'))
 
-        if iface is not None:
-            vip_key = 'res_heat_{}_vip'.format(iface)
-            resources[vip_key] = res_heat_vip
-            resource_params[vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}"'
-                ' nic="{iface}"'.format(ip=vip_params,
-                                        vip=vip,
-                                        iface=iface,
-                                        netmask=netmask)
-            )
-            vip_group.append(vip_key)
+            if iface is not None:
+                vip_key = 'res_heat_{}_vip'.format(iface)
+                resources[vip_key] = res_heat_vip
+                resource_params[vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
+                    ' nic="{iface}"'.format(ip=vip_params,
+                                            vip=vip,
+                                            iface=iface,
+                                            netmask=netmask)
+                )
+                vip_group.append(vip_key)
 
-    if len(vip_group) >= 1:
-        relation_set(relation_id=relation_id,
-                     groups={'grp_heat_vips': ' '.join(vip_group)})
+        if len(vip_group) >= 1:
+            relation_set(relation_id=relation_id,
+                         groups={'grp_heat_vips': ' '.join(vip_group)})
 
     init_services = {
         'res_heat_haproxy': 'haproxy'
diff --git a/unit_tests/test_heat_relations.py b/unit_tests/test_heat_relations.py
index 22040fa..3c0667b 100644
--- a/unit_tests/test_heat_relations.py
+++ b/unit_tests/test_heat_relations.py
@@ -44,6 +44,8 @@ TO_PATCH = [
     'determine_packages',
     'charm_dir',
     'sync_db_with_multi_ipv6_addresses',
+    # charmhelpers.contrib.openstack.ha.utils
+    'update_dns_ha_resource_params',
     # charmhelpers.contrib.hahelpers.cluster_utils
     # heat_utils
     'restart_map',
@@ -313,3 +315,38 @@ class HeatRelationTests(CharmTestCase):
             'clones': {'cl_heat_haproxy': 'res_heat_haproxy'}
         }
         self.relation_set.assert_called_with(**expected)
+
+    def test_ha_joined_dns_ha(self):
+        def _fake_update(resources, resource_params, relation_id=None):
+            resources.update({'res_heat_public_hostname': 'ocf:maas:dns'})
+            resource_params.update({'res_heat_public_hostname':
+                                    'params fqdn="keystone.maas" '
+                                    'ip_address="10.0.0.1"'})
+
+        self.test_config.set('dns-ha', True)
+        self.get_hacluster_config.return_value = {
+            'vip': None,
+            'ha-bindiface': 'em0',
+            'ha-mcastport': '8080',
+            'os-admin-hostname': None,
+            'os-internal-hostname': None,
+            'os-public-hostname': 'keystone.maas',
+        }
+        args = {
+            'relation_id': None,
+            'corosync_bindiface': 'em0',
+            'corosync_mcastport': '8080',
+            'init_services': {'res_heat_haproxy': 'haproxy'},
+            'resources': {'res_heat_public_hostname': 'ocf:maas:dns',
+                          'res_heat_haproxy': 'lsb:haproxy'},
+            'resource_params': {
+                'res_heat_public_hostname': 'params fqdn="keystone.maas" '
+                                            'ip_address="10.0.0.1"',
+                'res_heat_haproxy': 'op monitor interval="5s"'},
+            'clones': {'cl_heat_haproxy': 'res_heat_haproxy'}
+        }
+        self.update_dns_ha_resource_params.side_effect = _fake_update
+
+        relations.ha_joined()
+        self.assertTrue(self.update_dns_ha_resource_params.called)
+        self.relation_set.assert_called_with(**args)
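
The ha_joined() hunk above delegates all of the DNS HA wiring to charmhelpers.
The sketch below illustrates the contract this charm now relies on, inferred
from the values asserted in test_ha_joined_dns_ha: one 'ocf:maas:dns' resource
named res_heat_<endpoint>_hostname per configured os-*-hostname option, with
params fqdn="..." ip_address="...". It is a minimal illustration only, not the
charmhelpers implementation; the resolve_address() lookup and the key
derivation are assumptions made for this sketch.

# Hedged sketch of the expected update_dns_ha_resource_params behaviour.
from charmhelpers.core.hookenv import config
from charmhelpers.contrib.openstack.ip import (
    resolve_address,
    ADMIN,
    INTERNAL,
    PUBLIC,
)

# Map each hostname config option to the endpoint type used when
# resolving the address the MAAS DNS record should point at.
HOSTNAME_ENDPOINT_TYPES = {
    'os-admin-hostname': ADMIN,
    'os-internal-hostname': INTERNAL,
    'os-public-hostname': PUBLIC,
}


def update_dns_ha_resource_params(resources, resource_params,
                                  relation_id=None):
    """Register one ocf:maas:dns pacemaker resource per configured
    os-*-hostname option. relation_id matches the call site in
    ha_joined() but is unused in this sketch."""
    for setting, endpoint_type in sorted(HOSTNAME_ENDPOINT_TYPES.items()):
        hostname = config(setting)
        if not hostname:
            continue
        # e.g. 'os-public-hostname' -> 'res_heat_public_hostname'
        key = 'res_heat_{}_hostname'.format(setting.split('-')[1])
        resources[key] = 'ocf:maas:dns'
        resource_params[key] = (
            'params fqdn="{}" ip_address="{}"'.format(
                hostname, resolve_address(endpoint_type=endpoint_type)))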