Merge "DNS HA"
README.md (+34)
@@ -131,6 +131,40 @@ Swift may be used as a storage backend for the Glance image service. To do
 so, simply add a relation between swift-proxy and an existing Glance service
 deployed using the cs:precise/glance charm.
+
+HA/Clustering
+-------------
+
+There are two mutually exclusive high availability options: using virtual
+IP(s) or DNS. In both cases, a relationship to hacluster is required, which
+provides the corosync back-end HA functionality.
+
+To use virtual IP(s), the clustered nodes must be on the same subnet, such
+that the VIP is a valid IP on the subnet for one of the node's interfaces
+and each node has an interface in said subnet. The VIP becomes a
+highly-available API endpoint.
+
+At a minimum, the config option 'vip' must be set in order to use virtual IP
+HA. If multiple networks are being used, a VIP should be provided for each
+network, separated by spaces. Optionally, 'vip_iface' or 'vip_cidr' may be
+specified.
+
+To use DNS high availability there are several prerequisites, although DNS
+HA does not require the clustered nodes to be on the same subnet. Currently
+the DNS HA feature is only available for MAAS 2.0 or greater environments,
+and MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must have
+static or "reserved" IP addresses registered in MAAS, and the DNS
+hostname(s) must be pre-registered in MAAS before use with DNS HA.
+
+At a minimum, the config option 'dns-ha' must be set to true and at least one
+of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
+be set in order to use DNS HA. One or more of the above hostnames may be set.
+
+The charm will throw an exception in the following circumstances:
+- If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
+- If both 'vip' and 'dns-ha' are set, as they are mutually exclusive
+- If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s)
+  are set
 
 Network Space support
 ---------------------
 
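For illustration only (not part of the commit), here is a minimal sketch of a Juju 2.0 bundle fragment wiring up the DNS HA option that this change introduces. The charm URLs, application names, unit count and hostname are assumptions:

    applications:
      swift-proxy:
        charm: cs:swift-proxy
        num_units: 3
        options:
          dns-ha: true                           # mutually exclusive with 'vip'
          os-public-hostname: files.example.com  # must be pre-registered in MAAS
      swift-hacluster:
        charm: cs:hacluster                      # subordinate; provides corosync
    relations:
      - [ swift-proxy, swift-hacluster ]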
config.yaml (+30)
@@ -169,6 +169,12 @@ options:
     type: string
     default:
     description: Hash to use across all swift-proxy servers - don't lose it
+  dns-ha:
+    type: boolean
+    default: False
+    description: |
+      Use DNS HA with MAAS 2.0. Note: if this is set, do not set the vip
+      settings below.
   vip:
     type: string
     default:
@@ -227,6 +233,30 @@ options:
       the following public endpoint for the swift-proxy:
 
         https://files.example.com:80/swift/v1
+  os-internal-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the internal endpoints created for
+      swift-proxy in the keystone identity provider.
+
+      This value will be used for internal endpoints. For example, an
+      os-internal-hostname set to 'files.internal.example.com' will create
+      the following internal endpoint for the swift-proxy:
+
+        https://files.internal.example.com:80/swift/v1
+  os-admin-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the admin endpoints created for swift-proxy
+      in the keystone identity provider.
+
+      This value will be used for admin endpoints. For example, an
+      os-admin-hostname set to 'files.admin.example.com' will create
+      the following admin endpoint for the swift-proxy:
+
+        https://files.admin.example.com:80/swift/v1
   prefer-ipv6:
     type: boolean
     default: False
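Complementing the DNS HA bundle sketch above, a comparable sketch of the mutually exclusive virtual IP configuration, expressed as charm options; the addresses, netmask and interface name are illustrative assumptions, not part of this change:

    swift-proxy:
      vip: "10.0.0.100 192.168.25.100"  # one VIP per network, space-separated
      vip_cidr: "24"                    # optional
      vip_iface: "eth0"                 # optional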
@@ -41,6 +41,15 @@ from lib.swift_utils import (
 )
 
 import charmhelpers.contrib.openstack.utils as openstack
+
+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+    get_hacluster_config,
+)
+
 from charmhelpers.contrib.hahelpers.cluster import (
     is_elected_leader,
 )
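Judging from the unit test added at the end of this change, update_dns_ha_resource_params is expected to inject an ocf:maas:dns resource per configured os-*-hostname into the dicts passed to it. A sketch of the resulting entries for a single public hostname, rendered as YAML with the values taken from that test:

    resources:
      res_swift_proxy_public_hostname: ocf:maas:dns
    resource_params:
      res_swift_proxy_public_hostname: params fqdn="keystone.maas" ip_address="10.0.0.1"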
@@ -567,51 +576,53 @@ def ha_relation_changed():
 
 
 @hooks.hook('ha-relation-joined')
-def ha_relation_joined():
+def ha_relation_joined(relation_id=None):
     # Obtain the config values necessary for the cluster config. These
     # include multicast port and interface to bind to.
-    corosync_bindiface = config('ha-bindiface')
-    corosync_mcastport = config('ha-mcastport')
-    vip = config('vip')
-    if not vip:
-        msg = 'Unable to configure hacluster as vip not provided'
-        raise SwiftProxyCharmException(msg)
+    cluster_config = get_hacluster_config()
 
     # Obtain resources
     resources = {'res_swift_haproxy': 'lsb:haproxy'}
     resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}
 
-    vip_group = []
-    for vip in vip.split():
-        if is_ipv6(vip):
-            res_swift_vip = 'ocf:heartbeat:IPv6addr'
-            vip_params = 'ipv6addr'
-        else:
-            res_swift_vip = 'ocf:heartbeat:IPaddr2'
-            vip_params = 'ip'
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+    else:
+        vip_group = []
+        for vip in cluster_config['vip'].split():
+            if is_ipv6(vip):
+                res_swift_vip = 'ocf:heartbeat:IPv6addr'
+                vip_params = 'ipv6addr'
+            else:
+                res_swift_vip = 'ocf:heartbeat:IPaddr2'
+                vip_params = 'ip'
 
-        iface = get_iface_for_address(vip)
-        if iface is not None:
-            vip_key = 'res_swift_{}_vip'.format(iface)
-            resources[vip_key] = res_swift_vip
-            resource_params[vip_key] = (
-                'params {ip}="{vip}" cidr_netmask="{netmask}"'
-                ' nic="{iface}"'.format(ip=vip_params,
-                                        vip=vip,
-                                        iface=iface,
-                                        netmask=get_netmask_for_address(vip))
-            )
-            vip_group.append(vip_key)
+            iface = get_iface_for_address(vip)
+            if iface is not None:
+                vip_key = 'res_swift_{}_vip'.format(iface)
+                resources[vip_key] = res_swift_vip
+                resource_params[vip_key] = (
+                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
+                    ' nic="{iface}"'
+                    ''.format(ip=vip_params,
+                              vip=vip,
+                              iface=iface,
+                              netmask=get_netmask_for_address(vip))
+                )
+                vip_group.append(vip_key)
 
-    if len(vip_group) >= 1:
-        relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})
+        if len(vip_group) >= 1:
+            relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})
 
     init_services = {'res_swift_haproxy': 'haproxy'}
     clones = {'cl_swift_haproxy': 'res_swift_haproxy'}
 
-    relation_set(init_services=init_services,
-                 corosync_bindiface=corosync_bindiface,
-                 corosync_mcastport=corosync_mcastport,
+    relation_set(relation_id=relation_id,
+                 init_services=init_services,
+                 corosync_bindiface=cluster_config['ha-bindiface'],
+                 corosync_mcastport=cluster_config['ha-mcastport'],
                  resources=resources,
                  resource_params=resource_params,
                  clones=clones)
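For the non-DNS path, the combined relation data that ha_relation_joined hands to hacluster for a single-VIP deployment would look roughly like the sketch below, rendered as YAML; the address, netmask and interface name are illustrative assumptions:

    init_services:
      res_swift_haproxy: haproxy
    resources:
      res_swift_haproxy: lsb:haproxy
      res_swift_eth0_vip: ocf:heartbeat:IPaddr2
    resource_params:
      res_swift_haproxy: op monitor interval="5s"
      res_swift_eth0_vip: params ip="10.0.0.100" cidr_netmask="24" nic="eth0"
    groups:                                # set by a separate relation_set call
      grp_swift_vips: res_swift_eth0_vip
    clones:
      cl_swift_haproxy: res_swift_haproxy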
@@ -169,3 +169,46 @@ class SwiftHooksTestCase(unittest.TestCase):
         calls = [call(rsync_allowed_hosts='10.0.0.1 10.0.0.2',
                       relation_id='storage:1', timestamp=1234)]
         mock_rel_set.assert_has_calls(calls)
+
+    @patch.object(swift_hooks, 'relation_set')
+    @patch.object(swift_hooks, 'update_dns_ha_resource_params')
+    @patch.object(swift_hooks, 'get_hacluster_config')
+    @patch.object(swift_hooks, 'config')
+    def test_ha_relation_joined_dns_ha(self, test_config,
+                                       get_hacluster_config,
+                                       update_dns_ha_resource_params,
+                                       relation_set):
+        def _fake_update(resources, resource_params, relation_id=None):
+            resources.update({'res_swift_proxy_public_hostname':
+                              'ocf:maas:dns'})
+            resource_params.update({'res_swift_proxy_public_hostname':
+                                    'params fqdn="keystone.maas" '
+                                    'ip_address="10.0.0.1"'})
+
+        test_config.set('dns-ha', True)
+        get_hacluster_config.return_value = {
+            'vip': None,
+            'ha-bindiface': 'em0',
+            'ha-mcastport': '8080',
+            'os-admin-hostname': None,
+            'os-internal-hostname': None,
+            'os-public-hostname': 'keystone.maas',
+        }
+        args = {
+            'relation_id': None,
+            'corosync_bindiface': 'em0',
+            'corosync_mcastport': '8080',
+            'init_services': {'res_swift_haproxy': 'haproxy'},
+            'resources': {'res_swift_proxy_public_hostname': 'ocf:maas:dns',
+                          'res_swift_haproxy': 'lsb:haproxy'},
+            'resource_params': {
+                'res_swift_proxy_public_hostname':
+                    'params fqdn="keystone.maas" '
+                    'ip_address="10.0.0.1"',
+                'res_swift_haproxy': 'op monitor interval="5s"'},
+            'clones': {'cl_swift_haproxy': 'res_swift_haproxy'}
+        }
+        update_dns_ha_resource_params.side_effect = _fake_update
+
+        swift_hooks.ha_relation_joined()
+        self.assertTrue(update_dns_ha_resource_params.called)
+        relation_set.assert_called_with(**args)