[jamespage,r=gnuoy] Add support for HTTPS+HA in network-split configurations

James Page committed 2014-10-06 22:49:00 +01:00
9 changed files with 169 additions and 123 deletions

View File

@@ -57,6 +57,8 @@ def get_address_in_network(network, fallback=None, fatal=False):
         else:
             if fatal:
                 not_found_error_out()
+            else:
+                return None
 
     _validate_cidr(network)
     network = netaddr.IPNetwork(network)
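
Note (illustrative, not part of this commit; assumes charmhelpers is importable on the unit): the new else branch makes the non-fatal miss explicit instead of falling off the end of the function.

    from charmhelpers.contrib.network.ip import get_address_in_network

    # With no network configured, no fallback and fatal=False, the function
    # previously fell through; after this change it returns None explicitly.
    addr = get_address_in_network(None, fallback=None, fatal=False)
    if addr is None:
        pass  # no matching network configured: caller decides what to do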

View File

@@ -1,6 +1,3 @@
-from bzrlib.branch import Branch
-import os
-import re
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
@@ -13,62 +10,62 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        that is specifically for use by OpenStack charms.
        """
 
-    def __init__(self, series=None, openstack=None, source=None):
+    def __init__(self, series=None, openstack=None, source=None, stable=True):
         """Initialize the deployment environment."""
         super(OpenStackAmuletDeployment, self).__init__(series)
         self.openstack = openstack
         self.source = source
-
-    def _is_dev_branch(self):
-        """Determine if branch being tested is a dev (i.e. next) branch."""
-        branch = Branch.open(os.getcwd())
-        parent = branch.get_parent()
-        pattern = re.compile("^.*/next/$")
-        if (pattern.match(parent)):
-            return True
-        else:
-            return False
+        self.stable = stable
+        # Note(coreycb): this needs to be changed when new next branches come
+        # out.
+        self.current_next = "trusty"
 
     def _determine_branch_locations(self, other_services):
         """Determine the branch locations for the other services.
 
-           If the branch being tested is a dev branch, then determine the
-           development branch locations for the other services. Otherwise,
-           the default charm store branches will be used."""
-        name = 0
-        if self._is_dev_branch():
-            updated_services = []
-            for svc in other_services:
-                if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
-                    location = 'lp:charms/{}'.format(svc[name])
-                else:
-                    temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
-                    location = temp.format(svc[name])
-                updated_services.append(svc + (location,))
-            other_services = updated_services
+           Determine if the local branch being tested is derived from its
+           stable or next (dev) branch, and based on this, use the
+           corresponding stable or next branches for the other_services."""
+        base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
+
+        if self.stable:
+            for svc in other_services:
+                temp = 'lp:charms/{}'
+                svc['location'] = temp.format(svc['name'])
+        else:
+            for svc in other_services:
+                if svc['name'] in base_charms:
+                    temp = 'lp:charms/{}'
+                    svc['location'] = temp.format(svc['name'])
+                else:
+                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
+                    svc['location'] = temp.format(self.current_next,
+                                                  svc['name'])
 
         return other_services
 
     def _add_services(self, this_service, other_services):
         """Add services to the deployment and set openstack-origin/source."""
-        name = 0
         other_services = self._determine_branch_locations(other_services)
 
         super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                              other_services)
 
         services = other_services
         services.append(this_service)
-        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
+        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
+                      'ceph-osd', 'ceph-radosgw']
 
         if self.openstack:
             for svc in services:
-                if svc[name] not in use_source:
+                if svc['name'] not in use_source:
                     config = {'openstack-origin': self.openstack}
-                    self.d.configure(svc[name], config)
+                    self.d.configure(svc['name'], config)
 
         if self.source:
             for svc in services:
-                if svc[name] in use_source:
+                if svc['name'] in use_source:
                     config = {'source': self.source}
-                    self.d.configure(svc[name], config)
+                    self.d.configure(svc['name'], config)
 
     def _configure_services(self, configs):
         """Configure all of the services."""

View File

@@ -52,6 +52,7 @@ from charmhelpers.contrib.openstack.neutron import (
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     get_ipv6_addr,
+    get_netmask_for_address,
     format_ipv6_addr,
     is_address_in_network
 )
@@ -408,6 +409,9 @@ class CephContext(OSContextGenerator):
         return ctxt
 
 
+ADDRESS_TYPES = ['admin', 'internal', 'public']
+
+
 class HAProxyContext(OSContextGenerator):
     interfaces = ['cluster']
@@ -420,7 +424,6 @@ class HAProxyContext(OSContextGenerator):
         if not relation_ids('cluster'):
             return {}
 
-        cluster_hosts = {}
         l_unit = local_unit().replace('/', '-')
 
         if config('prefer-ipv6'):
@@ -428,17 +431,49 @@ class HAProxyContext(OSContextGenerator):
         else:
             addr = unit_get('private-address')
-        cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
-                                                       addr)
 
-        for rid in relation_ids('cluster'):
-            for unit in related_units(rid):
-                _unit = unit.replace('/', '-')
-                addr = relation_get('private-address', rid=rid, unit=unit)
-                cluster_hosts[_unit] = addr
+        cluster_hosts = {}
+
+        # NOTE(jamespage): build out map of configured network endpoints
+        # and associated backends
+        for addr_type in ADDRESS_TYPES:
+            laddr = get_address_in_network(
+                config('os-{}-network'.format(addr_type)))
+            if laddr:
+                cluster_hosts[laddr] = {}
+                cluster_hosts[laddr]['network'] = "{}/{}".format(
+                    laddr,
+                    get_netmask_for_address(laddr)
+                )
+                cluster_hosts[laddr]['backends'] = {}
+                cluster_hosts[laddr]['backends'][l_unit] = laddr
+                for rid in relation_ids('cluster'):
+                    for unit in related_units(rid):
+                        _unit = unit.replace('/', '-')
+                        _laddr = relation_get('{}-address'.format(addr_type),
+                                              rid=rid, unit=unit)
+                        if _laddr:
+                            cluster_hosts[laddr]['backends'][_unit] = _laddr
+
+        # NOTE(jamespage) no split configurations found, just use
+        # private addresses
+        if not cluster_hosts:
+            cluster_hosts[addr] = {}
+            cluster_hosts[addr]['network'] = "{}/{}".format(
+                addr,
+                get_netmask_for_address(addr)
+            )
+            cluster_hosts[addr]['backends'] = {}
+            cluster_hosts[addr]['backends'][l_unit] = addr
+            for rid in relation_ids('cluster'):
+                for unit in related_units(rid):
+                    _unit = unit.replace('/', '-')
+                    _laddr = relation_get('private-address',
+                                          rid=rid, unit=unit)
+                    if _laddr:
+                        cluster_hosts[addr]['backends'][_unit] = _laddr
 
         ctxt = {
-            'units': cluster_hosts,
+            'frontends': cluster_hosts,
         }
 
         if config('haproxy-server-timeout'):
@@ -455,12 +490,13 @@ class HAProxyContext(OSContextGenerator):
             ctxt['haproxy_host'] = '0.0.0.0'
 
         ctxt['stat_port'] = ':8888'
-        if len(cluster_hosts.keys()) > 1:
-            # Enable haproxy when we have enough peers.
-            log('Ensuring haproxy enabled in /etc/default/haproxy.')
-            with open('/etc/default/haproxy', 'w') as out:
-                out.write('ENABLED=1\n')
-            return ctxt
+
+        for frontend in cluster_hosts:
+            if len(cluster_hosts[frontend]['backends']) > 1:
+                # Enable haproxy when we have enough peers.
+                log('Ensuring haproxy enabled in /etc/default/haproxy.')
+                with open('/etc/default/haproxy', 'w') as out:
+                    out.write('ENABLED=1\n')
+                return ctxt
+
         log('HAProxy context is incomplete, this unit has no peers.')
         return {}
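
Note (illustrative values, not from the commit): the context payload changes shape from a flat unit-to-address map under 'units' to a per-network map under 'frontends'. For a two-unit cinder cluster whose os-internal-network matches 10.20.3.2, __call__() now returns roughly:

    ctxt = {
        'frontends': {
            '10.20.3.2': {  # this unit's address on the configured network
                'network': '10.20.3.2/255.255.255.0',  # address/netmask
                'backends': {
                    'cinder-0': '10.20.3.2',  # local unit
                    'cinder-1': '10.20.3.3',  # peer, via 'internal-address'
                },
            },
        },
    }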
@@ -722,22 +758,22 @@ class NeutronContext(OSContextGenerator):
 class OSConfigFlagContext(OSContextGenerator):
-
-        """
-        Responsible for adding user-defined config-flags in charm config to a
-        template context.
-
-        NOTE: the value of config-flags may be a comma-separated list of
-              key=value pairs and some Openstack config files support
-              comma-separated lists as values.
-        """
-
-        def __call__(self):
-            config_flags = config('config-flags')
-            if not config_flags:
-                return {}
-
-            flags = config_flags_parser(config_flags)
-            return {'user_config_flags': flags}
+    """
+    Responsible for adding user-defined config-flags in charm config to a
+    template context.
+
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some Openstack config files support
+          comma-separated lists as values.
+    """
+
+    def __call__(self):
+        config_flags = config('config-flags')
+        if not config_flags:
+            return {}
+
+        flags = config_flags_parser(config_flags)
+        return {'user_config_flags': flags}
 
 
 class SubordinateConfigContext(OSContextGenerator):

View File

@@ -34,17 +34,21 @@ listen stats {{ stat_port }}
     stats uri /
     stats auth admin:password
 
-{% if units -%}
+{% if frontends -%}
 {% for service, ports in service_ports.iteritems() -%}
-listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
-    balance roundrobin
-    {% for unit, address in units.iteritems() -%}
-    server {{ unit }} {{ address }}:{{ ports[1] }} check
-    {% endfor %}
-listen {{ service }}_ipv6 :::{{ ports[0] }}
-    balance roundrobin
-    {% for unit, address in units.iteritems() -%}
+frontend tcp-in_{{ service }}
+    bind *:{{ ports[0] }}
+    bind :::{{ ports[0] }}
+    {% for frontend in frontends -%}
+    acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
+    use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
+    {% endfor %}
+{% for frontend in frontends -%}
+backend {{ service }}_{{ frontend }}
+    balance leastconn
+    {% for unit, address in frontends[frontend]['backends'].iteritems() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
     {% endfor %}
+{% endfor -%}
 {% endfor -%}
 {% endif -%}
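
Note (a sketch, not part of the commit; run under Python 2 since the template relies on dict.iteritems, and the service name, ports and addresses are invented): rendering a cut-down copy of the new frontend stanza shows how traffic is routed by destination network.

    from jinja2 import Template

    TMPL = """\
    {% for service, ports in service_ports.iteritems() -%}
    frontend tcp-in_{{ service }}
        bind *:{{ ports[0] }}
        {% for frontend in frontends -%}
        acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
        use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
        {% endfor %}
    {% endfor -%}
    """

    print(Template(TMPL).render(
        service_ports={'cinder_api': [8776, 8766]},
        frontends={'10.20.3.2': {'network': '10.20.3.2/255.255.255.0',
                                 'backends': {'cinder-0': '10.20.3.2'}}}))
    # Requests arriving on port 8776 whose destination falls inside the
    # internal network are steered to backend cinder_api_10.20.3.2.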

View File

@@ -78,6 +78,8 @@ SWIFT_CODENAMES = OrderedDict([
     ('1.12.0', 'icehouse'),
     ('1.11.0', 'icehouse'),
     ('2.0.0', 'juno'),
+    ('2.1.0', 'juno'),
+    ('2.2.0', 'juno'),
 ])
 
 DEFAULT_LOOPBACK_SIZE = '5G'
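
Note (illustrative): SWIFT_CODENAMES maps swift package versions to OpenStack release codenames, so the two new entries let the Juno point releases resolve.

    from collections import OrderedDict

    SWIFT_CODENAMES = OrderedDict([
        ('1.12.0', 'icehouse'),
        ('1.11.0', 'icehouse'),
        ('2.0.0', 'juno'),
        ('2.1.0', 'juno'),
        ('2.2.0', 'juno'),
    ])

    assert SWIFT_CODENAMES['2.2.0'] == 'juno'  # resolves after this change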

View File

@@ -55,7 +55,6 @@ from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring
 from charmhelpers.contrib.hahelpers.cluster import (
     eligible_leader,
-    is_leader,
     get_hacluster_config,
 )
@@ -71,6 +70,7 @@ from charmhelpers.contrib.openstack.ip import (
     canonical_url,
     PUBLIC, INTERNAL, ADMIN
 )
+from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
 
 hooks = Hooks()
@@ -212,9 +212,6 @@ def amqp_departed():
 @hooks.hook('identity-service-relation-joined')
 def identity_joined(rid=None):
-    if not eligible_leader(CLUSTER_RES):
-        return
-
     public_url = '{}:{}/v1/$(tenant_id)s'.format(
         canonical_url(CONFIGS, PUBLIC),
         config('api-listening-port')
@@ -277,15 +274,19 @@ def ceph_changed():
 @hooks.hook('cluster-relation-joined')
 def cluster_joined(relation_id=None):
+    for addr_type in ADDRESS_TYPES:
+        address = get_address_in_network(
+            config('os-{}-network'.format(addr_type))
+        )
+        if address:
+            relation_set(
+                relation_id=relation_id,
+                relation_settings={'{}-address'.format(addr_type): address}
+            )
+
     if config('prefer-ipv6'):
         private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
-    else:
-        private_addr = unit_get('private-address')
-
-    address = get_address_in_network(config('os-internal-network'),
-                                     private_addr)
-
-    relation_set(relation_id=relation_id,
-                 relation_settings={'private-address': address})
+        relation_set(relation_id=relation_id,
+                     relation_settings={'private-address': private_addr})
 
 
 @hooks.hook('cluster-relation-changed',
@@ -351,14 +352,11 @@ def ha_changed():
     clustered = relation_get('clustered')
     if not clustered or clustered in [None, 'None', '']:
         juju_log('ha_changed: hacluster subordinate not fully clustered.')
-        return
-    if not is_leader(CLUSTER_RES):
-        juju_log('ha_changed: hacluster complete but we are not leader.')
-        return
-
-    juju_log('Cluster configured, notifying other services and updating '
-             'keystone endpoint configuration')
-    for rid in relation_ids('identity-service'):
-        identity_joined(rid=rid)
+    else:
+        juju_log('Cluster configured, notifying other services and updating '
+                 'keystone endpoint configuration')
+        for rid in relation_ids('identity-service'):
+            identity_joined(rid=rid)
 
 
 @hooks.hook('image-service-relation-changed')
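
Note (the addresses are taken from test_cluster_joined_hook_multinet later in this commit; the calls themselves are a sketch): on a unit where all three os-*-network options match a local address, the reworked cluster_joined is equivalent to:

    from charmhelpers.core.hookenv import relation_set

    relation_set(relation_id=None,
                 relation_settings={'admin-address': '192.168.20.2'})
    relation_set(relation_id=None,
                 relation_settings={'internal-address': '10.20.3.2'})
    relation_set(relation_id=None,
                 relation_settings={'public-address': '146.162.23.45'})
    # With no os-*-network configured and prefer-ipv6 off, nothing is
    # published and peers fall back to Juju's default private-address.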

View File

@@ -4,7 +4,7 @@ import os
 os.environ['JUJU_UNIT_NAME'] = 'cinder'
 import cinder_utils as utils
 
-from mock import patch
+from mock import patch, MagicMock
 
 from test_utils import (
     CharmTestCase
@@ -121,17 +121,17 @@ class TestCinderContext(CharmTestCase):
         mock_is_clustered.return_value = False
         ctxt = contexts.ApacheSSLContext()
-        with patch.object(ctxt, 'enable_modules'):
-            with patch.object(ctxt, 'configure_cert'):
-                service_enabled.return_value = False
-                self.assertEquals(ctxt(), {})
-                self.assertFalse(mock_https.called)
-                service_enabled.return_value = True
-                self.assertEquals(ctxt(), {'endpoints': [('1.2.3.4',
-                                                          '1.2.3.4',
-                                                          34, 12)],
-                                           'ext_ports': [34],
-                                           'namespace': 'cinder'})
-                self.assertTrue(mock_https.called)
-                mock_unit_get.assert_called_with('private-address')
+        ctxt.enable_modules = MagicMock()
+        ctxt.configure_cert = MagicMock()
+        ctxt.configure_ca = MagicMock()
+        ctxt.canonical_names = MagicMock()
+        service_enabled.return_value = False
+        self.assertEquals(ctxt(), {})
+        self.assertFalse(mock_https.called)
+        service_enabled.return_value = True
+        self.assertEquals(ctxt(), {'endpoints': [('1.2.3.4', '1.2.3.4',
+                                                  34, 12)],
+                                   'ext_ports': [34],
+                                   'namespace': 'cinder'})
+        self.assertTrue(mock_https.called)
+        mock_unit_get.assert_called_with('private-address')

View File

@@ -18,6 +18,8 @@ utils.register_configs = MagicMock()
 import cinder_hooks as hooks
 
+hooks.hooks._config_save = False
+
 # Unpatch it now that its loaded.
 utils.restart_map = _restart_map
 utils.register_configs = _register_configs
@@ -59,7 +61,6 @@ TO_PATCH = [
     # charmhelpers.contrib.hahelpers.cluster_utils
     'canonical_url',
     'eligible_leader',
-    'is_leader',
     'get_hacluster_config',
     'execd_preinstall',
     'get_ipv6_addr',
@@ -344,12 +345,6 @@ class TestJoinedHooks(CharmTestCase):
         }
         self.relation_set.assert_called_with(**expected)
 
-    def test_identity_service_joined_no_leadership(self):
-        'It does nothing on identity-joined when not eligible leader'
-        self.eligible_leader.return_value = False
-        hooks.hooks.execute(['hooks/identity-service-relation-joined'])
-        self.assertFalse(self.relation_set.called)
-
     @patch('os.mkdir')
     def test_ceph_joined(self, mkdir):
         'It correctly prepares for a ceph changed hook'

View File

@@ -52,10 +52,10 @@ TO_PATCH = [
     # charmhelpers.contrib.hahelpers.cluster_utils
     'eligible_leader',
     'get_hacluster_config',
-    'is_leader',
     # charmhelpers.contrib.network.ip
     'get_iface_for_address',
-    'get_netmask_for_address'
+    'get_netmask_for_address',
+    'get_address_in_network',
 ]
@@ -90,6 +90,29 @@ class TestClusterHooks(CharmTestCase):
                 call('start', 'apache2')]
         self.assertEquals(ex, service.call_args_list)
 
+    def test_cluster_joined_hook(self):
+        self.config.side_effect = self.test_config.get
+        self.get_address_in_network.return_value = None
+        hooks.hooks.execute(['hooks/cluster-relation-joined'])
+        self.assertFalse(self.relation_set.called)
+
+    def test_cluster_joined_hook_multinet(self):
+        self.config.side_effect = self.test_config.get
+        self.get_address_in_network.side_effect = [
+            '192.168.20.2',
+            '10.20.3.2',
+            '146.162.23.45'
+        ]
+        hooks.hooks.execute(['hooks/cluster-relation-joined'])
+        self.relation_set.assert_has_calls([
+            call(relation_id=None,
+                 relation_settings={'admin-address': '192.168.20.2'}),
+            call(relation_id=None,
+                 relation_settings={'internal-address': '10.20.3.2'}),
+            call(relation_id=None,
+                 relation_settings={'public-address': '146.162.23.45'}),
+        ])
+
     def test_ha_joined_complete_config(self):
         'Ensure hacluster subordinate receives all relevant config'
         conf = {
@@ -161,18 +184,8 @@ class TestClusterHooks(CharmTestCase):
         self.relation_set.assert_called_with(**ex_args)
 
     @patch.object(hooks, 'identity_joined')
-    def test_ha_changed_clustered_not_leader(self, joined):
-        'Skip keystone notification if not cluster leader'
+    def test_ha_changed_clustered(self, joined):
         self.relation_get.return_value = True
-        self.is_leader.return_value = False
-        hooks.hooks.execute(['hooks/ha-relation-changed'])
-        self.assertFalse(joined.called)
-
-    @patch.object(hooks, 'identity_joined')
-    def test_ha_changed_clustered_leader(self, joined):
-        'Notify keystone if cluster leader'
-        self.relation_get.return_value = True
-        self.is_leader.return_value = True
         self.relation_ids.return_value = ['identity:0']
         hooks.hooks.execute(['hooks/ha-relation-changed'])
         joined.assert_called_with(rid='identity:0')
@@ -182,4 +195,3 @@ class TestClusterHooks(CharmTestCase):
         self.relation_get.return_value = None
         hooks.hooks.execute(['hooks/ha-relation-changed'])
         self.assertTrue(self.juju_log.called)
-        self.assertFalse(self.is_leader.called)