Fixup unit tests for new context helpers

James Page 2014-09-30 09:05:41 +01:00
parent 0781a760c8
commit 999cfae8c1
7 changed files with 112 additions and 99 deletions


@@ -1,4 +1,4 @@
branch: lp:charm-helpers
branch: lp:~james-page/charm-helpers/multiple-https-networks
destination: hooks/charmhelpers
include:
- core


@@ -1,6 +1,3 @@
from bzrlib.branch import Branch
import os
import re
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
@@ -19,41 +16,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.openstack = openstack
self.source = source
def _is_dev_branch(self):
"""Determine if branch being tested is a dev (i.e. next) branch."""
branch = Branch.open(os.getcwd())
parent = branch.get_parent()
pattern = re.compile("^.*/next/$")
if (pattern.match(parent)):
return True
else:
return False
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
If the branch being tested is a dev branch, then determine the
development branch locations for the other services. Otherwise,
the default charm store branches will be used."""
name = 0
if self._is_dev_branch():
updated_services = []
for svc in other_services:
if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
location = 'lp:charms/{}'.format(svc[name])
else:
temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
location = temp.format(svc[name])
updated_services.append(svc + (location,))
other_services = updated_services
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
name = 0
other_services = self._determine_branch_locations(other_services)
"""Add services to the deployment and set openstack-origin."""
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
name = 0
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
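For context, the removed _determine_branch_locations helper rewrote the bare service tuples into (name, branch) tuples whenever the branch under test matched ".*/next/". A rough sketch of its effect (the glance entry is a hypothetical example, not taken from this diff):

    other_services = [('mysql',), ('glance',)]
    # became:
    #   [('mysql', 'lp:charms/mysql'),
    #    ('glance', 'lp:~openstack-charmers/charms/trusty/glance/next')]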


@@ -187,16 +187,15 @@ class OpenStackAmuletUtils(AmuletUtils):
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
if not os.path.exists(local_path):
if not os.path.exists(cirros_img):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, local_path)
opener.retrieve(cirros_url, cirros_img)
f.close()
with open(local_path) as f:
with open(cirros_img) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)


@@ -52,7 +52,8 @@ from charmhelpers.contrib.openstack.neutron import (
from charmhelpers.contrib.network.ip import (
get_address_in_network,
get_ipv6_addr,
is_address_in_network
is_address_in_network,
get_netmask_for_address
)
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@@ -396,6 +397,9 @@ class CephContext(OSContextGenerator):
return ctxt
ADDRESS_TYPES = ['admin', 'internal', 'public']
class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
@@ -408,30 +412,57 @@ class HAProxyContext(OSContextGenerator):
if not relation_ids('cluster'):
return {}
cluster_hosts = {}
l_unit = local_unit().replace('/', '-')
if config('prefer-ipv6'):
addr = get_ipv6_addr()
else:
addr = unit_get('private-address')
cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
addr)
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
addr = relation_get('private-address', rid=rid, unit=unit)
cluster_hosts[_unit] = addr
cluster_hosts = {}
# NOTE(jamespage): build out map of configured network endpoints
# and associated backends
for addr_type in ADDRESS_TYPES:
laddr = get_address_in_network(
config('os-{}-network'.format(addr_type)))
if laddr:
cluster_hosts[laddr] = {}
cluster_hosts[laddr]['network'] = "{}/{}".format(
laddr,
get_netmask_for_address(laddr)
)
cluster_hosts[laddr]['backends'] = {}
cluster_hosts[laddr]['backends'][l_unit] = laddr
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
_laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit)
if _laddr:
cluster_hosts[laddr]['backends'][_unit] = _laddr
# NOTE(jamespage) no split configurations found, just use
# private addresses
if len(cluster_hosts) < 1:
cluster_hosts[addr] = {}
cluster_hosts[addr]['network'] = "{}/{}".format(
addr,
get_netmask_for_address(addr)
)
cluster_hosts[addr]['backends'] = {}
cluster_hosts[addr]['backends'][l_unit] = addr
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
_laddr = relation_get('private-address',
rid=rid, unit=unit)
if _laddr:
cluster_hosts[addr]['backends'][_unit] = _laddr
ctxt = {
'units': cluster_hosts,
'frontends': cluster_hosts,
}
if config('haproxy-server-timeout'):
ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
if config('haproxy-client-timeout'):
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
if config('prefer-ipv6'):
ctxt['local_host'] = 'ip6-localhost'
ctxt['haproxy_host'] = '::'
@@ -441,12 +472,13 @@ class HAProxyContext(OSContextGenerator):
ctxt['haproxy_host'] = '0.0.0.0'
ctxt['stat_port'] = ':8888'
if len(cluster_hosts.keys()) > 1:
# Enable haproxy when we have enough peers.
log('Ensuring haproxy enabled in /etc/default/haproxy.')
with open('/etc/default/haproxy', 'w') as out:
out.write('ENABLED=1\n')
return ctxt
for frontend in cluster_hosts:
if len(cluster_hosts[frontend]['backends']) > 1:
# Enable haproxy when we have enough peers.
log('Ensuring haproxy enabled in /etc/default/haproxy.')
with open('/etc/default/haproxy', 'w') as out:
out.write('ENABLED=1\n')
return ctxt
log('HAProxy context is incomplete, this unit has no peers.')
return {}
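To make the new context shape concrete: with a single configured network, the frontends mapping assembled above ends up looking roughly like this (a sketch using the addresses from the unit test at the end of this diff, not exact runtime output):

    {
        '10.10.10.11': {
            'network': '10.10.10.11/255.255.255.0',
            'backends': {
                'neutron-api-0': '10.10.10.10',
                'neutron-api-1': '10.10.10.11',
            },
        },
    }

Each key is a local listen address, its 'network' is that address plus netmask, and 'backends' maps peer unit names to the addresses they advertised for the same network.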
@@ -708,22 +740,22 @@ class NeutronContext(OSContextGenerator):
class OSConfigFlagContext(OSContextGenerator):
"""
Responsible for adding user-defined config-flags in charm config to a
template context.
"""
Responsible for adding user-defined config-flags in charm config to a
template context.
NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some Openstack config files support
comma-separated lists as values.
"""
NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some Openstack config files support
comma-separated lists as values.
"""
def __call__(self):
config_flags = config('config-flags')
if not config_flags:
return {}
def __call__(self):
config_flags = config('config-flags')
if not config_flags:
return {}
flags = config_flags_parser(config_flags)
return {'user_config_flags': flags}
flags = config_flags_parser(config_flags)
return {'user_config_flags': flags}
class SubordinateConfigContext(OSContextGenerator):
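For reference, config_flags_parser splits the config-flags option into a dict of key/value pairs, so the context returned by OSConfigFlagContext looks roughly like the following (flag names invented for illustration, and assuming simple values with no embedded commas):

    # config-flags = "api_rate_limit=False,quota_driver=nova.quota.DbQuotaDriver"
    # OSConfigFlagContext()() ->
    #   {'user_config_flags': {'api_rate_limit': 'False',
    #                          'quota_driver': 'nova.quota.DbQuotaDriver'}}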


@@ -14,17 +14,8 @@ defaults
retries 3
timeout queue 1000
timeout connect 1000
{% if haproxy_client_timeout -%}
timeout client {{ haproxy_client_timeout }}
{% else -%}
timeout client 30000
{% endif -%}
{% if haproxy_server_timeout -%}
timeout server {{ haproxy_server_timeout }}
{% else -%}
timeout server 30000
{% endif -%}
listen stats {{ stat_port }}
mode http
@@ -34,17 +25,21 @@ listen stats {{ stat_port }}
stats uri /
stats auth admin:password
{% if units -%}
{% if frontends -%}
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
balance roundrobin
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
frontend tcp-in_{{ service }}
bind *:{{ ports[0] }}
bind :::{{ ports[0] }}
{% for frontend in frontends -%}
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
{% endfor %}
listen {{ service }}_ipv6 :::{{ ports[0] }}
balance roundrobin
{% for unit, address in units.iteritems() -%}
{% for frontend in frontends -%}
backend {{ service }}_{{ frontend }}
balance leastconn
{% for unit, address in frontends[frontend]['backends'].iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
{% endfor -%}
{% endfor -%}
{% endif -%}
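As an illustration of the template change, one service with a single configured network renders to roughly the following (service name, ports and addresses borrowed from the unit test below; indentation approximate, not the literal template output):

    frontend tcp-in_neutron-server
        bind *:9696
        bind :::9696
        acl net_10.10.10.11 dst 10.10.10.11/255.255.255.0
        use_backend neutron-server_10.10.10.11 if net_10.10.10.11

    backend neutron-server_10.10.10.11
        balance leastconn
        server neutron-api-0 10.10.10.10:9686 check
        server neutron-api-1 10.10.10.11:9686 check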


@@ -60,6 +60,8 @@ from charmhelpers.contrib.network.ip import (
get_address_in_network
)
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
hooks = Hooks()
CONFIGS = register_configs()
@@ -245,10 +247,15 @@ def neutron_plugin_api_relation_joined(rid=None):
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
address = get_address_in_network(config('os-internal-network'),
unit_get('private-address'))
relation_set(relation_id=relation_id,
relation_settings={'private-address': address})
for addr_type in ADDRESS_TYPES:
address = get_address_in_network(
config('os-{}-network'.format(addr_type))
)
if address:
relation_set(
relation_id=relation_id,
relation_settings={'{}-address'.format(addr_type): address}
)
@hooks.hook('cluster-relation-changed',
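The reworked cluster_joined hook now advertises one address per configured network type instead of a single private-address. For example (addresses hypothetical), with os-internal-network and os-public-network set, it issues one relation_set call per configured type:

    # relation settings sent on the cluster relation:
    {'internal-address': '10.20.0.5'}
    {'public-address': '192.0.2.5'}
    # address types with no configured/matching network are skipped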


@@ -14,6 +14,7 @@ TO_PATCH = [
class IdentityServiceContext(CharmTestCase):
def setUp(self):
super(IdentityServiceContext, self).setUp(context, TO_PATCH)
self.relation_get.side_effect = self.test_relation.get
@@ -67,6 +68,10 @@ class HAProxyContextTest(CharmTestCase):
hap_ctxt = context.HAProxyContext()
self.assertTrue('units' not in hap_ctxt())
@patch.object(
charmhelpers.contrib.openstack.context, 'get_netmask_for_address')
@patch.object(
charmhelpers.contrib.openstack.context, 'get_address_in_network')
@patch.object(charmhelpers.contrib.openstack.context, 'config')
@patch.object(charmhelpers.contrib.openstack.context, 'local_unit')
@patch.object(charmhelpers.contrib.openstack.context, 'unit_get')
@@ -75,7 +80,8 @@ class HAProxyContextTest(CharmTestCase):
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
@patch.object(charmhelpers.contrib.openstack.context, 'log')
def test_context_peers(self, _log, _rids, _runits, _rget, _uget,
_lunit, _config):
_lunit, _config, _get_address_in_network,
_get_netmask_for_address):
unit_addresses = {
'neutron-api-0': '10.10.10.10',
'neutron-api-1': '10.10.10.11',
@@ -86,13 +92,20 @@ class HAProxyContextTest(CharmTestCase):
_lunit.return_value = "neutron-api/1"
_uget.return_value = unit_addresses['neutron-api-1']
_config.return_value = None
_get_address_in_network.return_value = None
_get_netmask_for_address.return_value = '255.255.255.0'
service_ports = {'neutron-server': [9696, 9686]}
self.maxDiff = None
ctxt_data = {
'haproxy_host': '0.0.0.0',
'local_host': '127.0.0.1',
'stat_port': ':8888',
'units': unit_addresses,
'frontends': {
'10.10.10.11': {
'network': '10.10.10.11/255.255.255.0',
'backends': unit_addresses,
}
},
'service_ports': service_ports,
'neutron_bind_port': 9686,
}