[jamespage, r=gnuoy] Add support for multiple network configurations.

This commit is contained in:
Liam Young 2014-07-28 11:32:01 +01:00
commit 19faf78ef2
16 changed files with 381 additions and 41 deletions

2
.bzrignore Normal file
View File

@ -0,0 +1,2 @@
bin
.coverage

View File

@ -6,11 +6,15 @@ lint:
@charm proof @charm proof
test: test:
@echo Starting tests... @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
sync: bin/charm_helpers_sync.py:
@charm-helper-sync -c charm-helpers.yaml @mkdir -p bin
@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
> bin/charm_helpers_sync.py
sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
publish: lint test publish: lint test
bzr push lp:charms/cinder bzr push lp:charms/cinder

View File

@ -10,3 +10,4 @@ include:
- cluster - cluster
- fetch - fetch
- payload.execd - payload.execd
- contrib.network.ip

View File

@ -102,15 +102,11 @@ options:
# HA configuration settings # HA configuration settings
vip: vip:
type: string type: string
description: "Virtual IP to use to front cinder API in ha configuration" description: |
vip_iface: Virtual IP(s) to use to front API services in HA configuration.
type: string .
default: eth0 If multiple networks are being used, a VIP should be provided for each
description: "Network Interface where to place the Virtual IP" network, separated by spaces.
vip_cidr:
type: int
default: 24
description: "Netmask that will be used for the Virtual IP"
ha-bindiface: ha-bindiface:
type: string type: string
default: eth0 default: eth0
@ -142,4 +138,27 @@ options:
config-flags: config-flags:
type: string type: string
description: Comma separated list of key=value config flags to be set in cinder.conf. description: Comma separated list of key=value config flags to be set in cinder.conf.
# Network configuration options
# by default all access is over 'private-address'
os-admin-network:
type: string
description: |
The IP address and netmask of the OpenStack Admin network (e.g.,
192.168.0.0/24)
.
This network will be used for admin endpoints.
os-internal-network:
type: string
description: |
The IP address and netmask of the OpenStack Internal network (e.g.,
192.168.0.0/24)
.
This network will be used for internal endpoints.
os-public-network:
type: string
description: |
The IP address and netmask of the OpenStack Public network (e.g.,
192.168.0.0/24)
.
This network will be used for public endpoints.

View File

@ -146,12 +146,12 @@ def get_hacluster_config():
Obtains all relevant configuration from charm configuration required Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster: for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr ha-bindiface, ha-mcastport, vip
returns: dict: A dict containing settings keyed by setting name. returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing. raises: HAIncompleteConfig if settings are missing.
''' '''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] settings = ['ha-bindiface', 'ha-mcastport', 'vip']
conf = {} conf = {}
for setting in settings: for setting in settings:
conf[setting] = config_get(setting) conf[setting] = config_get(setting)

View File

@ -0,0 +1,156 @@
import sys
from functools import partial
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
ERROR, log,
)
try:
import netifaces
except ImportError:
apt_install('python-netifaces')
import netifaces
try:
import netaddr
except ImportError:
apt_install('python-netaddr')
import netaddr
def _validate_cidr(network):
    """Ensure *network* is valid CIDR notation; raise ValueError otherwise."""
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        msg = "Network (%s) is not in CIDR presentation format" % network
        raise ValueError(msg)
def get_address_in_network(network, fallback=None, fatal=False):
    """
    Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    """

    def not_found_error_out():
        log("No IP address found in network: %s" % network,
            level=ERROR)
        sys.exit(1)

    if network is None:
        if fallback is not None:
            return fallback
        if fatal:
            not_found_error_out()
        # BUGFIX: previously fell through to _validate_cidr(None) and
        # raised ValueError; with no network, no fallback and fatal=False
        # the documented behaviour is to return None.
        return None

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            # IPv4: only the first configured address per interface is
            # considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)
        if network.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80::/10) addresses.
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)

    if fallback is not None:
        return fallback
    if fatal:
        not_found_error_out()
    return None
def is_ipv6(address):
    """Return True if *address* parses as an IPv6 address, else False."""
    try:
        parsed = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # Not an IP literal at all (most likely a hostname).
        return False
    return parsed.version == 6
def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        net = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
    try:
        ip = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)
    return ip in net
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            # IPv4: only the first configured address on the interface is
            # inspected.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]
        if address.version == 6 and netifaces.AF_INET6 in addresses:
            # IPv6: every configured address is checked, skipping
            # link-local (fe80::/10) entries.
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        else:
                            return addr[key]
    return None
# Convenience wrappers: look up the interface name, or the netmask, of the
# local interface a given address is (or could be) bound to.
get_iface_for_address = partial(_get_for_address, key='iface')

get_netmask_for_address = partial(_get_for_address, key='netmask')

View File

@ -21,6 +21,7 @@ from charmhelpers.core.hookenv import (
relation_get, relation_get,
relation_ids, relation_ids,
related_units, related_units,
relation_set,
unit_get, unit_get,
unit_private_ip, unit_private_ip,
ERROR, ERROR,
@ -43,6 +44,8 @@ from charmhelpers.contrib.openstack.neutron import (
neutron_plugin_attribute, neutron_plugin_attribute,
) )
from charmhelpers.contrib.network.ip import get_address_in_network
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@ -135,8 +138,26 @@ class SharedDBContext(OSContextGenerator):
'Missing required charm config options. ' 'Missing required charm config options. '
'(database name and user)') '(database name and user)')
raise OSContextError raise OSContextError
ctxt = {} ctxt = {}
# NOTE(jamespage) if mysql charm provides a network upon which
# access to the database should be made, reconfigure relation
# with the service units local address and defer execution
access_network = relation_get('access-network')
if access_network is not None:
if self.relation_prefix is not None:
hostname_key = "{}_hostname".format(self.relation_prefix)
else:
hostname_key = "hostname"
access_hostname = get_address_in_network(access_network,
unit_get('private-address'))
set_hostname = relation_get(attribute=hostname_key,
unit=local_unit())
if set_hostname != access_hostname:
relation_set(relation_settings={hostname_key: access_hostname})
return ctxt # Defer any further hook execution for now....
password_setting = 'password' password_setting = 'password'
if self.relation_prefix: if self.relation_prefix:
password_setting = self.relation_prefix + '_password' password_setting = self.relation_prefix + '_password'
@ -341,10 +362,12 @@ class CephContext(OSContextGenerator):
use_syslog = str(config('use-syslog')).lower() use_syslog = str(config('use-syslog')).lower()
for rid in relation_ids('ceph'): for rid in relation_ids('ceph'):
for unit in related_units(rid): for unit in related_units(rid):
mon_hosts.append(relation_get('private-address', rid=rid,
unit=unit))
auth = relation_get('auth', rid=rid, unit=unit) auth = relation_get('auth', rid=rid, unit=unit)
key = relation_get('key', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit)
ceph_addr = \
relation_get('ceph-public-address', rid=rid, unit=unit) or \
relation_get('private-address', rid=rid, unit=unit)
mon_hosts.append(ceph_addr)
ctxt = { ctxt = {
'mon_hosts': ' '.join(mon_hosts), 'mon_hosts': ' '.join(mon_hosts),
@ -378,7 +401,9 @@ class HAProxyContext(OSContextGenerator):
cluster_hosts = {} cluster_hosts = {}
l_unit = local_unit().replace('/', '-') l_unit = local_unit().replace('/', '-')
cluster_hosts[l_unit] = unit_get('private-address') cluster_hosts[l_unit] = \
get_address_in_network(config('os-internal-network'),
unit_get('private-address'))
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
for unit in related_units(rid): for unit in related_units(rid):

View File

@ -0,0 +1,75 @@
from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
# Endpoint type identifiers used as keys into _address_map and as the
# endpoint_type argument to canonical_url()/resolve_address().
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

# Maps each endpoint type to the charm config option naming the network it
# should be served on, and to the unit address used as a fallback when that
# option is unset or yields no match.
_address_map = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address'
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address'
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address'
    }
}
def canonical_url(configs, endpoint_type=PUBLIC):
    """Return the base HTTP(S) URL for services on this unit.

    The scheme reflects the state of HTTPS configuration; the host part is
    resolved from hacluster state and charm configuration.

    :configs OSTemplateRenderer: A config templating object to inspect for
        a complete https context.
    :endpoint_type str: The endpoint type to resolve.
    :returns str: Base URL for services on the current service unit.
    """
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    address = resolve_address(endpoint_type)
    # IPv6 literals must be bracketed inside a URL authority component.
    if is_ipv6(address):
        address = "[{}]".format(address)
    return '%s://%s' % (scheme, address)
def resolve_address(endpoint_type=PUBLIC):
    """Resolve the IP address to advertise for *endpoint_type*.

    Clustered units answer on a VIP (matched against the endpoint's
    configured network when one is set); standalone units answer on a local
    address within that network, falling back to the unit's own address.

    :endpoint_type str: One of PUBLIC, INTERNAL or ADMIN.
    :returns str: The resolved address.
    :raises ValueError: if no suitable address can be determined.
    """
    network = config(_address_map[endpoint_type]['config'])
    resolved = None
    if is_clustered():
        if network is None:
            # No per-endpoint network configured; assume a single,
            # simple vip and pass it back directly.
            resolved = config('vip')
        else:
            for candidate in config('vip').split():
                if is_address_in_network(network, candidate):
                    resolved = candidate
    else:
        fallback = unit_get(_address_map[endpoint_type]['fallback'])
        resolved = get_address_in_network(network, fallback)
    if resolved is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    return resolved

View File

@ -27,7 +27,12 @@ listen stats :8888
{% if units -%} {% if units -%}
{% for service, ports in service_ports.iteritems() -%} {% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }} listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
balance roundrobin
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
listen {{ service }}_ipv6 :::{{ ports[0] }}
balance roundrobin balance roundrobin
{% for unit, address in units.iteritems() -%} {% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check server {{ unit }} {{ address }}:{{ ports[1] }} check

View File

@ -322,6 +322,10 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
import apt_pkg import apt_pkg
if not pkgcache: if not pkgcache:
apt_pkg.init() apt_pkg.init()
# Force Apt to build its cache in memory. That way we avoid race
# conditions with other applications building the cache in the same
# place.
apt_pkg.config.set("Dir::Cache::pkgcache", "")
pkgcache = apt_pkg.Cache() pkgcache = apt_pkg.Cache()
pkg = pkgcache[package] pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)

View File

@ -105,5 +105,6 @@ class StorageBackendContext(OSContextGenerator):
class LoggingConfigContext(OSContextGenerator): class LoggingConfigContext(OSContextGenerator):
def __call__(self): def __call__(self):
return {'debug': config('debug'), 'verbose': config('verbose')} return {'debug': config('debug'), 'verbose': config('verbose')}

View File

@ -34,7 +34,7 @@ from charmhelpers.core.hookenv import (
service_name, service_name,
unit_get, unit_get,
log, log,
ERROR ERROR,
) )
from charmhelpers.fetch import apt_install, apt_update from charmhelpers.fetch import apt_install, apt_update
@ -46,13 +46,21 @@ from charmhelpers.contrib.openstack.utils import (
from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring
from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.cluster import (
canonical_url,
eligible_leader, eligible_leader,
is_leader, is_leader,
get_hacluster_config, get_hacluster_config,
) )
from charmhelpers.payload.execd import execd_preinstall from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address,
get_address_in_network
)
from charmhelpers.contrib.openstack.ip import (
canonical_url,
PUBLIC, INTERNAL, ADMIN
)
hooks = Hooks() hooks = Hooks()
@ -65,7 +73,7 @@ def install():
conf = config() conf = config()
src = conf['openstack-origin'] src = conf['openstack-origin']
if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and
src == 'distro'): src == 'distro'):
src = 'cloud:precise-folsom' src = 'cloud:precise-folsom'
configure_installation_source(src) configure_installation_source(src)
apt_update() apt_update()
@ -93,6 +101,9 @@ def config_changed():
CONFIGS.write_all() CONFIGS.write_all()
configure_https() configure_https()
for rid in relation_ids('cluster'):
cluster_joined(relation_id=rid)
@hooks.hook('shared-db-relation-joined') @hooks.hook('shared-db-relation-joined')
def db_joined(): def db_joined():
@ -175,17 +186,24 @@ def identity_joined(rid=None):
if not eligible_leader(CLUSTER_RES): if not eligible_leader(CLUSTER_RES):
return return
conf = config() public_url = '{}:{}/v1/$(tenant_id)s'.format(
canonical_url(CONFIGS, PUBLIC),
port = conf['api-listening-port'] config('api-listening-port')
url = canonical_url(CONFIGS) + ':%s/v1/$(tenant_id)s' % port )
internal_url = '{}:{}/v1/$(tenant_id)s'.format(
canonical_url(CONFIGS, INTERNAL),
config('api-listening-port')
)
admin_url = '{}:{}/v1/$(tenant_id)s'.format(
canonical_url(CONFIGS, ADMIN),
config('api-listening-port')
)
settings = { settings = {
'region': conf['region'], 'region': config('region'),
'service': 'cinder', 'service': 'cinder',
'public_url': url, 'public_url': public_url,
'internal_url': url, 'internal_url': internal_url,
'admin_url': url, 'admin_url': admin_url,
} }
relation_set(relation_id=rid, **settings) relation_set(relation_id=rid, **settings)
@ -228,6 +246,14 @@ def ceph_changed():
replicas=_config['ceph-osd-replication-count']) replicas=_config['ceph-osd-replication-count'])
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
address = get_address_in_network(config('os-internal-network'),
unit_get('private-address'))
relation_set(relation_id=relation_id,
relation_settings={'private-address': address})
@hooks.hook('cluster-relation-changed', @hooks.hook('cluster-relation-changed',
'cluster-relation-departed') 'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True) @restart_on_change(restart_map(), stopstart=True)
@ -238,17 +264,32 @@ def cluster_changed():
@hooks.hook('ha-relation-joined') @hooks.hook('ha-relation-joined')
def ha_joined(): def ha_joined():
config = get_hacluster_config() config = get_hacluster_config()
resources = { resources = {
'res_cinder_vip': 'ocf:heartbeat:IPaddr2',
'res_cinder_haproxy': 'lsb:haproxy' 'res_cinder_haproxy': 'lsb:haproxy'
} }
vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
(config['vip'], config['vip_cidr'], config['vip_iface'])
resource_params = { resource_params = {
'res_cinder_vip': vip_params,
'res_cinder_haproxy': 'op monitor interval="5s"' 'res_cinder_haproxy': 'op monitor interval="5s"'
} }
vip_group = []
for vip in config['vip'].split():
iface = get_iface_for_address(vip)
if iface is not None:
vip_key = 'res_cinder_{}_vip'.format(iface)
resources[vip_key] = 'ocf:heartbeat:IPaddr2'
resource_params[vip_key] = (
'params ip="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(vip=vip,
iface=iface,
netmask=get_netmask_for_address(vip))
)
vip_group.append(vip_key)
if len(vip_group) > 1:
relation_set(groups={'grp_cinder_vips': ' '.join(vip_group)})
init_services = { init_services = {
'res_cinder_haproxy': 'haproxy' 'res_cinder_haproxy': 'haproxy'
} }

View File

@ -86,7 +86,7 @@ SCHEDULER_PACKAGES = ['cinder-scheduler']
DEFAULT_LOOPBACK_SIZE = '5G' DEFAULT_LOOPBACK_SIZE = '5G'
# Cluster resource used to determine leadership when hacluster'd # Cluster resource used to determine leadership when hacluster'd
CLUSTER_RES = 'res_cinder_vip' CLUSTER_RES = 'grp_cinder_vips'
class CinderCharmError(Exception): class CinderCharmError(Exception):
@ -391,7 +391,7 @@ def set_ceph_env_variables(service):
with open('/etc/environment', 'a') as out: with open('/etc/environment', 'a') as out:
out.write('CEPH_ARGS="--id %s"\n' % service) out.write('CEPH_ARGS="--id %s"\n' % service)
with open('/etc/init/cinder-volume.override', 'w') as out: with open('/etc/init/cinder-volume.override', 'w') as out:
out.write('env CEPH_ARGS="--id %s"\n' % service) out.write('env CEPH_ARGS="--id %s"\n' % service)
def do_openstack_upgrade(configs): def do_openstack_upgrade(configs):

View File

@ -300,6 +300,7 @@ class TestJoinedHooks(CharmTestCase):
def test_identity_service_joined(self): def test_identity_service_joined(self):
'It properly requests unclustered endpoint via identity-service' 'It properly requests unclustered endpoint via identity-service'
self.unit_get.return_value = 'cindernode1' self.unit_get.return_value = 'cindernode1'
self.config.side_effect = self.test_config.get
self.canonical_url.return_value = 'http://cindernode1' self.canonical_url.return_value = 'http://cindernode1'
hooks.hooks.execute(['hooks/identity-service-relation-joined']) hooks.hooks.execute(['hooks/identity-service-relation-joined'])
expected = { expected = {

View File

@ -51,7 +51,10 @@ TO_PATCH = [
# charmhelpers.contrib.hahelpers.cluster_utils # charmhelpers.contrib.hahelpers.cluster_utils
'eligible_leader', 'eligible_leader',
'get_hacluster_config', 'get_hacluster_config',
'is_leader' 'is_leader',
# charmhelpers.contrib.network.ip
'get_iface_for_address',
'get_netmask_for_address'
] ]
@ -96,19 +99,22 @@ class TestClusterHooks(CharmTestCase):
'vip_cidr': '19', 'vip_cidr': '19',
} }
self.get_hacluster_config.return_value = conf self.get_hacluster_config.return_value = conf
self.get_iface_for_address.return_value = 'eth101'
self.get_netmask_for_address.return_value = '255.255.224.0'
hooks.hooks.execute(['hooks/ha-relation-joined']) hooks.hooks.execute(['hooks/ha-relation-joined'])
ex_args = { ex_args = {
'corosync_mcastport': '37373', 'corosync_mcastport': '37373',
'init_services': {'res_cinder_haproxy': 'haproxy'}, 'init_services': {'res_cinder_haproxy': 'haproxy'},
'resource_params': { 'resource_params': {
'res_cinder_vip': 'res_cinder_eth101_vip':
'params ip="192.168.25.163" cidr_netmask="19" nic="eth101"', 'params ip="192.168.25.163" cidr_netmask="255.255.224.0"'
' nic="eth101"',
'res_cinder_haproxy': 'op monitor interval="5s"' 'res_cinder_haproxy': 'op monitor interval="5s"'
}, },
'corosync_bindiface': 'eth100', 'corosync_bindiface': 'eth100',
'clones': {'cl_cinder_haproxy': 'res_cinder_haproxy'}, 'clones': {'cl_cinder_haproxy': 'res_cinder_haproxy'},
'resources': { 'resources': {
'res_cinder_vip': 'ocf:heartbeat:IPaddr2', 'res_cinder_eth101_vip': 'ocf:heartbeat:IPaddr2',
'res_cinder_haproxy': 'lsb:haproxy' 'res_cinder_haproxy': 'lsb:haproxy'
} }
} }