Enable haproxy when there is a single unit in a deployment

This commit is contained in:
Liam Young 2014-11-25 10:19:07 +00:00
parent ea938dbdb5
commit e55e64c7dd
27 changed files with 680 additions and 478 deletions

View File

@ -1,4 +1,4 @@
branch: lp:charm-helpers branch: lp:~gnuoy/charm-helpers/haproxy-singlenode-mode
destination: hooks/charmhelpers destination: hooks/charmhelpers
include: include:
- core - core

View File

@ -13,9 +13,10 @@ clustering-related helpers.
import subprocess import subprocess
import os import os
from socket import gethostname as get_unit_hostname from socket import gethostname as get_unit_hostname
import six
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
log, log,
relation_ids, relation_ids,
@ -150,34 +151,42 @@ def https():
return False return False
def determine_api_port(public_port): def determine_api_port(public_port, singlenode_mode=False):
''' '''
Determine correct API server listening port based on Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy. existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the API service returns: int: the correct listening port for the API service
''' '''
i = 0 i = 0
if len(peer_units()) > 0 or is_clustered(): if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1 i += 1
if https(): if https():
i += 1 i += 1
return public_port - (i * 10) return public_port - (i * 10)
def determine_apache_port(public_port): def determine_apache_port(public_port, singlenode_mode=False):
''' '''
Description: Determine correct apache listening port based on public IP + Description: Determine correct apache listening port based on public IP +
state of the cluster. state of the cluster.
public_port: int: standard public port for given service public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the HAProxy service returns: int: the correct listening port for the HAProxy service
''' '''
i = 0 i = 0
if len(peer_units()) > 0 or is_clustered(): if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1 i += 1
return public_port - (i * 10) return public_port - (i * 10)
@ -197,7 +206,7 @@ def get_hacluster_config():
for setting in settings: for setting in settings:
conf[setting] = config_get(setting) conf[setting] = config_get(setting)
missing = [] missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None] [missing.append(s) for s, v in six.iteritems(conf) if v is None]
if missing: if missing:
log('Insufficient config data to configure hacluster.', level=ERROR) log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig raise HAIncompleteConfig

View File

@ -1,15 +1,12 @@
import glob import glob
import re import re
import subprocess import subprocess
import sys
from functools import partial from functools import partial
from charmhelpers.core.hookenv import unit_get from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
WARNING,
ERROR,
log log
) )
@ -34,31 +31,28 @@ def _validate_cidr(network):
network) network)
def no_ip_found_error_out(network):
errmsg = ("No IP address found in network: %s" % network)
raise ValueError(errmsg)
def get_address_in_network(network, fallback=None, fatal=False): def get_address_in_network(network, fallback=None, fatal=False):
""" """Get an IPv4 or IPv6 address within the network from the host.
Get an IPv4 or IPv6 address within the network from the host.
:param network (str): CIDR presentation format. For example, :param network (str): CIDR presentation format. For example,
'192.168.1.0/24'. '192.168.1.0/24'.
:param fallback (str): If no address is found, return fallback. :param fallback (str): If no address is found, return fallback.
:param fatal (boolean): If no address is found, fallback is not :param fatal (boolean): If no address is found, fallback is not
set and fatal is True then exit(1). set and fatal is True then exit(1).
""" """
def not_found_error_out():
log("No IP address found in network: %s" % network,
level=ERROR)
sys.exit(1)
if network is None: if network is None:
if fallback is not None: if fallback is not None:
return fallback return fallback
if fatal:
no_ip_found_error_out(network)
else: else:
if fatal: return None
not_found_error_out()
else:
return None
_validate_cidr(network) _validate_cidr(network)
network = netaddr.IPNetwork(network) network = netaddr.IPNetwork(network)
@ -70,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network: if cidr in network:
return str(cidr.ip) return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses: if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]: for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'): if not addr['addr'].startswith('fe80'):
@ -82,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False):
return fallback return fallback
if fatal: if fatal:
not_found_error_out() no_ip_found_error_out(network)
return None return None
def is_ipv6(address): def is_ipv6(address):
'''Determine whether provided address is IPv6 or not''' """Determine whether provided address is IPv6 or not."""
try: try:
address = netaddr.IPAddress(address) address = netaddr.IPAddress(address)
except netaddr.AddrFormatError: except netaddr.AddrFormatError:
# probably a hostname - so not an address at all! # probably a hostname - so not an address at all!
return False return False
else:
return address.version == 6 return address.version == 6
def is_address_in_network(network, address): def is_address_in_network(network, address):
@ -113,11 +108,13 @@ def is_address_in_network(network, address):
except (netaddr.core.AddrFormatError, ValueError): except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Network (%s) is not in CIDR presentation format" % raise ValueError("Network (%s) is not in CIDR presentation format" %
network) network)
try: try:
address = netaddr.IPAddress(address) address = netaddr.IPAddress(address)
except (netaddr.core.AddrFormatError, ValueError): except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Address (%s) is not in correct presentation format" % raise ValueError("Address (%s) is not in correct presentation format" %
address) address)
if address in network: if address in network:
return True return True
else: else:
@ -147,6 +144,7 @@ def _get_for_address(address, key):
return iface return iface
else: else:
return addresses[netifaces.AF_INET][0][key] return addresses[netifaces.AF_INET][0][key]
if address.version == 6 and netifaces.AF_INET6 in addresses: if address.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]: for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'): if not addr['addr'].startswith('fe80'):
@ -160,41 +158,42 @@ def _get_for_address(address, key):
return str(cidr).split('/')[1] return str(cidr).split('/')[1]
else: else:
return addr[key] return addr[key]
return None return None
get_iface_for_address = partial(_get_for_address, key='iface') get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask') get_netmask_for_address = partial(_get_for_address, key='netmask')
def format_ipv6_addr(address): def format_ipv6_addr(address):
""" """If address is IPv6, wrap it in '[]' otherwise return None.
IPv6 needs to be wrapped with [] in url link to parse correctly.
This is required by most configuration files when specifying IPv6
addresses.
""" """
if is_ipv6(address): if is_ipv6(address):
address = "[%s]" % address return "[%s]" % address
else:
log("Not a valid ipv6 address: %s" % address, level=WARNING)
address = None
return address return None
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None): fatal=True, exc_list=None):
""" """Return the assigned IP address for a given interface, if any."""
Return the assigned IP address for a given interface, if any, or [].
"""
# Extract nic if passed /dev/ethX # Extract nic if passed /dev/ethX
if '/' in iface: if '/' in iface:
iface = iface.split('/')[-1] iface = iface.split('/')[-1]
if not exc_list: if not exc_list:
exc_list = [] exc_list = []
try: try:
inet_num = getattr(netifaces, inet_type) inet_num = getattr(netifaces, inet_type)
except AttributeError: except AttributeError:
raise Exception('Unknown inet type ' + str(inet_type)) raise Exception("Unknown inet type '%s'" % str(inet_type))
interfaces = netifaces.interfaces() interfaces = netifaces.interfaces()
if inc_aliases: if inc_aliases:
@ -202,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
for _iface in interfaces: for _iface in interfaces:
if iface == _iface or _iface.split(':')[0] == iface: if iface == _iface or _iface.split(':')[0] == iface:
ifaces.append(_iface) ifaces.append(_iface)
if fatal and not ifaces: if fatal and not ifaces:
raise Exception("Invalid interface '%s'" % iface) raise Exception("Invalid interface '%s'" % iface)
ifaces.sort() ifaces.sort()
else: else:
if iface not in interfaces: if iface not in interfaces:
if fatal: if fatal:
raise Exception("%s not found " % (iface)) raise Exception("Interface '%s' not found " % (iface))
else: else:
return [] return []
else: else:
ifaces = [iface] ifaces = [iface]
@ -221,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
for entry in net_info[inet_num]: for entry in net_info[inet_num]:
if 'addr' in entry and entry['addr'] not in exc_list: if 'addr' in entry and entry['addr'] not in exc_list:
addresses.append(entry['addr']) addresses.append(entry['addr'])
if fatal and not addresses: if fatal and not addresses:
raise Exception("Interface '%s' doesn't have any %s addresses." % raise Exception("Interface '%s' doesn't have any %s addresses." %
(iface, inet_type)) (iface, inet_type))
return addresses
return sorted(addresses)
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
@ -241,6 +246,7 @@ def get_iface_from_addr(addr):
raw = re.match(ll_key, _addr) raw = re.match(ll_key, _addr)
if raw: if raw:
_addr = raw.group(1) _addr = raw.group(1)
if _addr == addr: if _addr == addr:
log("Address '%s' is configured on iface '%s'" % log("Address '%s' is configured on iface '%s'" %
(addr, iface)) (addr, iface))
@ -251,8 +257,9 @@ def get_iface_from_addr(addr):
def sniff_iface(f): def sniff_iface(f):
"""If no iface provided, inject net iface inferred from unit private """Ensure decorated function is called with a value for iface.
address.
If no iface provided, inject net iface inferred from unit private address.
""" """
def iface_sniffer(*args, **kwargs): def iface_sniffer(*args, **kwargs):
if not kwargs.get('iface', None): if not kwargs.get('iface', None):
@ -317,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
return addrs return addrs
if fatal: if fatal:
raise Exception("Interface '%s' doesn't have a scope global " raise Exception("Interface '%s' does not have a scope global "
"non-temporary ipv6 address." % iface) "non-temporary ipv6 address." % iface)
return [] return []
def get_bridges(vnic_dir='/sys/devices/virtual/net'): def get_bridges(vnic_dir='/sys/devices/virtual/net'):
""" """Return a list of bridges on the system."""
Return a list of bridges on the system or [] b_regex = "%s/*/bridge" % vnic_dir
""" return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
b_rgex = vnic_dir + '/*/bridge'
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
""" """Return a list of nics comprising a given bridge on the system."""
Return a list of nics comprising a given bridge on the system or [] brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
""" return [x.split('/')[-1] for x in glob.glob(brif_regex)]
brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
def is_bridge_member(nic): def is_bridge_member(nic):
""" """Check if a given nic is a member of a bridge."""
Check if a given nic is a member of a bridge
"""
for bridge in get_bridges(): for bridge in get_bridges():
if nic in get_bridge_nics(bridge): if nic in get_bridge_nics(bridge):
return True return True
return False return False

View File

@ -1,3 +1,4 @@
import six
from charmhelpers.contrib.amulet.deployment import ( from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment AmuletDeployment
) )
@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
def _configure_services(self, configs): def _configure_services(self, configs):
"""Configure all of the services.""" """Configure all of the services."""
for service, config in configs.iteritems(): for service, config in six.iteritems(configs):
self.d.configure(service, config) self.d.configure(service, config)
def _get_openstack_release(self): def _get_openstack_release(self):

View File

@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import ( from charmhelpers.contrib.amulet.utils import (
AmuletUtils AmuletUtils
) )
@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
expected service catalog endpoints. expected service catalog endpoints.
""" """
self.log.debug('actual: {}'.format(repr(actual))) self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems(): for k, v in six.iteritems(expected):
if k in actual: if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0]) ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret: if ret:

View File

@ -1,20 +1,19 @@
import json import json
import os import os
import time import time
from base64 import b64decode from base64 import b64decode
from subprocess import check_call
from subprocess import ( import six
check_call from six.moves import xrange
)
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_install, apt_install,
filter_installed_packages, filter_installed_packages,
) )
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
config, config,
is_relation_made,
local_unit, local_unit,
log, log,
relation_get, relation_get,
@ -23,43 +22,40 @@ from charmhelpers.core.hookenv import (
relation_set, relation_set,
unit_get, unit_get,
unit_private_ip, unit_private_ip,
DEBUG,
INFO,
WARNING,
ERROR, ERROR,
INFO
) )
from charmhelpers.core.host import ( from charmhelpers.core.host import (
mkdir, mkdir,
write_file write_file,
) )
from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port, determine_apache_port,
determine_api_port, determine_api_port,
https, https,
is_clustered is_clustered,
) )
from charmhelpers.contrib.hahelpers.apache import ( from charmhelpers.contrib.hahelpers.apache import (
get_cert, get_cert,
get_ca_cert, get_ca_cert,
install_ca_cert, install_ca_cert,
) )
from charmhelpers.contrib.openstack.neutron import ( from charmhelpers.contrib.openstack.neutron import (
neutron_plugin_attribute, neutron_plugin_attribute,
) )
from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.network.ip import (
get_address_in_network, get_address_in_network,
get_ipv6_addr, get_ipv6_addr,
get_netmask_for_address, get_netmask_for_address,
format_ipv6_addr, format_ipv6_addr,
is_address_in_network is_address_in_network,
) )
from charmhelpers.contrib.openstack.utils import get_host_ip from charmhelpers.contrib.openstack.utils import get_host_ip
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
class OSContextError(Exception): class OSContextError(Exception):
@ -67,7 +63,7 @@ class OSContextError(Exception):
def ensure_packages(packages): def ensure_packages(packages):
'''Install but do not upgrade required plugin packages''' """Install but do not upgrade required plugin packages."""
required = filter_installed_packages(packages) required = filter_installed_packages(packages)
if required: if required:
apt_install(required, fatal=True) apt_install(required, fatal=True)
@ -75,20 +71,27 @@ def ensure_packages(packages):
def context_complete(ctxt): def context_complete(ctxt):
_missing = [] _missing = []
for k, v in ctxt.iteritems(): for k, v in six.iteritems(ctxt):
if v is None or v == '': if v is None or v == '':
_missing.append(k) _missing.append(k)
if _missing: if _missing:
log('Missing required data: %s' % ' '.join(_missing), level='INFO') log('Missing required data: %s' % ' '.join(_missing), level=INFO)
return False return False
return True return True
def config_flags_parser(config_flags): def config_flags_parser(config_flags):
"""Parses config flags string into dict.
The provided config_flags string may be a list of comma-separated values
which themselves may be comma-separated list of values.
"""
if config_flags.find('==') >= 0: if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)", log("config_flags is not in expected format (key=value)", level=ERROR)
level=ERROR)
raise OSContextError raise OSContextError
# strip the following from each value. # strip the following from each value.
post_strippers = ' ,' post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then # we strip any leading/trailing '=' or ' ' from the string then
@ -111,17 +114,18 @@ def config_flags_parser(config_flags):
# if this not the first entry, expect an embedded key. # if this not the first entry, expect an embedded key.
index = current.rfind(',') index = current.rfind(',')
if index < 0: if index < 0:
log("invalid config value(s) at index %s" % (i), log("Invalid config value(s) at index %s" % (i), level=ERROR)
level=ERROR)
raise OSContextError raise OSContextError
key = current[index + 1:] key = current[index + 1:]
# Add to collection. # Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers) flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags return flags
class OSContextGenerator(object): class OSContextGenerator(object):
"""Base class for all context generators."""
interfaces = [] interfaces = []
def __call__(self): def __call__(self):
@ -133,11 +137,11 @@ class SharedDBContext(OSContextGenerator):
def __init__(self, def __init__(self,
database=None, user=None, relation_prefix=None, ssl_dir=None): database=None, user=None, relation_prefix=None, ssl_dir=None):
''' """Allows inspecting relation for settings prefixed with
Allows inspecting relation for settings prefixed with relation_prefix. relation_prefix. This is useful for parsing access for multiple
This is useful for parsing access for multiple databases returned via databases returned via the shared-db interface (eg, nova_password,
the shared-db interface (eg, nova_password, quantum_password) quantum_password)
''' """
self.relation_prefix = relation_prefix self.relation_prefix = relation_prefix
self.database = database self.database = database
self.user = user self.user = user
@ -147,9 +151,8 @@ class SharedDBContext(OSContextGenerator):
self.database = self.database or config('database') self.database = self.database or config('database')
self.user = self.user or config('database-user') self.user = self.user or config('database-user')
if None in [self.database, self.user]: if None in [self.database, self.user]:
log('Could not generate shared_db context. ' log("Could not generate shared_db context. Missing required charm "
'Missing required charm config options. ' "config options. (database name and user)", level=ERROR)
'(database name and user)')
raise OSContextError raise OSContextError
ctxt = {} ctxt = {}
@ -202,23 +205,24 @@ class PostgresqlDBContext(OSContextGenerator):
def __call__(self): def __call__(self):
self.database = self.database or config('database') self.database = self.database or config('database')
if self.database is None: if self.database is None:
log('Could not generate postgresql_db context. ' log('Could not generate postgresql_db context. Missing required '
'Missing required charm config options. ' 'charm config options. (database name)', level=ERROR)
'(database name)')
raise OSContextError raise OSContextError
ctxt = {}
ctxt = {}
for rid in relation_ids(self.interfaces[0]): for rid in relation_ids(self.interfaces[0]):
for unit in related_units(rid): for unit in related_units(rid):
ctxt = { rel_host = relation_get('host', rid=rid, unit=unit)
'database_host': relation_get('host', rid=rid, unit=unit), rel_user = relation_get('user', rid=rid, unit=unit)
'database': self.database, rel_passwd = relation_get('password', rid=rid, unit=unit)
'database_user': relation_get('user', rid=rid, unit=unit), ctxt = {'database_host': rel_host,
'database_password': relation_get('password', rid=rid, unit=unit), 'database': self.database,
'database_type': 'postgresql', 'database_user': rel_user,
} 'database_password': rel_passwd,
'database_type': 'postgresql'}
if context_complete(ctxt): if context_complete(ctxt):
return ctxt return ctxt
return {} return {}
@ -227,23 +231,29 @@ def db_ssl(rdata, ctxt, ssl_dir):
ca_path = os.path.join(ssl_dir, 'db-client.ca') ca_path = os.path.join(ssl_dir, 'db-client.ca')
with open(ca_path, 'w') as fh: with open(ca_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_ca'])) fh.write(b64decode(rdata['ssl_ca']))
ctxt['database_ssl_ca'] = ca_path ctxt['database_ssl_ca'] = ca_path
elif 'ssl_ca' in rdata: elif 'ssl_ca' in rdata:
log("Charm not setup for ssl support but ssl ca found") log("Charm not setup for ssl support but ssl ca found", level=INFO)
return ctxt return ctxt
if 'ssl_cert' in rdata: if 'ssl_cert' in rdata:
cert_path = os.path.join( cert_path = os.path.join(
ssl_dir, 'db-client.cert') ssl_dir, 'db-client.cert')
if not os.path.exists(cert_path): if not os.path.exists(cert_path):
log("Waiting 1m for ssl client cert validity") log("Waiting 1m for ssl client cert validity", level=INFO)
time.sleep(60) time.sleep(60)
with open(cert_path, 'w') as fh: with open(cert_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_cert'])) fh.write(b64decode(rdata['ssl_cert']))
ctxt['database_ssl_cert'] = cert_path ctxt['database_ssl_cert'] = cert_path
key_path = os.path.join(ssl_dir, 'db-client.key') key_path = os.path.join(ssl_dir, 'db-client.key')
with open(key_path, 'w') as fh: with open(key_path, 'w') as fh:
fh.write(b64decode(rdata['ssl_key'])) fh.write(b64decode(rdata['ssl_key']))
ctxt['database_ssl_key'] = key_path ctxt['database_ssl_key'] = key_path
return ctxt return ctxt
@ -251,9 +261,8 @@ class IdentityServiceContext(OSContextGenerator):
interfaces = ['identity-service'] interfaces = ['identity-service']
def __call__(self): def __call__(self):
log('Generating template context for identity-service') log('Generating template context for identity-service', level=DEBUG)
ctxt = {} ctxt = {}
for rid in relation_ids('identity-service'): for rid in relation_ids('identity-service'):
for unit in related_units(rid): for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit) rdata = relation_get(rid=rid, unit=unit)
@ -261,26 +270,24 @@ class IdentityServiceContext(OSContextGenerator):
serv_host = format_ipv6_addr(serv_host) or serv_host serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host') auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host auth_host = format_ipv6_addr(auth_host) or auth_host
svc_protocol = rdata.get('service_protocol') or 'http'
ctxt = { auth_protocol = rdata.get('auth_protocol') or 'http'
'service_port': rdata.get('service_port'), ctxt = {'service_port': rdata.get('service_port'),
'service_host': serv_host, 'service_host': serv_host,
'auth_host': auth_host, 'auth_host': auth_host,
'auth_port': rdata.get('auth_port'), 'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('service_tenant'), 'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'), 'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'), 'admin_password': rdata.get('service_password'),
'service_protocol': 'service_protocol': svc_protocol,
rdata.get('service_protocol') or 'http', 'auth_protocol': auth_protocol}
'auth_protocol':
rdata.get('auth_protocol') or 'http',
}
if context_complete(ctxt): if context_complete(ctxt):
# NOTE(jamespage) this is required for >= icehouse # NOTE(jamespage) this is required for >= icehouse
# so a missing value just indicates keystone needs # so a missing value just indicates keystone needs
# upgrading # upgrading
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
return ctxt return ctxt
return {} return {}
@ -293,21 +300,23 @@ class AMQPContext(OSContextGenerator):
self.interfaces = [rel_name] self.interfaces = [rel_name]
def __call__(self): def __call__(self):
log('Generating template context for amqp') log('Generating template context for amqp', level=DEBUG)
conf = config() conf = config()
user_setting = 'rabbit-user'
vhost_setting = 'rabbit-vhost'
if self.relation_prefix: if self.relation_prefix:
user_setting = self.relation_prefix + '-rabbit-user' user_setting = '%s-rabbit-user' % (self.relation_prefix)
vhost_setting = self.relation_prefix + '-rabbit-vhost' vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
else:
user_setting = 'rabbit-user'
vhost_setting = 'rabbit-vhost'
try: try:
username = conf[user_setting] username = conf[user_setting]
vhost = conf[vhost_setting] vhost = conf[vhost_setting]
except KeyError as e: except KeyError as e:
log('Could not generate shared_db context. ' log('Could not generate shared_db context. Missing required charm '
'Missing required charm config options: %s.' % e) 'config options: %s.' % e, level=ERROR)
raise OSContextError raise OSContextError
ctxt = {} ctxt = {}
for rid in relation_ids(self.rel_name): for rid in relation_ids(self.rel_name):
ha_vip_only = False ha_vip_only = False
@ -321,6 +330,7 @@ class AMQPContext(OSContextGenerator):
host = relation_get('private-address', rid=rid, unit=unit) host = relation_get('private-address', rid=rid, unit=unit)
host = format_ipv6_addr(host) or host host = format_ipv6_addr(host) or host
ctxt['rabbitmq_host'] = host ctxt['rabbitmq_host'] = host
ctxt.update({ ctxt.update({
'rabbitmq_user': username, 'rabbitmq_user': username,
'rabbitmq_password': relation_get('password', rid=rid, 'rabbitmq_password': relation_get('password', rid=rid,
@ -331,6 +341,7 @@ class AMQPContext(OSContextGenerator):
ssl_port = relation_get('ssl_port', rid=rid, unit=unit) ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
if ssl_port: if ssl_port:
ctxt['rabbit_ssl_port'] = ssl_port ctxt['rabbit_ssl_port'] = ssl_port
ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
if ssl_ca: if ssl_ca:
ctxt['rabbit_ssl_ca'] = ssl_ca ctxt['rabbit_ssl_ca'] = ssl_ca
@ -344,41 +355,45 @@ class AMQPContext(OSContextGenerator):
if context_complete(ctxt): if context_complete(ctxt):
if 'rabbit_ssl_ca' in ctxt: if 'rabbit_ssl_ca' in ctxt:
if not self.ssl_dir: if not self.ssl_dir:
log(("Charm not setup for ssl support " log("Charm not setup for ssl support but ssl ca "
"but ssl ca found")) "found", level=INFO)
break break
ca_path = os.path.join( ca_path = os.path.join(
self.ssl_dir, 'rabbit-client-ca.pem') self.ssl_dir, 'rabbit-client-ca.pem')
with open(ca_path, 'w') as fh: with open(ca_path, 'w') as fh:
fh.write(b64decode(ctxt['rabbit_ssl_ca'])) fh.write(b64decode(ctxt['rabbit_ssl_ca']))
ctxt['rabbit_ssl_ca'] = ca_path ctxt['rabbit_ssl_ca'] = ca_path
# Sufficient information found = break out! # Sufficient information found = break out!
break break
# Used for active/active rabbitmq >= grizzly # Used for active/active rabbitmq >= grizzly
if ('clustered' not in ctxt or ha_vip_only) \ if (('clustered' not in ctxt or ha_vip_only) and
and len(related_units(rid)) > 1: len(related_units(rid)) > 1):
rabbitmq_hosts = [] rabbitmq_hosts = []
for unit in related_units(rid): for unit in related_units(rid):
host = relation_get('private-address', rid=rid, unit=unit) host = relation_get('private-address', rid=rid, unit=unit)
host = format_ipv6_addr(host) or host host = format_ipv6_addr(host) or host
rabbitmq_hosts.append(host) rabbitmq_hosts.append(host)
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
if not context_complete(ctxt): if not context_complete(ctxt):
return {} return {}
else:
return ctxt return ctxt
class CephContext(OSContextGenerator): class CephContext(OSContextGenerator):
"""Generates context for /etc/ceph/ceph.conf templates."""
interfaces = ['ceph'] interfaces = ['ceph']
def __call__(self): def __call__(self):
'''This generates context for /etc/ceph/ceph.conf templates'''
if not relation_ids('ceph'): if not relation_ids('ceph'):
return {} return {}
log('Generating template context for ceph') log('Generating template context for ceph', level=DEBUG)
mon_hosts = [] mon_hosts = []
auth = None auth = None
key = None key = None
@ -387,18 +402,18 @@ class CephContext(OSContextGenerator):
for unit in related_units(rid): for unit in related_units(rid):
auth = relation_get('auth', rid=rid, unit=unit) auth = relation_get('auth', rid=rid, unit=unit)
key = relation_get('key', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit)
ceph_addr = \ ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
relation_get('ceph-public-address', rid=rid, unit=unit) or \ unit=unit)
relation_get('private-address', rid=rid, unit=unit) unit_priv_addr = relation_get('private-address', rid=rid,
unit=unit)
ceph_addr = ceph_pub_addr or unit_priv_addr
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
mon_hosts.append(ceph_addr) mon_hosts.append(ceph_addr)
ctxt = { ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
'mon_hosts': ' '.join(mon_hosts), 'auth': auth,
'auth': auth, 'key': key,
'key': key, 'use_syslog': use_syslog}
'use_syslog': use_syslog
}
if not os.path.isdir('/etc/ceph'): if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph') os.mkdir('/etc/ceph')
@ -407,79 +422,68 @@ class CephContext(OSContextGenerator):
return {} return {}
ensure_packages(['ceph-common']) ensure_packages(['ceph-common'])
return ctxt return ctxt
ADDRESS_TYPES = ['admin', 'internal', 'public']
class HAProxyContext(OSContextGenerator): class HAProxyContext(OSContextGenerator):
"""Provides half a context for the haproxy template, which describes
all peers to be included in the cluster. Each charm needs to include
its own context generator that describes the port mapping.
"""
interfaces = ['cluster'] interfaces = ['cluster']
def __call__(self): def __init__(self, singlenode_mode=False):
''' self.singlenode_mode = singlenode_mode
Builds half a context for the haproxy template, which describes
all peers to be included in the cluster. Each charm needs to include
its own context generator that describes the port mapping.
'''
if not relation_ids('cluster'):
return {}
l_unit = local_unit().replace('/', '-') def __call__(self):
if not relation_ids('cluster') and not self.singlenode_mode:
return {}
if config('prefer-ipv6'): if config('prefer-ipv6'):
addr = get_ipv6_addr(exc_list=[config('vip')])[0] addr = get_ipv6_addr(exc_list=[config('vip')])[0]
else: else:
addr = get_host_ip(unit_get('private-address')) addr = get_host_ip(unit_get('private-address'))
l_unit = local_unit().replace('/', '-')
cluster_hosts = {} cluster_hosts = {}
# NOTE(jamespage): build out map of configured network endpoints # NOTE(jamespage): build out map of configured network endpoints
# and associated backends # and associated backends
for addr_type in ADDRESS_TYPES: for addr_type in ADDRESS_TYPES:
laddr = get_address_in_network( cfg_opt = 'os-{}-network'.format(addr_type)
config('os-{}-network'.format(addr_type))) laddr = get_address_in_network(config(cfg_opt))
if laddr: if laddr:
cluster_hosts[laddr] = {} netmask = get_netmask_for_address(laddr)
cluster_hosts[laddr]['network'] = "{}/{}".format( cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
laddr, netmask),
get_netmask_for_address(laddr) 'backends': {l_unit: laddr}}
)
cluster_hosts[laddr]['backends'] = {}
cluster_hosts[laddr]['backends'][l_unit] = laddr
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
for unit in related_units(rid): for unit in related_units(rid):
_unit = unit.replace('/', '-')
_laddr = relation_get('{}-address'.format(addr_type), _laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit) rid=rid, unit=unit)
if _laddr: if _laddr:
_unit = unit.replace('/', '-')
cluster_hosts[laddr]['backends'][_unit] = _laddr cluster_hosts[laddr]['backends'][_unit] = _laddr
# NOTE(jamespage) no split configurations found, just use # NOTE(jamespage) no split configurations found, just use
# private addresses # private addresses
if not cluster_hosts: if not cluster_hosts:
cluster_hosts[addr] = {} netmask = get_netmask_for_address(addr)
cluster_hosts[addr]['network'] = "{}/{}".format( cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
addr, 'backends': {l_unit: addr}}
get_netmask_for_address(addr)
)
cluster_hosts[addr]['backends'] = {}
cluster_hosts[addr]['backends'][l_unit] = addr
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
for unit in related_units(rid): for unit in related_units(rid):
_unit = unit.replace('/', '-')
_laddr = relation_get('private-address', _laddr = relation_get('private-address',
rid=rid, unit=unit) rid=rid, unit=unit)
if _laddr: if _laddr:
_unit = unit.replace('/', '-')
cluster_hosts[addr]['backends'][_unit] = _laddr cluster_hosts[addr]['backends'][_unit] = _laddr
ctxt = { ctxt = {'frontends': cluster_hosts}
'frontends': cluster_hosts,
}
if config('haproxy-server-timeout'): if config('haproxy-server-timeout'):
ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
if config('haproxy-client-timeout'): if config('haproxy-client-timeout'):
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
@ -493,13 +497,18 @@ class HAProxyContext(OSContextGenerator):
ctxt['stat_port'] = ':8888' ctxt['stat_port'] = ':8888'
for frontend in cluster_hosts: for frontend in cluster_hosts:
if len(cluster_hosts[frontend]['backends']) > 1: if (len(cluster_hosts[frontend]['backends']) > 1 or
self.singlenode_mode):
# Enable haproxy when we have enough peers. # Enable haproxy when we have enough peers.
log('Ensuring haproxy enabled in /etc/default/haproxy.') log('Ensuring haproxy enabled in /etc/default/haproxy.',
level=DEBUG)
with open('/etc/default/haproxy', 'w') as out: with open('/etc/default/haproxy', 'w') as out:
out.write('ENABLED=1\n') out.write('ENABLED=1\n')
return ctxt return ctxt
log('HAProxy context is incomplete, this unit has no peers.')
log('HAProxy context is incomplete, this unit has no peers.',
level=INFO)
return {} return {}
@ -507,29 +516,28 @@ class ImageServiceContext(OSContextGenerator):
interfaces = ['image-service'] interfaces = ['image-service']
def __call__(self): def __call__(self):
''' """Obtains the glance API server from the image-service relation.
Obtains the glance API server from the image-service relation. Useful Useful in nova and cinder (currently).
in nova and cinder (currently). """
''' log('Generating template context for image-service.', level=DEBUG)
log('Generating template context for image-service.')
rids = relation_ids('image-service') rids = relation_ids('image-service')
if not rids: if not rids:
return {} return {}
for rid in rids: for rid in rids:
for unit in related_units(rid): for unit in related_units(rid):
api_server = relation_get('glance-api-server', api_server = relation_get('glance-api-server',
rid=rid, unit=unit) rid=rid, unit=unit)
if api_server: if api_server:
return {'glance_api_servers': api_server} return {'glance_api_servers': api_server}
log('ImageService context is incomplete. '
'Missing required relation data.') log("ImageService context is incomplete. Missing required relation "
"data.", level=INFO)
return {} return {}
class ApacheSSLContext(OSContextGenerator): class ApacheSSLContext(OSContextGenerator):
"""Generates a context for an apache vhost configuration that configures
"""
Generates a context for an apache vhost configuration that configures
HTTPS reverse proxying for one or many endpoints. Generated context HTTPS reverse proxying for one or many endpoints. Generated context
looks something like:: looks something like::
@ -563,6 +571,7 @@ class ApacheSSLContext(OSContextGenerator):
else: else:
cert_filename = 'cert' cert_filename = 'cert'
key_filename = 'key' key_filename = 'key'
write_file(path=os.path.join(ssl_dir, cert_filename), write_file(path=os.path.join(ssl_dir, cert_filename),
content=b64decode(cert)) content=b64decode(cert))
write_file(path=os.path.join(ssl_dir, key_filename), write_file(path=os.path.join(ssl_dir, key_filename),
@ -574,7 +583,8 @@ class ApacheSSLContext(OSContextGenerator):
install_ca_cert(b64decode(ca_cert)) install_ca_cert(b64decode(ca_cert))
def canonical_names(self): def canonical_names(self):
'''Figure out which canonical names clients will access this service''' """Figure out which canonical names clients will access this service.
"""
cns = [] cns = []
for r_id in relation_ids('identity-service'): for r_id in relation_ids('identity-service'):
for unit in related_units(r_id): for unit in related_units(r_id):
@ -582,55 +592,80 @@ class ApacheSSLContext(OSContextGenerator):
for k in rdata: for k in rdata:
if k.startswith('ssl_key_'): if k.startswith('ssl_key_'):
cns.append(k.lstrip('ssl_key_')) cns.append(k.lstrip('ssl_key_'))
return list(set(cns))
return sorted(list(set(cns)))
def get_network_addresses(self):
"""For each network configured, return corresponding address and vip
(if available).
Returns a list of tuples of the form:
[(address_in_net_a, vip_in_net_a),
(address_in_net_b, vip_in_net_b),
...]
or, if no vip(s) available:
[(address_in_net_a, address_in_net_a),
(address_in_net_b, address_in_net_b),
...]
"""
addresses = []
if config('vip'):
vips = config('vip').split()
else:
vips = []
for net_type in ['os-internal-network', 'os-admin-network',
'os-public-network']:
addr = get_address_in_network(config(net_type),
unit_get('private-address'))
if len(vips) > 1 and is_clustered():
if not config(net_type):
log("Multiple networks configured but net_type "
"is None (%s)." % net_type, level=WARNING)
continue
for vip in vips:
if is_address_in_network(config(net_type), vip):
addresses.append((addr, vip))
break
elif is_clustered() and config('vip'):
addresses.append((addr, config('vip')))
else:
addresses.append((addr, addr))
return sorted(addresses)
def __call__(self): def __call__(self):
if isinstance(self.external_ports, basestring): if isinstance(self.external_ports, six.string_types):
self.external_ports = [self.external_ports] self.external_ports = [self.external_ports]
if (not self.external_ports or not https()):
if not self.external_ports or not https():
return {} return {}
self.configure_ca() self.configure_ca()
self.enable_modules() self.enable_modules()
ctxt = { ctxt = {'namespace': self.service_namespace,
'namespace': self.service_namespace, 'endpoints': [],
'endpoints': [], 'ext_ports': []}
'ext_ports': []
}
for cn in self.canonical_names(): for cn in self.canonical_names():
self.configure_cert(cn) self.configure_cert(cn)
addresses = [] addresses = self.get_network_addresses()
vips = [] for address, endpoint in sorted(set(addresses)):
if config('vip'):
vips = config('vip').split()
for network_type in ['os-internal-network',
'os-admin-network',
'os-public-network']:
address = get_address_in_network(config(network_type),
unit_get('private-address'))
if len(vips) > 0 and is_clustered():
for vip in vips:
if is_address_in_network(config(network_type),
vip):
addresses.append((address, vip))
break
elif is_clustered():
addresses.append((address, config('vip')))
else:
addresses.append((address, address))
for address, endpoint in set(addresses):
for api_port in self.external_ports: for api_port in self.external_ports:
ext_port = determine_apache_port(api_port) ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port) int_port = determine_api_port(api_port)
portmap = (address, endpoint, int(ext_port), int(int_port)) portmap = (address, endpoint, int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap) ctxt['endpoints'].append(portmap)
ctxt['ext_ports'].append(int(ext_port)) ctxt['ext_ports'].append(int(ext_port))
ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
return ctxt return ctxt
@ -647,21 +682,23 @@ class NeutronContext(OSContextGenerator):
@property @property
def packages(self): def packages(self):
return neutron_plugin_attribute( return neutron_plugin_attribute(self.plugin, 'packages',
self.plugin, 'packages', self.network_manager) self.network_manager)
@property @property
def neutron_security_groups(self): def neutron_security_groups(self):
return None return None
def _ensure_packages(self): def _ensure_packages(self):
[ensure_packages(pkgs) for pkgs in self.packages] for pkgs in self.packages:
ensure_packages(pkgs)
def _save_flag_file(self): def _save_flag_file(self):
if self.network_manager == 'quantum': if self.network_manager == 'quantum':
_file = '/etc/nova/quantum_plugin.conf' _file = '/etc/nova/quantum_plugin.conf'
else: else:
_file = '/etc/nova/neutron_plugin.conf' _file = '/etc/nova/neutron_plugin.conf'
with open(_file, 'wb') as out: with open(_file, 'wb') as out:
out.write(self.plugin + '\n') out.write(self.plugin + '\n')
@ -670,13 +707,11 @@ class NeutronContext(OSContextGenerator):
self.network_manager) self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config', config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager) self.network_manager)
ovs_ctxt = { ovs_ctxt = {'core_plugin': driver,
'core_plugin': driver, 'neutron_plugin': 'ovs',
'neutron_plugin': 'ovs', 'neutron_security_groups': self.neutron_security_groups,
'neutron_security_groups': self.neutron_security_groups, 'local_ip': unit_private_ip(),
'local_ip': unit_private_ip(), 'config': config}
'config': config
}
return ovs_ctxt return ovs_ctxt
@ -685,13 +720,11 @@ class NeutronContext(OSContextGenerator):
self.network_manager) self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config', config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager) self.network_manager)
nvp_ctxt = { nvp_ctxt = {'core_plugin': driver,
'core_plugin': driver, 'neutron_plugin': 'nvp',
'neutron_plugin': 'nvp', 'neutron_security_groups': self.neutron_security_groups,
'neutron_security_groups': self.neutron_security_groups, 'local_ip': unit_private_ip(),
'local_ip': unit_private_ip(), 'config': config}
'config': config
}
return nvp_ctxt return nvp_ctxt
@ -700,35 +733,50 @@ class NeutronContext(OSContextGenerator):
self.network_manager) self.network_manager)
n1kv_config = neutron_plugin_attribute(self.plugin, 'config', n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager) self.network_manager)
n1kv_ctxt = { n1kv_user_config_flags = config('n1kv-config-flags')
'core_plugin': driver, restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
'neutron_plugin': 'n1kv', n1kv_ctxt = {'core_plugin': driver,
'neutron_security_groups': self.neutron_security_groups, 'neutron_plugin': 'n1kv',
'local_ip': unit_private_ip(), 'neutron_security_groups': self.neutron_security_groups,
'config': n1kv_config, 'local_ip': unit_private_ip(),
'vsm_ip': config('n1kv-vsm-ip'), 'config': n1kv_config,
'vsm_username': config('n1kv-vsm-username'), 'vsm_ip': config('n1kv-vsm-ip'),
'vsm_password': config('n1kv-vsm-password'), 'vsm_username': config('n1kv-vsm-username'),
'restrict_policy_profiles': config( 'vsm_password': config('n1kv-vsm-password'),
'n1kv_restrict_policy_profiles'), 'restrict_policy_profiles': restrict_policy_profiles}
}
if n1kv_user_config_flags:
flags = config_flags_parser(n1kv_user_config_flags)
n1kv_ctxt['user_config_flags'] = flags
return n1kv_ctxt return n1kv_ctxt
def calico_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
calico_ctxt = {'core_plugin': driver,
'neutron_plugin': 'Calico',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config}
return calico_ctxt
def neutron_ctxt(self): def neutron_ctxt(self):
if https(): if https():
proto = 'https' proto = 'https'
else: else:
proto = 'http' proto = 'http'
if is_clustered(): if is_clustered():
host = config('vip') host = config('vip')
else: else:
host = unit_get('private-address') host = unit_get('private-address')
url = '%s://%s:%s' % (proto, host, '9696')
ctxt = { ctxt = {'network_manager': self.network_manager,
'network_manager': self.network_manager, 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
'neutron_url': url,
}
return ctxt return ctxt
def __call__(self): def __call__(self):
@ -748,6 +796,8 @@ class NeutronContext(OSContextGenerator):
ctxt.update(self.nvp_ctxt()) ctxt.update(self.nvp_ctxt())
elif self.plugin == 'n1kv': elif self.plugin == 'n1kv':
ctxt.update(self.n1kv_ctxt()) ctxt.update(self.n1kv_ctxt())
elif self.plugin == 'Calico':
ctxt.update(self.calico_ctxt())
alchemy_flags = config('neutron-alchemy-flags') alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags: if alchemy_flags:
@ -759,23 +809,40 @@ class NeutronContext(OSContextGenerator):
class OSConfigFlagContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator):
"""Provides support for user-defined config flags.
""" Users can define a comma-seperated list of key=value pairs
Responsible for adding user-defined config-flags in charm config to a in the charm configuration and apply them at any point in
template context. any file by using a template flag.
Sometimes users might want config flags inserted within a
specific section so this class allows users to specify the
template flag name, allowing for multiple template flags
(sections) within the same context.
NOTE: the value of config-flags may be a comma-separated list of NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some Openstack config files support key=value pairs and some Openstack config files support
comma-separated lists as values. comma-separated lists as values.
""" """
def __init__(self, charm_flag='config-flags',
template_flag='user_config_flags'):
"""
:param charm_flag: config flags in charm configuration.
:param template_flag: insert point for user-defined flags in template
file.
"""
super(OSConfigFlagContext, self).__init__()
self._charm_flag = charm_flag
self._template_flag = template_flag
def __call__(self): def __call__(self):
config_flags = config('config-flags') config_flags = config(self._charm_flag)
if not config_flags: if not config_flags:
return {} return {}
flags = config_flags_parser(config_flags) return {self._template_flag:
return {'user_config_flags': flags} config_flags_parser(config_flags)}
class SubordinateConfigContext(OSContextGenerator): class SubordinateConfigContext(OSContextGenerator):
@ -819,7 +886,6 @@ class SubordinateConfigContext(OSContextGenerator):
}, },
} }
} }
""" """
def __init__(self, service, config_file, interface): def __init__(self, service, config_file, interface):
@ -849,26 +915,28 @@ class SubordinateConfigContext(OSContextGenerator):
if self.service not in sub_config: if self.service not in sub_config:
log('Found subordinate_config on %s but it contained' log('Found subordinate_config on %s but it contained'
'nothing for %s service' % (rid, self.service)) 'nothing for %s service' % (rid, self.service),
level=INFO)
continue continue
sub_config = sub_config[self.service] sub_config = sub_config[self.service]
if self.config_file not in sub_config: if self.config_file not in sub_config:
log('Found subordinate_config on %s but it contained' log('Found subordinate_config on %s but it contained'
'nothing for %s' % (rid, self.config_file)) 'nothing for %s' % (rid, self.config_file),
level=INFO)
continue continue
sub_config = sub_config[self.config_file] sub_config = sub_config[self.config_file]
for k, v in sub_config.iteritems(): for k, v in six.iteritems(sub_config):
if k == 'sections': if k == 'sections':
for section, config_dict in v.iteritems(): for section, config_dict in six.iteritems(v):
log("adding section '%s'" % (section)) log("adding section '%s'" % (section),
level=DEBUG)
ctxt[k][section] = config_dict ctxt[k][section] = config_dict
else: else:
ctxt[k] = v ctxt[k] = v
log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
return ctxt return ctxt
@ -880,15 +948,14 @@ class LogLevelContext(OSContextGenerator):
False if config('debug') is None else config('debug') False if config('debug') is None else config('debug')
ctxt['verbose'] = \ ctxt['verbose'] = \
False if config('verbose') is None else config('verbose') False if config('verbose') is None else config('verbose')
return ctxt return ctxt
class SyslogContext(OSContextGenerator): class SyslogContext(OSContextGenerator):
def __call__(self): def __call__(self):
ctxt = { ctxt = {'use_syslog': config('use-syslog')}
'use_syslog': config('use-syslog')
}
return ctxt return ctxt
@ -896,13 +963,9 @@ class BindHostContext(OSContextGenerator):
def __call__(self): def __call__(self):
if config('prefer-ipv6'): if config('prefer-ipv6'):
return { return {'bind_host': '::'}
'bind_host': '::'
}
else: else:
return { return {'bind_host': '0.0.0.0'}
'bind_host': '0.0.0.0'
}
class WorkerConfigContext(OSContextGenerator): class WorkerConfigContext(OSContextGenerator):
@ -914,11 +977,42 @@ class WorkerConfigContext(OSContextGenerator):
except ImportError: except ImportError:
apt_install('python-psutil', fatal=True) apt_install('python-psutil', fatal=True)
from psutil import NUM_CPUS from psutil import NUM_CPUS
return NUM_CPUS return NUM_CPUS
def __call__(self): def __call__(self):
multiplier = config('worker-multiplier') or 1 multiplier = config('worker-multiplier') or 0
ctxt = { ctxt = {"workers": self.num_cpus * multiplier}
"workers": self.num_cpus * multiplier return ctxt
}
class ZeroMQContext(OSContextGenerator):
interfaces = ['zeromq-configuration']
def __call__(self):
ctxt = {}
if is_relation_made('zeromq-configuration', 'host'):
for rid in relation_ids('zeromq-configuration'):
for unit in related_units(rid):
ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
ctxt['zmq_host'] = relation_get('host', unit, rid)
return ctxt
class NotificationDriverContext(OSContextGenerator):
def __init__(self, zmq_relation='zeromq-configuration',
amqp_relation='amqp'):
"""
:param zmq_relation: Name of Zeromq relation to check
"""
self.zmq_relation = zmq_relation
self.amqp_relation = amqp_relation
def __call__(self):
ctxt = {'notifications': 'False'}
if is_relation_made(self.amqp_relation):
ctxt['notifications'] = "True"
return ctxt return ctxt

View File

@ -138,10 +138,25 @@ def neutron_plugins():
relation_prefix='neutron', relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)], ssl_dir=NEUTRON_CONF_DIR)],
'services': [], 'services': [],
'packages': [['neutron-plugin-cisco']], 'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-cisco']],
'server_packages': ['neutron-server', 'server_packages': ['neutron-server',
'neutron-plugin-cisco'], 'neutron-plugin-cisco'],
'server_services': ['neutron-server'] 'server_services': ['neutron-server']
},
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['calico-compute', 'bird', 'neutron-dhcp-agent']],
'server_packages': ['neutron-server', 'calico-control'],
'server_services': ['neutron-server']
} }
} }
if release >= 'icehouse': if release >= 'icehouse':
@ -162,7 +177,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
elif manager == 'neutron': elif manager == 'neutron':
plugins = neutron_plugins() plugins = neutron_plugins()
else: else:
log('Error: Network manager does not support plugins.') log("Network manager '%s' does not support plugins." % (manager),
level=ERROR)
raise Exception raise Exception
try: try:

View File

@ -35,7 +35,7 @@ listen stats {{ stat_port }}
stats auth admin:password stats auth admin:password
{% if frontends -%} {% if frontends -%}
{% for service, ports in service_ports.iteritems() -%} {% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }} frontend tcp-in_{{ service }}
bind *:{{ ports[0] }} bind *:{{ ports[0] }}
bind :::{{ ports[0] }} bind :::{{ ports[0] }}
@ -46,7 +46,7 @@ frontend tcp-in_{{ service }}
{% for frontend in frontends -%} {% for frontend in frontends -%}
backend {{ service }}_{{ frontend }} backend {{ service }}_{{ frontend }}
balance leastconn balance leastconn
{% for unit, address in frontends[frontend]['backends'].iteritems() -%} {% for unit, address in frontends[frontend]['backends'].items() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %} {% endfor %}
{% endfor -%} {% endfor -%}

View File

@ -1,13 +1,13 @@
import os import os
from charmhelpers.fetch import apt_install import six
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
log, log,
ERROR, ERROR,
INFO INFO
) )
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try: try:
@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release):
order by OpenStack release. order by OpenStack release.
""" """
tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
for rel in OPENSTACK_CODENAMES.itervalues()] for rel in six.itervalues(OPENSTACK_CODENAMES)]
if not os.path.isdir(templates_dir): if not os.path.isdir(templates_dir):
log('Templates directory not found @ %s.' % templates_dir, log('Templates directory not found @ %s.' % templates_dir,
@ -258,7 +258,7 @@ class OSConfigRenderer(object):
""" """
Write out all registered config files. Write out all registered config files.
""" """
[self.write(k) for k in self.templates.iterkeys()] [self.write(k) for k in six.iterkeys(self.templates)]
def set_release(self, openstack_release): def set_release(self, openstack_release):
""" """
@ -275,5 +275,5 @@ class OSConfigRenderer(object):
''' '''
interfaces = [] interfaces = []
[interfaces.extend(i.complete_contexts()) [interfaces.extend(i.complete_contexts())
for i in self.templates.itervalues()] for i in six.itervalues(self.templates)]
return interfaces return interfaces

View File

@ -2,6 +2,7 @@
# Common python helper functions used for OpenStack charms. # Common python helper functions used for OpenStack charms.
from collections import OrderedDict from collections import OrderedDict
from functools import wraps
import subprocess import subprocess
import json import json
@ -9,6 +10,8 @@ import os
import socket import socket
import sys import sys
import six
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
config, config,
log as juju_log, log as juju_log,
@ -112,7 +115,7 @@ def get_os_codename_install_source(src):
# Best guess match based on deb string provided # Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'): if src.startswith('deb') or src.startswith('ppa'):
for k, v in OPENSTACK_CODENAMES.iteritems(): for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v in src: if v in src:
return v return v
@ -133,7 +136,7 @@ def get_os_codename_version(vers):
def get_os_version_codename(codename): def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.''' '''Determine OpenStack version number from codename.'''
for k, v in OPENSTACK_CODENAMES.iteritems(): for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v == codename: if v == codename:
return k return k
e = 'Could not derive OpenStack version for '\ e = 'Could not derive OpenStack version for '\
@ -193,7 +196,7 @@ def get_os_version_package(pkg, fatal=True):
else: else:
vers_map = OPENSTACK_CODENAMES vers_map = OPENSTACK_CODENAMES
for version, cname in vers_map.iteritems(): for version, cname in six.iteritems(vers_map):
if cname == codename: if cname == codename:
return version return version
# e = "Could not determine OpenStack version for package: %s" % pkg # e = "Could not determine OpenStack version for package: %s" % pkg
@ -317,7 +320,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
rc_script.write( rc_script.write(
"#!/bin/bash\n") "#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p)) [rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"] for u, p in six.iteritems(env_vars) if u != "script_path"]
def openstack_upgrade_available(package): def openstack_upgrade_available(package):
@ -417,7 +420,7 @@ def ns_query(address):
if isinstance(address, dns.name.Name): if isinstance(address, dns.name.Name):
rtype = 'PTR' rtype = 'PTR'
elif isinstance(address, basestring): elif isinstance(address, six.string_types):
rtype = 'A' rtype = 'A'
else: else:
return None return None
@ -468,6 +471,14 @@ def get_hostname(address, fqdn=True):
return result.split('.')[0] return result.split('.')[0]
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
mm_map = {}
if os.path.isfile(mm_file):
with open(mm_file, 'r') as f:
mm_map = json.load(f)
return mm_map
def sync_db_with_multi_ipv6_addresses(database, database_user, def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None): relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False) hosts = get_ipv6_addr(dynamic_only=False)
@ -477,10 +488,24 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
'hostname': json.dumps(hosts)} 'hostname': json.dumps(hosts)}
if relation_prefix: if relation_prefix:
keys = kwargs.keys() for key in list(kwargs.keys()):
for key in keys:
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
del kwargs[key] del kwargs[key]
for rid in relation_ids('shared-db'): for rid in relation_ids('shared-db'):
relation_set(relation_id=rid, **kwargs) relation_set(relation_id=rid, **kwargs)
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap

View File

@ -16,19 +16,18 @@ import time
from subprocess import ( from subprocess import (
check_call, check_call,
check_output, check_output,
CalledProcessError CalledProcessError,
) )
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
relation_get, relation_get,
relation_ids, relation_ids,
related_units, related_units,
log, log,
DEBUG,
INFO, INFO,
WARNING, WARNING,
ERROR ERROR,
) )
from charmhelpers.core.host import ( from charmhelpers.core.host import (
mount, mount,
mounts, mounts,
@ -37,7 +36,6 @@ from charmhelpers.core.host import (
service_running, service_running,
umount, umount,
) )
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_install, apt_install,
) )
@ -56,99 +54,82 @@ CEPH_CONF = """[global]
def install(): def install():
''' Basic Ceph client installation ''' """Basic Ceph client installation."""
ceph_dir = "/etc/ceph" ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir): if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir) os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True) apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img): def rbd_exists(service, pool, rbd_img):
''' Check to see if a RADOS block device exists ''' """Check to see if a RADOS block device exists."""
try: try:
out = check_output(['rbd', 'list', '--id', service, out = check_output(['rbd', 'list', '--id', service, '--pool', pool])
'--pool', pool])
except CalledProcessError: except CalledProcessError:
return False return False
else:
return rbd_img in out return rbd_img in out
def create_rbd_image(service, pool, image, sizemb): def create_rbd_image(service, pool, image, sizemb):
''' Create a new RADOS block device ''' """Create a new RADOS block device."""
cmd = [ cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
'rbd', '--pool', pool]
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
check_call(cmd) check_call(cmd)
def pool_exists(service, name): def pool_exists(service, name):
''' Check to see if a RADOS pool already exists ''' """Check to see if a RADOS pool already exists."""
try: try:
out = check_output(['rados', '--id', service, 'lspools']) out = check_output(['rados', '--id', service, 'lspools'])
except CalledProcessError: except CalledProcessError:
return False return False
else:
return name in out return name in out
def get_osds(service): def get_osds(service):
''' """Return a list of all Ceph Object Storage Daemons currently in the
Return a list of all Ceph Object Storage Daemons cluster.
currently in the cluster """
'''
version = ceph_version() version = ceph_version()
if version and version >= '0.56': if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service, return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls', '--format=json'])) 'osd', 'ls', '--format=json']))
else:
return None return None
def create_pool(service, name, replicas=2): def create_pool(service, name, replicas=3):
''' Create a new RADOS pool ''' """Create a new RADOS pool."""
if pool_exists(service, name): if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name), log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING) level=WARNING)
return return
# Calculate the number of placement groups based # Calculate the number of placement groups based
# on upstream recommended best practices. # on upstream recommended best practices.
osds = get_osds(service) osds = get_osds(service)
if osds: if osds:
pgnum = (len(osds) * 100 / replicas) pgnum = (len(osds) * 100 // replicas)
else: else:
# NOTE(james-page): Default to 200 for older ceph versions # NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli # which don't support OSD query from cli
pgnum = 200 pgnum = 200
cmd = [
'ceph', '--id', service, cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
'osd', 'pool', 'create',
name, str(pgnum)
]
check_call(cmd) check_call(cmd)
cmd = [
'ceph', '--id', service, cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
'osd', 'pool', 'set', name, str(replicas)]
'size', str(replicas)
]
check_call(cmd) check_call(cmd)
def delete_pool(service, name): def delete_pool(service, name):
''' Delete a RADOS pool from ceph ''' """Delete a RADOS pool from ceph."""
cmd = [ cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
'ceph', '--id', service, '--yes-i-really-really-mean-it']
'osd', 'pool', 'delete',
name, '--yes-i-really-really-mean-it'
]
check_call(cmd) check_call(cmd)
@ -161,44 +142,43 @@ def _keyring_path(service):
def create_keyring(service, key): def create_keyring(service, key):
''' Create a new Ceph keyring containing key''' """Create a new Ceph keyring containing key."""
keyring = _keyring_path(service) keyring = _keyring_path(service)
if os.path.exists(keyring): if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=WARNING) log('Ceph keyring exists at %s.' % keyring, level=WARNING)
return return
cmd = [
'ceph-authtool', cmd = ['ceph-authtool', keyring, '--create-keyring',
keyring, '--name=client.{}'.format(service), '--add-key={}'.format(key)]
'--create-keyring',
'--name=client.{}'.format(service),
'--add-key={}'.format(key)
]
check_call(cmd) check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO) log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
def create_key_file(service, key): def create_key_file(service, key):
''' Create a file containing key ''' """Create a file containing key."""
keyfile = _keyfile_path(service) keyfile = _keyfile_path(service)
if os.path.exists(keyfile): if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) log('Keyfile exists at %s.' % keyfile, level=WARNING)
return return
with open(keyfile, 'w') as fd: with open(keyfile, 'w') as fd:
fd.write(key) fd.write(key)
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
log('Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes(): def get_ceph_nodes():
''' Query named relation 'ceph' to detemine current nodes ''' """Query named relation 'ceph' to determine current nodes."""
hosts = [] hosts = []
for r_id in relation_ids('ceph'): for r_id in relation_ids('ceph'):
for unit in related_units(r_id): for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id)) hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts return hosts
def configure(service, key, auth, use_syslog): def configure(service, key, auth, use_syslog):
''' Perform basic configuration of Ceph ''' """Perform basic configuration of Ceph."""
create_keyring(service, key) create_keyring(service, key)
create_key_file(service, key) create_key_file(service, key)
hosts = get_ceph_nodes() hosts = get_ceph_nodes()
@ -211,17 +191,17 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name): def image_mapped(name):
''' Determine whether a RADOS block device is mapped locally ''' """Determine whether a RADOS block device is mapped locally."""
try: try:
out = check_output(['rbd', 'showmapped']) out = check_output(['rbd', 'showmapped'])
except CalledProcessError: except CalledProcessError:
return False return False
else:
return name in out return name in out
def map_block_storage(service, pool, image): def map_block_storage(service, pool, image):
''' Map a RADOS block device for local use ''' """Map a RADOS block device for local use."""
cmd = [ cmd = [
'rbd', 'rbd',
'map', 'map',
@ -235,31 +215,32 @@ def map_block_storage(service, pool, image):
def filesystem_mounted(fs): def filesystem_mounted(fs):
''' Determine whether a filesytems is already mounted ''' """Determine whether a filesytems is already mounted."""
return fs in [f for f, m in mounts()] return fs in [f for f, m in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10): def make_filesystem(blk_device, fstype='ext4', timeout=10):
''' Make a new filesystem on the specified block device ''' """Make a new filesystem on the specified block device."""
count = 0 count = 0
e_noent = os.errno.ENOENT e_noent = os.errno.ENOENT
while not os.path.exists(blk_device): while not os.path.exists(blk_device):
if count >= timeout: if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device, log('Gave up waiting on block device %s' % blk_device,
level=ERROR) level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device) raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO) log('Waiting for block device %s to appear' % blk_device,
level=DEBUG)
count += 1 count += 1
time.sleep(1) time.sleep(1)
else: else:
log('ceph: Formatting block device %s as filesystem %s.' % log('Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO) (blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device]) check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst): def place_data_on_block_device(blk_device, data_src_dst):
''' Migrate data in data_src_dst to blk_device and then remount ''' """Migrate data in data_src_dst to blk_device and then remount."""
# mount block device into /mnt # mount block device into /mnt
mount(blk_device, '/mnt') mount(blk_device, '/mnt')
# copy data to /mnt # copy data to /mnt
@ -279,8 +260,8 @@ def place_data_on_block_device(blk_device, data_src_dst):
# TODO: re-use # TODO: re-use
def modprobe(module): def modprobe(module):
''' Load a kernel module and configure for auto-load on reboot ''' """Load a kernel module and configure for auto-load on reboot."""
log('ceph: Loading kernel module', level=INFO) log('Loading kernel module', level=INFO)
cmd = ['modprobe', module] cmd = ['modprobe', module]
check_call(cmd) check_call(cmd)
with open('/etc/modules', 'r+') as modules: with open('/etc/modules', 'r+') as modules:
@ -289,7 +270,7 @@ def modprobe(module):
def copy_files(src, dst, symlinks=False, ignore=None): def copy_files(src, dst, symlinks=False, ignore=None):
''' Copy files from src to dst ''' """Copy files from src to dst."""
for item in os.listdir(src): for item in os.listdir(src):
s = os.path.join(src, item) s = os.path.join(src, item)
d = os.path.join(dst, item) d = os.path.join(dst, item)
@ -300,9 +281,9 @@ def copy_files(src, dst, symlinks=False, ignore=None):
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]): blk_device, fstype, system_services=[],
""" replicas=3):
NOTE: This function must only be called from a single service unit for """NOTE: This function must only be called from a single service unit for
the same rbd_img otherwise data loss will occur. the same rbd_img otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device, Ensures given pool and RBD image exists, is mapped to a block device,
@ -316,15 +297,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
""" """
# Ensure pool, RBD image, RBD mappings are in place. # Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool): if not pool_exists(service, pool):
log('ceph: Creating new pool {}.'.format(pool)) log('Creating new pool {}.'.format(pool), level=INFO)
create_pool(service, pool) create_pool(service, pool, replicas=replicas)
if not rbd_exists(service, pool, rbd_img): if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image ({}).'.format(rbd_img)) log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb) create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img): if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
level=INFO)
map_block_storage(service, pool, rbd_img) map_block_storage(service, pool, rbd_img)
# make file system # make file system
@ -339,42 +321,44 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
for svc in system_services: for svc in system_services:
if service_running(svc): if service_running(svc):
log('ceph: Stopping services {} prior to migrating data.' log('Stopping services {} prior to migrating data.'
.format(svc)) .format(svc), level=DEBUG)
service_stop(svc) service_stop(svc)
place_data_on_block_device(blk_device, mount_point) place_data_on_block_device(blk_device, mount_point)
for svc in system_services: for svc in system_services:
log('ceph: Starting service {} after migrating data.' log('Starting service {} after migrating data.'
.format(svc)) .format(svc), level=DEBUG)
service_start(svc) service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None): def ensure_ceph_keyring(service, user=None, group=None):
''' """Ensures a ceph keyring is created for a named service and optionally
Ensures a ceph keyring is created for a named service ensures user and group ownership.
and optionally ensures user and group ownership.
Returns False if no ceph key is available in relation state. Returns False if no ceph key is available in relation state.
''' """
key = None key = None
for rid in relation_ids('ceph'): for rid in relation_ids('ceph'):
for unit in related_units(rid): for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit)
if key: if key:
break break
if not key: if not key:
return False return False
create_keyring(service=service, key=key) create_keyring(service=service, key=key)
keyring = _keyring_path(service) keyring = _keyring_path(service)
if user and group: if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring]) check_call(['chown', '%s.%s' % (user, group), keyring])
return True return True
def ceph_version(): def ceph_version():
''' Retrieve the local version of ceph ''' """Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'): if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v'] cmd = ['ceph', '-v']
output = check_output(cmd) output = check_output(cmd)

View File

@ -1,12 +1,12 @@
import os import os
import re import re
from subprocess import ( from subprocess import (
check_call, check_call,
check_output, check_output,
) )
import six
################################################## ##################################################
# loopback device helpers. # loopback device helpers.
@ -37,7 +37,7 @@ def create_loopback(file_path):
''' '''
file_path = os.path.abspath(file_path) file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path]) check_call(['losetup', '--find', file_path])
for d, f in loopback_devices().iteritems(): for d, f in six.iteritems(loopback_devices()):
if f == file_path: if f == file_path:
return d return d
@ -51,7 +51,7 @@ def ensure_loopback_device(path, size):
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0) :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
''' '''
for d, f in loopback_devices().iteritems(): for d, f in six.iteritems(loopback_devices()):
if f == path: if f == path:
return d return d

View File

@ -3,10 +3,11 @@
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import io
import os import os
class Fstab(file): class Fstab(io.FileIO):
"""This class extends file in order to implement a file reader/writer """This class extends file in order to implement a file reader/writer
for file `/etc/fstab` for file `/etc/fstab`
""" """
@ -24,8 +25,8 @@ class Fstab(file):
options = "defaults" options = "defaults"
self.options = options self.options = options
self.d = d self.d = int(d)
self.p = p self.p = int(p)
def __eq__(self, o): def __eq__(self, o):
return str(self) == str(o) return str(self) == str(o)
@ -45,7 +46,7 @@ class Fstab(file):
self._path = path self._path = path
else: else:
self._path = self.DEFAULT_PATH self._path = self.DEFAULT_PATH
file.__init__(self, self._path, 'r+') super(Fstab, self).__init__(self._path, 'rb+')
def _hydrate_entry(self, line): def _hydrate_entry(self, line):
# NOTE: use split with no arguments to split on any # NOTE: use split with no arguments to split on any
@ -58,8 +59,9 @@ class Fstab(file):
def entries(self): def entries(self):
self.seek(0) self.seek(0)
for line in self.readlines(): for line in self.readlines():
line = line.decode('us-ascii')
try: try:
if not line.startswith("#"): if line.strip() and not line.startswith("#"):
yield self._hydrate_entry(line) yield self._hydrate_entry(line)
except ValueError: except ValueError:
pass pass
@ -75,14 +77,14 @@ class Fstab(file):
if self.get_entry_by_attr('device', entry.device): if self.get_entry_by_attr('device', entry.device):
return False return False
self.write(str(entry) + '\n') self.write((str(entry) + '\n').encode('us-ascii'))
self.truncate() self.truncate()
return entry return entry
def remove_entry(self, entry): def remove_entry(self, entry):
self.seek(0) self.seek(0)
lines = self.readlines() lines = [l.decode('us-ascii') for l in self.readlines()]
found = False found = False
for index, line in enumerate(lines): for index, line in enumerate(lines):
@ -97,7 +99,7 @@ class Fstab(file):
lines.remove(line) lines.remove(line)
self.seek(0) self.seek(0)
self.write(''.join(lines)) self.write(''.join(lines).encode('us-ascii'))
self.truncate() self.truncate()
return True return True

View File

@ -9,9 +9,11 @@ import json
import yaml import yaml
import subprocess import subprocess
import sys import sys
import UserDict
from subprocess import CalledProcessError from subprocess import CalledProcessError
import six
from six.moves import UserDict
CRITICAL = "CRITICAL" CRITICAL = "CRITICAL"
ERROR = "ERROR" ERROR = "ERROR"
WARNING = "WARNING" WARNING = "WARNING"
@ -67,12 +69,12 @@ def log(message, level=None):
subprocess.call(command) subprocess.call(command)
class Serializable(UserDict.IterableUserDict): class Serializable(UserDict):
"""Wrapper, an object that can be serialized to yaml or json""" """Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj): def __init__(self, obj):
# wrap the object # wrap the object
UserDict.IterableUserDict.__init__(self) UserDict.__init__(self)
self.data = obj self.data = obj
def __getattr__(self, attr): def __getattr__(self, attr):
@ -214,6 +216,12 @@ class Config(dict):
except KeyError: except KeyError:
return (self._prev_dict or {})[key] return (self._prev_dict or {})[key]
def keys(self):
prev_keys = []
if self._prev_dict is not None:
prev_keys = self._prev_dict.keys()
return list(set(prev_keys + list(dict.keys(self))))
def load_previous(self, path=None): def load_previous(self, path=None):
"""Load previous copy of config from disk. """Load previous copy of config from disk.
@ -263,7 +271,7 @@ class Config(dict):
""" """
if self._prev_dict: if self._prev_dict:
for k, v in self._prev_dict.iteritems(): for k, v in six.iteritems(self._prev_dict):
if k not in self: if k not in self:
self[k] = v self[k] = v
with open(self.path, 'w') as f: with open(self.path, 'w') as f:
@ -300,7 +308,7 @@ def relation_get(attribute=None, unit=None, rid=None):
return json.loads(subprocess.check_output(_args)) return json.loads(subprocess.check_output(_args))
except ValueError: except ValueError:
return None return None
except CalledProcessError, e: except CalledProcessError as e:
if e.returncode == 2: if e.returncode == 2:
return None return None
raise raise
@ -312,7 +320,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
relation_cmd_line = ['relation-set'] relation_cmd_line = ['relation-set']
if relation_id is not None: if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id)) relation_cmd_line.extend(('-r', relation_id))
for k, v in (relation_settings.items() + kwargs.items()): for k, v in (list(relation_settings.items()) + list(kwargs.items())):
if v is None: if v is None:
relation_cmd_line.append('{}='.format(k)) relation_cmd_line.append('{}='.format(k))
else: else:

View File

@ -6,19 +6,20 @@
# Matthew Wedgwood <matthew.wedgwood@canonical.com> # Matthew Wedgwood <matthew.wedgwood@canonical.com>
import os import os
import re
import pwd import pwd
import grp import grp
import random import random
import string import string
import subprocess import subprocess
import hashlib import hashlib
import shutil
from contextlib import contextmanager from contextlib import contextmanager
from collections import OrderedDict from collections import OrderedDict
from hookenv import log import six
from fstab import Fstab
from .hookenv import log
from .fstab import Fstab
def service_start(service_name): def service_start(service_name):
@ -130,7 +131,7 @@ def symlink(source, destination):
subprocess.check_call(cmd) subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0555, force=False): def mkdir(path, owner='root', group='root', perms=0o555, force=False):
"""Create a directory""" """Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group, log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms)) perms))
@ -146,7 +147,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False):
os.chown(realpath, uid, gid) os.chown(realpath, uid, gid)
def write_file(path, content, owner='root', group='root', perms=0444): def write_file(path, content, owner='root', group='root', perms=0o444):
"""Create or overwrite a file with the contents of a string""" """Create or overwrite a file with the contents of a string"""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid uid = pwd.getpwnam(owner).pw_uid
@ -177,7 +178,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
cmd_args.extend([device, mountpoint]) cmd_args.extend([device, mountpoint])
try: try:
subprocess.check_output(cmd_args) subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e: except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False return False
@ -191,7 +192,7 @@ def umount(mountpoint, persist=False):
cmd_args = ['umount', mountpoint] cmd_args = ['umount', mountpoint]
try: try:
subprocess.check_output(cmd_args) subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e: except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output)) log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False return False
@ -218,8 +219,8 @@ def file_hash(path, hash_type='md5'):
""" """
if os.path.exists(path): if os.path.exists(path):
h = getattr(hashlib, hash_type)() h = getattr(hashlib, hash_type)()
with open(path, 'r') as source: with open(path, 'rb') as source:
h.update(source.read()) # IGNORE:E1101 - it does have update h.update(source.read())
return h.hexdigest() return h.hexdigest()
else: else:
return None return None
@ -297,7 +298,7 @@ def pwgen(length=None):
if length is None: if length is None:
length = random.choice(range(35, 45)) length = random.choice(range(35, 45))
alphanumeric_chars = [ alphanumeric_chars = [
l for l in (string.letters + string.digits) l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou'] if l not in 'l0QD1vAEIOUaeiou']
random_chars = [ random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)] random.choice(alphanumeric_chars) for _ in range(length)]
@ -306,7 +307,7 @@ def pwgen(length=None):
def list_nics(nic_type): def list_nics(nic_type):
'''Return a list of nics of given type(s)''' '''Return a list of nics of given type(s)'''
if isinstance(nic_type, basestring): if isinstance(nic_type, six.string_types):
int_types = [nic_type] int_types = [nic_type]
else: else:
int_types = nic_type int_types = nic_type
@ -317,7 +318,13 @@ def list_nics(nic_type):
ip_output = (line for line in ip_output if line) ip_output = (line for line in ip_output if line)
for line in ip_output: for line in ip_output:
if line.split()[1].startswith(int_type): if line.split()[1].startswith(int_type):
interfaces.append(line.split()[1].replace(":", "")) matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
if matched:
interface = matched.groups()[0]
else:
interface = line.split()[1].replace(":", "")
interfaces.append(interface)
return interfaces return interfaces

View File

@ -1,2 +1,2 @@
from .base import * from .base import * # NOQA
from .helpers import * from .helpers import * # NOQA

View File

@ -196,7 +196,7 @@ class StoredContext(dict):
if not os.path.isabs(file_name): if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name) file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'w') as file_stream: with open(file_name, 'w') as file_stream:
os.fchmod(file_stream.fileno(), 0600) os.fchmod(file_stream.fileno(), 0o600)
yaml.dump(config_data, file_stream) yaml.dump(config_data, file_stream)
def read_context(self, file_name): def read_context(self, file_name):
@ -211,15 +211,19 @@ class StoredContext(dict):
class TemplateCallback(ManagerCallback): class TemplateCallback(ManagerCallback):
""" """
Callback class that will render a Jinja2 template, for use as a ready action. Callback class that will render a Jinja2 template, for use as a ready
action.
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
:param str source: The template source file, relative to `$CHARM_DIR/templates`
:param str target: The target to write the rendered template to :param str target: The target to write the rendered template to
:param str owner: The owner of the rendered file :param str owner: The owner of the rendered file
:param str group: The group of the rendered file :param str group: The group of the rendered file
:param int perms: The permissions of the rendered file :param int perms: The permissions of the rendered file
""" """
def __init__(self, source, target, owner='root', group='root', perms=0444): def __init__(self, source, target,
owner='root', group='root', perms=0o444):
self.source = source self.source = source
self.target = target self.target = target
self.owner = owner self.owner = owner

View File

@ -4,7 +4,8 @@ from charmhelpers.core import host
from charmhelpers.core import hookenv from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None):
""" """
Render a template. Render a template.

View File

@ -5,10 +5,6 @@ from yaml import safe_load
from charmhelpers.core.host import ( from charmhelpers.core.host import (
lsb_release lsb_release
) )
from urlparse import (
urlparse,
urlunparse,
)
import subprocess import subprocess
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
config, config,
@ -16,6 +12,9 @@ from charmhelpers.core.hookenv import (
) )
import os import os
import six
from six.moves.urllib.parse import urlparse, urlunparse
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@ -69,10 +68,16 @@ CLOUD_ARCHIVE_POCKETS = {
# The order of this list is very important. Handlers should be listed in from # The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching. # least- to most-specific URL matching.
FETCH_HANDLERS = ( if six.PY2:
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', FETCH_HANDLERS = (
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
) 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)
else:
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
)
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
@ -148,7 +153,7 @@ def apt_install(packages, options=None, fatal=False):
cmd = ['apt-get', '--assume-yes'] cmd = ['apt-get', '--assume-yes']
cmd.extend(options) cmd.extend(options)
cmd.append('install') cmd.append('install')
if isinstance(packages, basestring): if isinstance(packages, six.string_types):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -181,7 +186,7 @@ def apt_update(fatal=False):
def apt_purge(packages, fatal=False): def apt_purge(packages, fatal=False):
"""Purge one or more packages""" """Purge one or more packages"""
cmd = ['apt-get', '--assume-yes', 'purge'] cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring): if isinstance(packages, six.string_types):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -192,7 +197,7 @@ def apt_purge(packages, fatal=False):
def apt_hold(packages, fatal=False): def apt_hold(packages, fatal=False):
"""Hold one or more packages""" """Hold one or more packages"""
cmd = ['apt-mark', 'hold'] cmd = ['apt-mark', 'hold']
if isinstance(packages, basestring): if isinstance(packages, six.string_types):
cmd.append(packages) cmd.append(packages)
else: else:
cmd.extend(packages) cmd.extend(packages)
@ -218,6 +223,7 @@ def add_source(source, key=None):
pocket for the release. pocket for the release.
'cloud:' may be used to activate official cloud archive pockets, 'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse' such as 'cloud:icehouse'
'distro' may be used as a noop
@param key: A key to be added to the system's APT keyring and used @param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an to verify the signatures on packages. Ideally, this should be an
@ -251,12 +257,14 @@ def add_source(source, key=None):
release = lsb_release()['DISTRIB_CODENAME'] release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release)) apt.write(PROPOSED_POCKET.format(release))
elif source == 'distro':
pass
else: else:
raise SourceConfigError("Unknown source: {!r}".format(source)) log("Unknown source: {!r}".format(source))
if key: if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile() as key_file: with NamedTemporaryFile('w+') as key_file:
key_file.write(key) key_file.write(key)
key_file.flush() key_file.flush()
key_file.seek(0) key_file.seek(0)
@ -293,14 +301,14 @@ def configure_sources(update=False,
sources = safe_load((config(sources_var) or '').strip()) or [] sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, basestring): if isinstance(sources, six.string_types):
sources = [sources] sources = [sources]
if keys is None: if keys is None:
for source in sources: for source in sources:
add_source(source, None) add_source(source, None)
else: else:
if isinstance(keys, basestring): if isinstance(keys, six.string_types):
keys = [keys] keys = [keys]
if len(sources) != len(keys): if len(sources) != len(keys):
@ -397,7 +405,7 @@ def _run_apt_command(cmd, fatal=False):
while result is None or result == APT_NO_LOCK: while result is None or result == APT_NO_LOCK:
try: try:
result = subprocess.check_call(cmd, env=env) result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError, e: except subprocess.CalledProcessError as e:
retry_count = retry_count + 1 retry_count = retry_count + 1
if retry_count > APT_NO_LOCK_RETRY_COUNT: if retry_count > APT_NO_LOCK_RETRY_COUNT:
raise raise

View File

@ -1,8 +1,14 @@
import os import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib import hashlib
import re
import six
from six.moves.urllib.request import (
build_opener, install_opener, urlopen, urlretrieve,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
)
from six.moves.urllib.parse import urlparse, urlunparse, parse_qs
from six.moves.urllib.error import URLError
from charmhelpers.fetch import ( from charmhelpers.fetch import (
BaseFetchHandler, BaseFetchHandler,
@ -15,6 +21,24 @@ from charmhelpers.payload.archive import (
from charmhelpers.core.host import mkdir, check_hash from charmhelpers.core.host import mkdir, check_hash
def splituser(host):
'''urllib.splituser(), but six's support of this seems broken'''
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match:
return match.group(1, 2)
return None, host
def splitpasswd(user):
'''urllib.splitpasswd(), but six's support of this is missing'''
_passwdprog = re.compile('^([^:]*):(.*)$', re.S)
match = _passwdprog.match(user)
if match:
return match.group(1, 2)
return user, None
class ArchiveUrlFetchHandler(BaseFetchHandler): class ArchiveUrlFetchHandler(BaseFetchHandler):
""" """
Handler to download archive files from arbitrary URLs. Handler to download archive files from arbitrary URLs.
@ -42,20 +66,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
""" """
# propogate all exceptions # propogate all exceptions
# URLError, OSError, etc # URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source) proto, netloc, path, params, query, fragment = urlparse(source)
if proto in ('http', 'https'): if proto in ('http', 'https'):
auth, barehost = urllib2.splituser(netloc) auth, barehost = splituser(netloc)
if auth is not None: if auth is not None:
source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) source = urlunparse((proto, barehost, path, params, query, fragment))
username, password = urllib2.splitpasswd(auth) username, password = splitpasswd(auth)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm() passman = HTTPPasswordMgrWithDefaultRealm()
# Realm is set to None in add_password to force the username and password # Realm is set to None in add_password to force the username and password
# to be used whatever the realm # to be used whatever the realm
passman.add_password(None, source, username, password) passman.add_password(None, source, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman) authhandler = HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler) opener = build_opener(authhandler)
urllib2.install_opener(opener) install_opener(opener)
response = urllib2.urlopen(source) response = urlopen(source)
try: try:
with open(dest, 'w') as dest_file: with open(dest, 'w') as dest_file:
dest_file.write(response.read()) dest_file.write(response.read())
@ -91,17 +115,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
url_parts = self.parse_url(source) url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir): if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755) mkdir(dest_dir, perms=0o755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
try: try:
self.download(source, dld_file) self.download(source, dld_file)
except urllib2.URLError as e: except URLError as e:
raise UnhandledSource(e.reason) raise UnhandledSource(e.reason)
except OSError as e: except OSError as e:
raise UnhandledSource(e.strerror) raise UnhandledSource(e.strerror)
options = urlparse.parse_qs(url_parts.fragment) options = parse_qs(url_parts.fragment)
for key, value in options.items(): for key, value in options.items():
if key in hashlib.algorithms: if six.PY2:
algorithms = hashlib.algorithms
else:
algorithms = hashlib.algorithms_available
if key in algorithms:
check_hash(dld_file, value, key) check_hash(dld_file, value, key)
if checksum: if checksum:
check_hash(dld_file, checksum, hash_type) check_hash(dld_file, checksum, hash_type)

View File

@ -5,6 +5,10 @@ from charmhelpers.fetch import (
) )
from charmhelpers.core.host import mkdir from charmhelpers.core.host import mkdir
import six
if six.PY3:
raise ImportError('bzrlib does not support Python3')
try: try:
from bzrlib.branch import Branch from bzrlib.branch import Branch
except ImportError: except ImportError:
@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name) branch_name)
if not os.path.exists(dest_dir): if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755) mkdir(dest_dir, perms=0o755)
try: try:
self.branch(source, dest_dir) self.branch(source, dest_dir)
except OSError as e: except OSError as e:

View File

@ -64,8 +64,10 @@ class HAProxyContext(OSContextGenerator):
Also used to extend cinder.conf context with correct api_listening_port Also used to extend cinder.conf context with correct api_listening_port
''' '''
haproxy_port = config('api-listening-port') haproxy_port = config('api-listening-port')
api_port = determine_api_port(config('api-listening-port')) api_port = determine_api_port(config('api-listening-port'),
apache_port = determine_apache_port(config('api-listening-port')) singlenode_mode=True)
apache_port = determine_apache_port(config('api-listening-port'),
singlenode_mode=True)
ctxt = { ctxt = {
'service_ports': {'cinder_api': [haproxy_port, apache_port]}, 'service_ports': {'cinder_api': [haproxy_port, apache_port]},

View File

@ -145,7 +145,7 @@ CONFIG_FILES = OrderedDict([
'services': ['cinder-volume'] 'services': ['cinder-volume']
}), }),
(HAPROXY_CONF, { (HAPROXY_CONF, {
'hook_contexts': [context.HAProxyContext(), 'hook_contexts': [context.HAProxyContext(singlenode_mode=True),
cinder_contexts.HAProxyContext()], cinder_contexts.HAProxyContext()],
'services': ['haproxy'], 'services': ['haproxy'],
}), }),

View File

@ -1,6 +1,6 @@
import amulet import amulet
import os import os
import six
class AmuletDeployment(object): class AmuletDeployment(object):
@ -52,12 +52,12 @@ class AmuletDeployment(object):
def _add_relations(self, relations): def _add_relations(self, relations):
"""Add all of the relations for the services.""" """Add all of the relations for the services."""
for k, v in relations.iteritems(): for k, v in six.iteritems(relations):
self.d.relate(k, v) self.d.relate(k, v)
def _configure_services(self, configs): def _configure_services(self, configs):
"""Configure all of the services.""" """Configure all of the services."""
for service, config in configs.iteritems(): for service, config in six.iteritems(configs):
self.d.configure(service, config) self.d.configure(service, config)
def _deploy(self): def _deploy(self):

View File

@ -5,6 +5,8 @@ import re
import sys import sys
import time import time
import six
class AmuletUtils(object): class AmuletUtils(object):
"""Amulet utilities. """Amulet utilities.
@ -58,7 +60,7 @@ class AmuletUtils(object):
Verify the specified services are running on the corresponding Verify the specified services are running on the corresponding
service units. service units.
""" """
for k, v in commands.iteritems(): for k, v in six.iteritems(commands):
for cmd in v: for cmd in v:
output, code = k.run(cmd) output, code = k.run(cmd)
if code != 0: if code != 0:
@ -100,11 +102,11 @@ class AmuletUtils(object):
longs, or can be a function that evaluate a variable and returns a longs, or can be a function that evaluate a variable and returns a
bool. bool.
""" """
for k, v in expected.iteritems(): for k, v in six.iteritems(expected):
if k in actual: if k in actual:
if (isinstance(v, basestring) or if (isinstance(v, six.string_types) or
isinstance(v, bool) or isinstance(v, bool) or
isinstance(v, (int, long))): isinstance(v, six.integer_types)):
if v != actual[k]: if v != actual[k]:
return "{}:{}".format(k, actual[k]) return "{}:{}".format(k, actual[k])
elif not v(actual[k]): elif not v(actual[k]):

View File

@ -1,3 +1,4 @@
import six
from charmhelpers.contrib.amulet.deployment import ( from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment AmuletDeployment
) )
@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
def _configure_services(self, configs): def _configure_services(self, configs):
"""Configure all of the services.""" """Configure all of the services."""
for service, config in configs.iteritems(): for service, config in six.iteritems(configs):
self.d.configure(service, config) self.d.configure(service, config)
def _get_openstack_release(self): def _get_openstack_release(self):

View File

@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import ( from charmhelpers.contrib.amulet.utils import (
AmuletUtils AmuletUtils
) )
@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
expected service catalog endpoints. expected service catalog endpoints.
""" """
self.log.debug('actual: {}'.format(repr(actual))) self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems(): for k, v in six.iteritems(expected):
if k in actual: if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0]) ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret: if ret: