Resync new helper, fixup unit test

James Page 2014-11-16 08:36:33 -06:00
parent 0fb0cbd29e
commit d3ce3b37f2
13 changed files with 258 additions and 62 deletions

View File

@@ -1,4 +1,4 @@
-branch: lp:charm-helpers
+branch: lp:~james-page/charm-helpers/lp.1391784
 destination: hooks/charmhelpers
 include:
     - core

View File

@@ -8,7 +8,6 @@ from functools import partial
 from charmhelpers.core.hookenv import unit_get
 from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
-    WARNING,
     ERROR,
     log
 )
@@ -175,7 +174,6 @@ def format_ipv6_addr(address):
     if is_ipv6(address):
         address = "[%s]" % address
     else:
-        log("Not a valid ipv6 address: %s" % address, level=WARNING)
         address = None

     return address

View File

@@ -15,6 +15,7 @@ from charmhelpers.fetch import (
 from charmhelpers.core.hookenv import (
     config,
+    is_relation_made,
     local_unit,
     log,
     relation_get,
@@ -24,7 +25,7 @@ from charmhelpers.core.hookenv import (
     unit_get,
     unit_private_ip,
     ERROR,
-    INFO
+    DEBUG
 )

 from charmhelpers.core.host import (
@@ -57,8 +58,9 @@ from charmhelpers.contrib.network.ip import (
     is_address_in_network
 )

-from charmhelpers.contrib.openstack.utils import get_host_ip
+from charmhelpers.contrib.openstack.utils import (
+    get_host_ip,
+)

 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@@ -456,9 +458,9 @@ class HAProxyContext(OSContextGenerator):
                     if _laddr:
                         cluster_hosts[laddr]['backends'][_unit] = _laddr

-        # NOTE(jamespage) no split configurations found, just use
-        # private addresses
-        if not cluster_hosts:
+        # NOTE(jamespage) add backend based on private address - this
+        # with either be the only backend or the fallback if no acls
+        # match in the frontend
         cluster_hosts[addr] = {}
         cluster_hosts[addr]['network'] = "{}/{}".format(
             addr,
@@ -476,6 +478,7 @@ class HAProxyContext(OSContextGenerator):
         ctxt = {
             'frontends': cluster_hosts,
+            'default_backend': addr
         }

         if config('haproxy-server-timeout'):
@@ -584,6 +587,49 @@ class ApacheSSLContext(OSContextGenerator):
                 cns.append(k.lstrip('ssl_key_'))

         return list(set(cns))

+    def get_network_addresses(self):
+        """For each network configured, return corresponding address and vip
+           (if available).
+
+        Returns a list of tuples of the form:
+
+            [(address_in_net_a, vip_in_net_a),
+             (address_in_net_b, vip_in_net_b),
+             ...]
+
+            or, if no vip(s) available:
+
+            [(address_in_net_a, address_in_net_a),
+             (address_in_net_b, address_in_net_b),
+             ...]
+        """
+        addresses = []
+        vips = []
+        if config('vip'):
+            vips = config('vip').split()
+
+        for net_type in ['os-internal-network', 'os-admin-network',
+                         'os-public-network']:
+            addr = get_address_in_network(config(net_type),
+                                          unit_get('private-address'))
+            if len(vips) > 1 and is_clustered():
+                if not config(net_type):
+                    log("Multiple networks configured but net_type "
+                        "is None (%s)." % net_type, level='WARNING')
+                    continue
+
+                for vip in vips:
+                    if is_address_in_network(config(net_type), vip):
+                        addresses.append((addr, vip))
+                        break
+            elif is_clustered() and config('vip'):
+                addresses.append((addr, config('vip')))
+            else:
+                addresses.append((addr, addr))
+
+        return addresses
+
     def __call__(self):
         if isinstance(self.external_ports, basestring):
             self.external_ports = [self.external_ports]
@@ -602,27 +648,7 @@ class ApacheSSLContext(OSContextGenerator):
         for cn in self.canonical_names():
             self.configure_cert(cn)

-        addresses = []
-        vips = []
-        if config('vip'):
-            vips = config('vip').split()
-
-        for network_type in ['os-internal-network',
-                             'os-admin-network',
-                             'os-public-network']:
-            address = get_address_in_network(config(network_type),
-                                             unit_get('private-address'))
-            if len(vips) > 0 and is_clustered():
-                for vip in vips:
-                    if is_address_in_network(config(network_type),
-                                             vip):
-                        addresses.append((address, vip))
-                        break
-            elif is_clustered():
-                addresses.append((address, config('vip')))
-            else:
-                addresses.append((address, address))
-
+        addresses = self.get_network_addresses()
         for address, endpoint in set(addresses):
             for api_port in self.external_ports:
                 ext_port = determine_apache_port(api_port)
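
For reference, a minimal sketch of how the new get_network_addresses() helper behaves. The config values, addresses and the mock patching are illustrative assumptions, not part of the diff:

    from mock import patch
    from charmhelpers.contrib.openstack import context

    FAKE_CONFIG = {
        'vip': '10.10.10.100 10.20.20.100',
        'os-internal-network': '10.10.10.0/24',
        'os-public-network': '10.20.20.0/24',
    }
    FAKE_ADDRS = {
        '10.10.10.0/24': '10.10.10.5',
        '10.20.20.0/24': '10.20.20.5',
    }

    with patch.object(context, 'config', side_effect=FAKE_CONFIG.get), \
            patch.object(context, 'unit_get', return_value='10.10.10.5'), \
            patch.object(context, 'get_address_in_network',
                         side_effect=lambda net, fb=None: FAKE_ADDRS.get(net, fb)), \
            patch.object(context, 'is_clustered', return_value=True), \
            patch.object(context, 'log'):
        # each local address is paired with the vip falling in its network;
        # os-admin-network is unset here, so it is skipped with a warning
        print(context.ApacheSSLContext().get_network_addresses())
        # -> [('10.10.10.5', '10.10.10.100'), ('10.20.20.5', '10.20.20.100')]

__call__() then feeds these (address, endpoint) pairs into the Apache vhost configuration, one entry per external port.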
@@ -700,6 +726,7 @@ class NeutronContext(OSContextGenerator):
                                                   self.network_manager)
         n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                                self.network_manager)
+        n1kv_user_config_flags = config('n1kv-config-flags')
         n1kv_ctxt = {
             'core_plugin': driver,
             'neutron_plugin': 'n1kv',
@@ -710,11 +737,29 @@ class NeutronContext(OSContextGenerator):
             'vsm_username': config('n1kv-vsm-username'),
             'vsm_password': config('n1kv-vsm-password'),
             'restrict_policy_profiles': config(
-                'n1kv_restrict_policy_profiles'),
+                'n1kv-restrict-policy-profiles'),
         }
+        if n1kv_user_config_flags:
+            flags = config_flags_parser(n1kv_user_config_flags)
+            n1kv_ctxt['user_config_flags'] = flags

         return n1kv_ctxt

+    def calico_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        config = neutron_plugin_attribute(self.plugin, 'config',
+                                          self.network_manager)
+        calico_ctxt = {
+            'core_plugin': driver,
+            'neutron_plugin': 'Calico',
+            'neutron_security_groups': self.neutron_security_groups,
+            'local_ip': unit_private_ip(),
+            'config': config
+        }
+
+        return calico_ctxt
+
     def neutron_ctxt(self):
         if https():
             proto = 'https'
@@ -748,6 +793,8 @@ class NeutronContext(OSContextGenerator):
             ctxt.update(self.nvp_ctxt())
         elif self.plugin == 'n1kv':
             ctxt.update(self.n1kv_ctxt())
+        elif self.plugin == 'Calico':
+            ctxt.update(self.calico_ctxt())

         alchemy_flags = config('neutron-alchemy-flags')
         if alchemy_flags:
@@ -761,21 +808,39 @@
 class OSConfigFlagContext(OSContextGenerator):
     """
-    Responsible for adding user-defined config-flags in charm config to a
-    template context.
+    Provides support for user-defined config flags.
+
+    Users can define a comma-seperated list of key=value pairs
+    in the charm configuration and apply them at any point in
+    any file by using a template flag.
+
+    Sometimes users might want config flags inserted within a
+    specific section so this class allows users to specify the
+    template flag name, allowing for multiple template flags
+    (sections) within the same context.

     NOTE: the value of config-flags may be a comma-separated list of
           key=value pairs and some Openstack config files support
           comma-separated lists as values.
     """

+    def __init__(self, charm_flag='config-flags',
+                 template_flag='user_config_flags'):
+        """
+        charm_flag: config flags in charm configuration.
+        template_flag: insert point for user-defined flags template file.
+        """
+        super(OSConfigFlagContext, self).__init__()
+        self._charm_flag = charm_flag
+        self._template_flag = template_flag
+
     def __call__(self):
-        config_flags = config('config-flags')
+        config_flags = config(self._charm_flag)
         if not config_flags:
             return {}

-        flags = config_flags_parser(config_flags)
-        return {'user_config_flags': flags}
+        return {self._template_flag:
+                config_flags_parser(config_flags)}


 class SubordinateConfigContext(OSContextGenerator):
@@ -867,7 +932,7 @@ class SubordinateConfigContext(OSContextGenerator):
                 else:
                     ctxt[k] = v

-        log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
+        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)

         return ctxt
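
With the new constructor arguments, OSConfigFlagContext can be instantiated more than once per charm to feed separate template sections. A minimal sketch; the option and template-variable names below are hypothetical:

    from charmhelpers.contrib.openstack.context import OSConfigFlagContext

    contexts = [
        # default behaviour: 'config-flags' -> {'user_config_flags': {...}}
        OSConfigFlagContext(),
        # hypothetical second set of flags rendered into its own section
        OSConfigFlagContext(charm_flag='api-config-flags',
                            template_flag='api_config_flags'),
    ]

    # With the charm option api-config-flags set to "foo=bar,baz=qux", the
    # second context returns {'api_config_flags': {'foo': 'bar', 'baz': 'qux'}},
    # and {} when the option is unset.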
@@ -922,3 +987,34 @@ class WorkerConfigContext(OSContextGenerator):
             "workers": self.num_cpus * multiplier
         }

         return ctxt
+
+
+class ZeroMQContext(OSContextGenerator):
+    interfaces = ['zeromq-configuration']
+
+    def __call__(self):
+        ctxt = {}
+        if is_relation_made('zeromq-configuration', 'host'):
+            for rid in relation_ids('zeromq-configuration'):
+                for unit in related_units(rid):
+                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+                    ctxt['zmq_host'] = relation_get('host', unit, rid)
+        return ctxt
+
+
+class NotificationDriverContext(OSContextGenerator):
+    def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'):
+        """
+        :param zmq_relation : Name of Zeromq relation to check
+        """
+        self.zmq_relation = zmq_relation
+        self.amqp_relation = amqp_relation
+
+    def __call__(self):
+        ctxt = {
+            'notifications': 'False',
+        }
+        if is_relation_made(self.amqp_relation):
+            ctxt['notifications'] = "True"
+        return ctxt
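
The two new contexts can be called directly inside a hook. A rough sketch of their outputs, assuming the relations described above exist:

    from charmhelpers.contrib.openstack.context import (
        NotificationDriverContext,
        ZeroMQContext,
    )

    # {'zmq_nonce': ..., 'zmq_host': ...} once a zeromq-configuration
    # relation has published 'host'; {} before that
    zmq_ctxt = ZeroMQContext()()

    # {'notifications': 'True'} if an amqp relation is made,
    # {'notifications': 'False'} otherwise
    notify_ctxt = NotificationDriverContext()()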

View File

@@ -138,10 +138,25 @@ def neutron_plugins():
                                         relation_prefix='neutron',
                                         ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
-            'packages': [['neutron-plugin-cisco']],
+            'packages': [[headers_package()] + determine_dkms_package(),
+                         ['neutron-plugin-cisco']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-cisco'],
             'server_services': ['neutron-server']
+        },
+        'Calico': {
+            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
+            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
+            'packages': [[headers_package()] + determine_dkms_package(),
+                         ['calico-compute', 'bird', 'neutron-dhcp-agent']],
+            'server_packages': ['neutron-server', 'calico-control'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
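
For reference, a sketch of how the new 'Calico' entry is consumed via neutron_plugin_attribute(), the same lookup the NeutronContext changes above rely on. It assumes a hook environment with the network manager set to 'neutron':

    from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

    packages = neutron_plugin_attribute('Calico', 'packages', 'neutron')
    # -> [[<kernel headers>] + <dkms packages>,
    #     ['calico-compute', 'bird', 'neutron-dhcp-agent']]
    services = neutron_plugin_attribute('Calico', 'services', 'neutron')
    # -> ['calico-compute', 'bird', 'neutron-dhcp-agent']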

View File

@@ -42,7 +42,8 @@ frontend tcp-in_{{ service }}
 {% for frontend in frontends -%}
     acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
     use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
-{% endfor %}
+{% endfor -%}
+    default_backend {{ service }}_{{ default_backend }}

 {% for frontend in frontends -%}
 backend {{ service }}_{{ frontend }}
     balance leastconn

View File

@@ -2,6 +2,7 @@
 # Common python helper functions used for OpenStack charms.

 from collections import OrderedDict
+from functools import wraps

 import subprocess
 import json
@@ -468,6 +469,14 @@ def get_hostname(address, fqdn=True):
         return result.split('.')[0]


+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
+    mm_map = {}
+    if os.path.isfile(mm_file):
+        with open(mm_file, 'r') as f:
+            mm_map = json.load(f)
+    return mm_map
+
+
 def sync_db_with_multi_ipv6_addresses(database, database_user,
                                       relation_prefix=None):
     hosts = get_ipv6_addr(dynamic_only=False)
@@ -484,3 +493,18 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
     for rid in relation_ids('shared-db'):
         relation_set(relation_id=rid, **kwargs)
+
+
+def os_requires_version(ostack_release, pkg):
+    """
+    Decorator for hook to specify minimum supported release
+    """
+    def wrap(f):
+        @wraps(f)
+        def wrapped_f(*args):
+            if os_release(pkg) < ostack_release:
+                raise Exception("This hook is not supported on releases"
+                                " before %s" % ostack_release)
+            f(*args)
+        return wrapped_f
+    return wrap
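
A minimal sketch of decorating a hook with the new os_requires_version() helper; the hook name and package below are placeholders:

    from charmhelpers.contrib.openstack.utils import os_requires_version

    @os_requires_version('icehouse', 'python-keystoneclient')
    def config_changed():
        # only runs when os_release('python-keystoneclient') >= 'icehouse';
        # otherwise the decorator raises, naming the minimum release
        pass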

View File

@@ -113,7 +113,7 @@ def get_osds(service):
     return None


-def create_pool(service, name, replicas=2):
+def create_pool(service, name, replicas=3):
     ''' Create a new RADOS pool '''
     if pool_exists(service, name):
         log("Ceph pool {} already exists, skipping creation".format(name),
@@ -300,7 +300,8 @@ def copy_files(src, dst, symlinks=False, ignore=None):
 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
-                        blk_device, fstype, system_services=[]):
+                        blk_device, fstype, system_services=[],
+                        replicas=3):
     """
     NOTE: This function must only be called from a single service unit for
     the same rbd_img otherwise data loss will occur.
@@ -317,7 +318,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
     # Ensure pool, RBD image, RBD mappings are in place.
     if not pool_exists(service, pool):
         log('ceph: Creating new pool {}.'.format(pool))
-        create_pool(service, pool)
+        create_pool(service, pool, replicas=replicas)

     if not rbd_exists(service, pool, rbd_img):
         log('ceph: Creating RBD image ({}).'.format(rbd_img))
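
A sketch of passing the new replicas argument through ensure_ceph_storage(); the service, pool and device names are invented and the import path is assumed to be the usual charmhelpers location of this module:

    from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_storage

    ensure_ceph_storage(service='mysql', pool='mysql', rbd_img='mysql',
                        sizemb=1024, mount_point='/var/lib/mysql',
                        blk_device='/dev/rbd1', fstype='ext4',
                        system_services=['mysql'],
                        replicas=3)  # pool is created with 3 replicas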

View File

@@ -214,6 +214,12 @@ class Config(dict):
         except KeyError:
             return (self._prev_dict or {})[key]

+    def keys(self):
+        prev_keys = []
+        if self._prev_dict is not None:
+            prev_keys = self._prev_dict.keys()
+        return list(set(prev_keys + dict.keys(self)))
+
     def load_previous(self, path=None):
         """Load previous copy of config from disk.

View File

@@ -6,13 +6,13 @@
 # Matthew Wedgwood <matthew.wedgwood@canonical.com>

 import os
+import re
 import pwd
 import grp
 import random
 import string
 import subprocess
 import hashlib
-import shutil

 from contextlib import contextmanager
 from collections import OrderedDict
@@ -317,7 +317,13 @@ def list_nics(nic_type):
     ip_output = (line for line in ip_output if line)
     for line in ip_output:
         if line.split()[1].startswith(int_type):
-            interfaces.append(line.split()[1].replace(":", ""))
+            matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
+            if matched:
+                interface = matched.groups()[0]
+            else:
+                interface = line.split()[1].replace(":", "")
+            interfaces.append(interface)

     return interfaces
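
For reference, the behaviour change in list_nics() for VLAN interfaces on bonds, traced on a sample `ip` output line (the line itself is made up):

    import re

    line = "5: bond0.100@bond0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500"
    # old code: line.split()[1].replace(":", "")  ->  'bond0.100@bond0'
    matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
    print(matched.groups()[0])  # new code  ->  'bond0.100'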

View File

@@ -1,2 +1,2 @@
-from .base import *
-from .helpers import *
+from .base import *  # NOQA
+from .helpers import *  # NOQA

View File

@@ -72,6 +72,7 @@ CLOUD_ARCHIVE_POCKETS = {
 FETCH_HANDLERS = (
     'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
     'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
 )

 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
@@ -218,6 +219,7 @@ def add_source(source, key=None):
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
+       'distro' may be used as a noop

     @param key: A key to be added to the system's APT keyring and used
            to verify the signatures on packages. Ideally, this should be an
@@ -251,8 +253,10 @@
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    elif source == 'distro':
+        pass
     else:
-        raise SourceConfigError("Unknown source: {!r}".format(source))
+        log("Unknown source: {!r}".format(source))

     if key:
         if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
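
A minimal sketch of the newly tolerated source values: 'distro' is now an explicit no-op and unknown sources are logged rather than raising SourceConfigError:

    from charmhelpers.fetch import add_source, apt_update

    add_source('distro')          # no-op, handy as a config.yaml default
    add_source('cloud:icehouse')  # unchanged: enables the cloud archive pocket
    add_source('bogus-source')    # previously raised, now only logged
    apt_update(fatal=True)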

View File

@@ -0,0 +1,44 @@
+import os
+from charmhelpers.fetch import (
+    BaseFetchHandler,
+    UnhandledSource
+)
+from charmhelpers.core.host import mkdir
+
+try:
+    from git import Repo
+except ImportError:
+    from charmhelpers.fetch import apt_install
+    apt_install("python-git")
+    from git import Repo
+
+
+class GitUrlFetchHandler(BaseFetchHandler):
+    """Handler for git branches via generic and github URLs"""
+    def can_handle(self, source):
+        url_parts = self.parse_url(source)
+        #TODO (mattyw) no support for ssh git@ yet
+        if url_parts.scheme not in ('http', 'https', 'git'):
+            return False
+        else:
+            return True
+
+    def clone(self, source, dest, branch):
+        if not self.can_handle(source):
+            raise UnhandledSource("Cannot handle {}".format(source))
+
+        repo = Repo.clone_from(source, dest)
+        repo.git.checkout(branch)
+
+    def install(self, source, branch="master"):
+        url_parts = self.parse_url(source)
+        branch_name = url_parts.path.strip("/").split("/")[-1]
+        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                branch_name)
+        if not os.path.exists(dest_dir):
+            mkdir(dest_dir, perms=0755)
+        try:
+            self.clone(source, dest_dir, branch)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        return dest_dir
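
A rough sketch of exercising the new handler directly; the repository URL is only an example, CHARM_DIR must be set (as it is in a hook environment), and ssh-style git@ URLs remain unsupported per the TODO:

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    url = 'https://github.com/juju/charm-helpers'
    if handler.can_handle(url):
        # clones into $CHARM_DIR/fetched/<last path component> and
        # checks out the requested branch
        print(handler.install(url, branch='master'))

Because the handler is also registered in FETCH_HANDLERS above, install_remote() should pick git URLs up automatically.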

View File

@@ -88,6 +88,7 @@ class TestKeystoneContexts(CharmTestCase):
                          'keystone': '1.2.3.4',
                          'unit-0': '10.0.0.0'
                      }
-                 }}
+                 }},
+                 'default_backend': '1.2.3.4'
              }
          )