Merge upstream and sync charm-helpers
commit 18427da1bb

config.yaml
@@ -10,9 +10,12 @@ options:
        nvp|nsx - Nicira NVP/VMware NSX
  ext-port:
    type: string
    default:
    description: |
      External port to use for routing of instance
      traffic to the external public network.
      A space-separated list of external ports to use for routing of instance
      traffic to the external public network. Valid values are either MAC
      addresses (in which case only MAC addresses for interfaces without an IP
      address already assigned will be used), or interfaces (eth0)
  openstack-origin:
    type: string
    default: distro
@@ -38,6 +41,7 @@ options:
      Use leader and none when configuring multiple floating pools
  external-network-id:
    type: string
    default:
    description: |
      Optional configuration to set the external-network-id. Only needed when
      configuring multiple external networks and should be used in conjunction
@@ -73,6 +77,7 @@ options:
      If set to True, supporting services will log to syslog.
  instance-mtu:
    type: int
    default:
    description: |
      Configure DHCP services to provide MTU configuration to instances
      within the cloud. This is useful in deployments where its not
@@ -90,6 +95,7 @@ options:
  # by default all access is over 'private-address'
  os-data-network:
    type: string
    default:
    description: |
      The IP address and netmask of the OpenStack Data network (e.g.,
      192.168.0.0/24)
@@ -1,10 +1,11 @@
import glob
import sys

from functools import partial

from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    ERROR, log, config,
    ERROR, log,
)

try:
@@ -156,19 +157,102 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')


def get_ipv6_addr(iface="eth0"):
def format_ipv6_addr(address):
    """
    IPv6 needs to be wrapped with [] in url link to parse correctly.
    """
    if is_ipv6(address):
        address = "[%s]" % address
    else:
        log("Not an valid ipv6 address: %s" % address,
            level=ERROR)
        address = None
    return address


def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None):
    """
    Return the assigned IP address for a given interface, if any, or [].
    """
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]
    if not exc_list:
        exc_list = []
    try:
        iface_addrs = netifaces.ifaddresses(iface)
        if netifaces.AF_INET6 not in iface_addrs:
            raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception('Unknown inet type ' + str(inet_type))

        addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
        ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
                     and config('vip') != a['addr']]
        if not ipv6_addr:
            raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("%s not found " % (iface))
            else:
                return []
        else:
            ifaces = [iface]

        return ipv6_addr[0]
    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])
    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type))
    return addresses

    except ValueError:
        raise ValueError("Invalid interface '%s'" % iface)
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')


def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None):
    """
    Return the assigned IPv6 address for a given interface, if any, or [].
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)
    remotly_addressable = []
    for address in addresses:
        if not address.startswith('fe80'):
            remotly_addressable.append(address)
    if fatal and not remotly_addressable:
        raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
    return remotly_addressable


def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of bridges on the system or []
    """
    b_rgex = vnic_dir + '/*/bridge'
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]


def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of nics comprising a given bridge on the system or []
    """
    brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_rgex)]


def is_bridge_member(nic):
    """
    Check if a given nic is a member of a bridge
    """
    for bridge in get_bridges():
        if nic in get_bridge_nics(bridge):
            return True
    return False
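The new address helpers above are what the ExternalPortContext change later in this commit builds on. A minimal usage sketch (not part of the commit), assuming a charm environment where charmhelpers and python-netifaces are importable and the interface name is illustrative:

from charmhelpers.contrib.network.ip import (
    get_ipv4_addr,
    get_ipv6_addr,
    format_ipv6_addr,
    is_bridge_member,
)

nic = 'eth0'  # illustrative interface name
if not is_bridge_member(nic):
    # fatal=False returns [] instead of raising when no address is assigned.
    v4 = get_ipv4_addr(nic, fatal=False)
    v6 = get_ipv6_addr(nic, fatal=False)
    # IPv6 literals must be wrapped in [] before being embedded in a URL.
    endpoints = ['http://%s:8080/' % a for a in v4]
    endpoints += ['http://%s:8080/' % format_ipv6_addr(a) for a in v6]
    print(endpoints)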
@@ -1,3 +1,6 @@
from bzrlib.branch import Branch
import os
import re
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)
@@ -16,11 +19,41 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        self.openstack = openstack
        self.source = source

    def _is_dev_branch(self):
        """Determine if branch being tested is a dev (i.e. next) branch."""
        branch = Branch.open(os.getcwd())
        parent = branch.get_parent()
        pattern = re.compile("^.*/next/$")
        if (pattern.match(parent)):
            return True
        else:
            return False

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        If the branch being tested is a dev branch, then determine the
        development branch locations for the other services. Otherwise,
        the default charm store branches will be used."""
        name = 0
        if self._is_dev_branch():
            updated_services = []
            for svc in other_services:
                if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
                    location = 'lp:charms/{}'.format(svc[name])
                else:
                    temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
                    location = temp.format(svc[name])
                updated_services.append(svc + (location,))
            other_services = updated_services
        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        """Add services to the deployment and set openstack-origin/source."""
        name = 0
        other_services = self._determine_branch_locations(other_services)
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
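To make the _determine_branch_locations() rewrite above concrete, here is a hedged standalone sketch of what it does to the service tuples (service names and the dev-branch flag are illustrative; the real method derives that flag from the bzr parent branch):

def determine_branch_locations(other_services, on_dev_branch):
    # Only rewrite locations when testing a development ("next") branch.
    if not on_dev_branch:
        return other_services
    base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
    updated_services = []
    for svc in other_services:
        name = svc[0]
        if name in base_charms:
            location = 'lp:charms/{}'.format(name)
        else:
            location = 'lp:~openstack-charmers/charms/trusty/{}/next'.format(name)
        updated_services.append(svc + (location,))
    return updated_services


print(determine_branch_locations([('keystone', 1), ('mysql', 1)], True))
# [('keystone', 1, 'lp:~openstack-charmers/charms/trusty/keystone/next'),
#  ('mysql', 1, 'lp:charms/mysql')]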
@@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils):

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(cirros_img):
        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(cirros_img) as f:
        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
@@ -421,6 +421,11 @@ class HAProxyContext(OSContextGenerator):
            'units': cluster_hosts,
        }

        if config('haproxy-server-timeout'):
            ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout')
        if config('haproxy-client-timeout'):
            ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'

@@ -70,6 +70,7 @@ SWIFT_CODENAMES = OrderedDict([
    ('1.13.0', 'icehouse'),
    ('1.12.0', 'icehouse'),
    ('1.11.0', 'icehouse'),
    ('2.0.0', 'juno'),
])

DEFAULT_LOOPBACK_SIZE = '5G'
@@ -156,12 +156,15 @@ def hook_name():


class Config(dict):
    """A Juju charm config dictionary that can write itself to
    disk (as json) and track which values have changed since
    the previous hook invocation.
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    Do not instantiate this object directly - instead call
    ``hookenv.config()``
    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

@@ -170,8 +173,8 @@ class Config(dict):
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'
        >>> config.save()


        >>> # user runs `juju set mycharm foo=baz`
@@ -188,22 +191,34 @@ class Config(dict):
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'
        >>> # don't forget to save at the end of hook!
        >>> config.save()

    """
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()

    def __getitem__(self, key):
        """For regular dict lookups, check the current juju config first,
        then the previous (saved) copy. This ensures that user-saved values
        will be returned by a dict lookup.

        """
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return (self._prev_dict or {})[key]

    def load_previous(self, path=None):
        """Load previous copy of config from disk so that current values
        can be compared to previous values.
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

@@ -218,8 +233,8 @@ class Config(dict):
            self._prev_dict = json.load(f)

    def changed(self, key):
        """Return true if the value for this key has changed since
        the last save.
        """Return True if the current value for this key is different from
        the previous value.

        """
        if self._prev_dict is None:
@@ -228,7 +243,7 @@ class Config(dict):

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no "previous" value.
        is no previous value.

        """
        if self._prev_dict:
@@ -238,7 +253,13 @@ class Config(dict):
    def save(self):
        """Save this config to disk.

        Preserves items in _prev_dict that do not exist in self.
        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        if self._prev_dict:
@@ -465,9 +486,10 @@ class Hooks(object):
        hooks.execute(sys.argv)
    """

    def __init__(self):
    def __init__(self, config_save=True):
        super(Hooks, self).__init__()
        self._hooks = {}
        self._config_save = config_save

    def register(self, name, function):
        """Register a hook"""
@@ -478,6 +500,10 @@ class Hooks(object):
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            self._hooks[hook_name]()
            if self._config_save:
                cfg = config()
                if cfg.implicit_save:
                    cfg.save()
        else:
            raise UnregisteredHookError(hook_name)
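Taken together, the Config and Hooks changes above mean a charm no longer has to call config.save() by hand at the end of every hook. A minimal sketch of the documented usage (not part of the commit), assuming it runs inside a hook where hookenv.config() works and 'ext-port' exists in config.yaml:

from charmhelpers.core import hookenv

hooks = hookenv.Hooks()  # config_save defaults to True


@hooks.hook('config-changed')
def config_changed():
    config = hookenv.config()
    if config.changed('ext-port'):
        hookenv.log('ext-port: %s -> %s' % (config.previous('ext-port'),
                                            config['ext-port']))
    # Arbitrary keys are preserved across hooks and written out automatically
    # at the end of a successful hook (implicit_save / config_save).
    config['last-ext-port'] = config['ext-port']


if __name__ == '__main__':
    import sys
    hooks.execute(sys.argv)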
@@ -68,8 +68,8 @@ def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    except subprocess.CalledProcessError as e:
        return 'unrecognized service' not in e.output
    else:
        return True

@@ -209,10 +209,15 @@ def mounts():
    return system_mounts


def file_hash(path):
    """Generate a md5 hash of the contents of 'path' or None if not found """
def file_hash(path, hash_type='md5'):
    """
    Generate a hash checksum of the contents of 'path' or None if not found.

    :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
                          such as md5, sha1, sha256, sha512, etc.
    """
    if os.path.exists(path):
        h = hashlib.md5()
        h = getattr(hashlib, hash_type)()
        with open(path, 'r') as source:
            h.update(source.read())  # IGNORE:E1101 - it does have update
        return h.hexdigest()
@@ -220,6 +225,26 @@ def file_hash(path):
        return None


def check_hash(path, checksum, hash_type='md5'):
    """
    Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash alrgorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum

    """
    actual_checksum = file_hash(path, hash_type)
    if checksum != actual_checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))


class ChecksumError(ValueError):
    pass


def restart_on_change(restart_map, stopstart=False):
    """Restart services based on configuration files changing

@@ -118,6 +118,9 @@ class ServiceManager(object):
        else:
            self.provide_data()
            self.reconfigure_services()
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()

    def provide_data(self):
        """
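A short hedged example of the new hashing helpers introduced above; the path and the bogus digest are placeholders:

from charmhelpers.core.host import file_hash, check_hash, ChecksumError

path = '/tmp/payload.tgz'                      # illustrative path
digest = file_hash(path, hash_type='sha256')   # None if the file does not exist
print(digest)

try:
    # Raises ChecksumError when the file content does not match the digest.
    check_hash(path, 'not-the-right-digest', hash_type='sha256')
except ChecksumError as exc:
    print('checksum mismatch: %s' % exc)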
@@ -1,3 +1,5 @@
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import templating

@@ -19,15 +21,21 @@ class RelationContext(dict):
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the interface type, to prevent
    potential naming conflicts.
    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = None
    interface = None
    required_keys = []

    def __init__(self, *args, **kwargs):
        super(RelationContext, self).__init__(*args, **kwargs)
    def __init__(self, name=None, additional_required_keys=None):
        if name is not None:
            self.name = name
        if additional_required_keys is not None:
            self.required_keys.extend(additional_required_keys)
        self.get_data()

    def __bool__(self):
@@ -101,9 +109,115 @@ class RelationContext(dict):
        return {}


class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'
    required_keys = ['host', 'user', 'password', 'database']


class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'
    required_keys = ['host', 'port']

    def provide_data(self):
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }


class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            self.config = yaml.load(fp).get('options', {})

    def __bool__(self):
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        return self.__bool__()


class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            os.fchmod(file_stream.fileno(), 0600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            data = yaml.load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data


class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a template, for use as a ready action.
    Callback class that will render a Jinja2 template, for use as a ready action.

    :param str source: The template source file, relative to `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target, owner='root', group='root', perms=0444):
        self.source = source
@@ -208,7 +208,8 @@ def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples:
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

@@ -311,22 +312,35 @@ def configure_sources(update=False,
        apt_update(fatal=True)


def install_remote(source):
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
    scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules
    Options supported are submodule-specific"""
    Schemes supported are based on this modules submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted. If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in handlers:
        try:
            installed_to = handler.install(source)
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource:
            pass
    if not installed_to:
@@ -1,6 +1,8 @@
import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib

from charmhelpers.fetch import (
    BaseFetchHandler,
@@ -10,11 +12,19 @@ from charmhelpers.payload.archive import (
    get_archive_handler,
    extract,
)
from charmhelpers.core.host import mkdir
from charmhelpers.core.host import mkdir, check_hash


class ArchiveUrlFetchHandler(BaseFetchHandler):
    """Handler for archives via generic URLs"""
    """
    Handler to download archive files from arbitrary URLs.

    Can fetch from http, https, ftp, and file URLs.

    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.

    Installs the contents of the archive in $CHARM_DIR/fetched/.
    """
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
            return False

    def download(self, source, dest):
        """
        Download an archive file.

        :param str source: URL pointing to an archive file.
        :param str dest: Local path location to download archive file to.
        """
        # propogate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@@ -48,7 +64,30 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
            os.unlink(dest)
            raise e

    def install(self, source):
    # Mandatory file validation via Sha1 or MD5 hashing.
    def download_and_validate(self, url, hashsum, validate="sha1"):
        tempfile, headers = urlretrieve(url)
        check_hash(tempfile, hashsum, validate)
        return tempfile

    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
        """
        Download and install an archive file, with optional checksum validation.

        The checksum can also be given on the `source` URL's fragment.
        For example::

            handler.install('http://example.com/file.tgz#sha1=deadbeef')

        :param str source: URL pointing to an archive file.
        :param str dest: Local destination path to install to. If not given,
            installs to `$CHARM_DIR/archives/archive_file_name`.
        :param str checksum: If given, validate the archive file after download.
        :param str hash_type: Algorithm used to generate `checksum`.
            Can be any hash alrgorithm supported by :mod:`hashlib`,
            such as md5, sha1, sha256, sha512, etc.

        """
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
@@ -60,4 +99,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return extract(dld_file)
        options = urlparse.parse_qs(url_parts.fragment)
        for key, value in options.items():
            if key in hashlib.algorithms:
                check_hash(dld_file, value, key)
        if checksum:
            check_hash(dld_file, checksum, hash_type)
        return extract(dld_file, dest)
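The keyword-based and fragment-based checksum paths added above can both be exercised through install_remote(); a hedged sketch with a placeholder URL and digest, assuming it runs inside a charm where $CHARM_DIR is set:

from charmhelpers.fetch import install_remote

# Checksum passed explicitly; the archive is extracted under $CHARM_DIR/fetched/.
dest = install_remote('http://example.com/payload.tgz',
                      checksum='0123456789abcdef0123456789abcdef01234567',
                      hash_type='sha1')

# Equivalent form with the digest carried on the URL fragment.
dest = install_remote('http://example.com/payload.tgz'
                      '#sha1=0123456789abcdef0123456789abcdef01234567')
print(dest)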
@@ -28,7 +28,12 @@ from charmhelpers.contrib.hahelpers.cluster import(
    eligible_leader
)
import re
from charmhelpers.contrib.network.ip import get_address_in_network
from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    get_ipv4_addr,
    get_ipv6_addr,
    is_bridge_member,
)

DB_USER = "quantum"
QUANTUM_DB = "quantum"
@@ -146,16 +151,29 @@ class ExternalPortContext(OSContextGenerator):
    def __call__(self):
        if not config('ext-port'):
            return None
        hwaddrs = {}
        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
        for nic in list_nics(['eth', 'bond']):
            hwaddrs[get_nic_hwaddr(nic)] = nic
            hwaddr = get_nic_hwaddr(nic)
            hwaddr_to_nic[hwaddr] = nic
            addresses = get_ipv4_addr(nic, fatal=False) + \
                get_ipv6_addr(nic, fatal=False)
            hwaddr_to_ip[hwaddr] = addresses
        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        for entry in config('ext-port').split():
            entry = entry.strip()
            if re.match(mac_regex, entry):
                if entry in hwaddrs:
                    return {"ext_port": hwaddrs[entry]}
                if entry in hwaddr_to_nic and len(hwaddr_to_ip[entry]) == 0:
                    # If the nic is part of a bridge then don't use it
                    if is_bridge_member(hwaddr_to_nic[entry]):
                        continue
                    # Entry is a MAC address for a valid interface that doesn't
                    # have an IP address assigned yet.
                    return {"ext_port": hwaddr_to_nic[entry]}
            else:
                # If the passed entry is not a MAC address, assume it's a valid
                # interface, and that the user put it there on purpose (we can
                # trust it to be the real external network).
                return {"ext_port": entry}
        return None
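For reference, a standalone sketch of the ext-port resolution rule implemented above: a MAC entry only maps to an interface when that interface exists and has no address assigned yet, while anything that is not a MAC is trusted as an interface name. The machine state below is illustrative and the bridge-membership check is omitted:

import re

mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)

# Hypothetical machine state: MAC -> nic, MAC -> assigned addresses.
hwaddr_to_nic = {'fe:c5:ce:8e:2b:01': 'eth1', 'fe:c5:ce:8e:2b:02': 'eth2'}
hwaddr_to_ip = {'fe:c5:ce:8e:2b:01': ['192.168.0.2'], 'fe:c5:ce:8e:2b:02': []}


def resolve_ext_port(ext_port_config):
    for entry in ext_port_config.split():
        if mac_regex.match(entry):
            # Only use a MAC whose nic exists and has no address yet.
            if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                return hwaddr_to_nic[entry]
        else:
            # Non-MAC entries are trusted as interface names.
            return entry
    return None


print(resolve_ext_port('fe:c5:ce:8e:2b:01 fe:c5:ce:8e:2b:02'))  # -> eth2
print(resolve_ext_port('eth1010'))                              # -> eth1010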
@@ -1,7 +1,8 @@
from charmhelpers.core.host import (
    service_running,
    service_stop,
    service_restart
    service_restart,
    lsb_release
)
from charmhelpers.core.hookenv import (
    log,
@@ -112,7 +113,6 @@ NEUTRON_GATEWAY_PKGS = {
        "nova-api-metadata",
        "neutron-plugin-metering-agent",
        "neutron-lbaas-agent",
        "openswan"
    ],
    NVP: [
        "neutron-dhcp-agent",
@@ -148,10 +148,13 @@ def get_packages():
    plugin = remap_plugin(config('plugin'))
    packages = deepcopy(GATEWAY_PKGS[networking_name()][plugin])
    if (get_os_codename_install_source(config('openstack-origin'))
            >= 'icehouse' and plugin == 'ovs'):
            >= 'icehouse' and plugin == 'ovs'
            and lsb_release()['DISTRIB_CODENAME'] < 'utopic'):
        # NOTE(jamespage) neutron-vpn-agent supercedes l3-agent for icehouse
        # but openswan was removed in utopic.
        packages.remove('neutron-l3-agent')
        packages.append('neutron-vpn-agent')
        packages.append('openswan')
    return packages
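A hedged sketch of the release gate introduced in get_packages() above, with illustrative inputs; the real code additionally requires that the configured plugin is ovs:

def wants_vpn_agent(os_codename, distrib_codename):
    # neutron-vpn-agent supersedes neutron-l3-agent from icehouse onwards,
    # but it depends on openswan, which was dropped in utopic.
    return os_codename >= 'icehouse' and distrib_codename < 'utopic'


print(wants_vpn_agent('juno', 'trusty'))  # True  -> swap in neutron-vpn-agent
print(wants_vpn_agent('juno', 'utopic'))  # False -> keep neutron-l3-agent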
@@ -24,10 +24,10 @@ class AmuletDeployment(object):
        """Add services.

        Add services to the deployment where this_service is the local charm
        that we're focused on testing and other_services are the other
        charms that come from the charm store.
        that we're testing and other_services are the other services that
        are being used in the amulet tests.
        """
        name, units = range(2)
        name, units, location = range(3)

        if this_service[name] != os.path.basename(os.getcwd()):
            s = this_service[name]
@@ -37,12 +37,13 @@ class AmuletDeployment(object):
        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            if self.series:
                self.d.add(svc[name],
                           charm='cs:{}/{}'.format(self.series, svc[name]),
                           units=svc[units])
            if len(svc) > 2:
                branch_location = svc[location]
            elif self.series:
                branch_location = 'cs:{}/{}'.format(self.series, svc[name]),
            else:
                self.d.add(svc[name], units=svc[units])
                branch_location = None
            self.d.add(svc[name], charm=branch_location, units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
@@ -57,7 +58,7 @@ class AmuletDeployment(object):
    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup()
            self.d.setup(timeout=900)
            self.d.sentry.wait(timeout=900)
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
@@ -1,3 +1,6 @@
from bzrlib.branch import Branch
import os
import re
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)
@@ -16,11 +19,41 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        self.openstack = openstack
        self.source = source

    def _is_dev_branch(self):
        """Determine if branch being tested is a dev (i.e. next) branch."""
        branch = Branch.open(os.getcwd())
        parent = branch.get_parent()
        pattern = re.compile("^.*/next/$")
        if (pattern.match(parent)):
            return True
        else:
            return False

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        If the branch being tested is a dev branch, then determine the
        development branch locations for the other services. Otherwise,
        the default charm store branches will be used."""
        name = 0
        if self._is_dev_branch():
            updated_services = []
            for svc in other_services:
                if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
                    location = 'lp:charms/{}'.format(svc[name])
                else:
                    temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
                    location = temp.format(svc[name])
                updated_services.append(svc + (location,))
            other_services = updated_services
        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        """Add services to the deployment and set openstack-origin/source."""
        name = 0
        other_services = self._determine_branch_locations(other_services)
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
@@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils):

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(cirros_img):
        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(cirros_img) as f:
        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
@@ -12,17 +12,19 @@ from test_utils import (
)

TO_PATCH = [
    'apt_install',
    'config',
    'context_complete',
    'eligible_leader',
    'get_ipv4_addr',
    'get_ipv6_addr',
    'get_nic_hwaddr',
    'get_os_codename_install_source',
    'list_nics',
    'relation_get',
    'relation_ids',
    'related_units',
    'context_complete',
    'unit_get',
    'apt_install',
    'get_os_codename_install_source',
    'eligible_leader',
    'list_nics',
    'get_nic_hwaddr',
]


@@ -120,37 +122,57 @@ class TestExternalPortContext(CharmTestCase):
    def setUp(self):
        super(TestExternalPortContext, self).setUp(quantum_contexts,
                                                   TO_PATCH)
        self.machine_macs = {
            'eth0': 'fe:c5:ce:8e:2b:00',
            'eth1': 'fe:c5:ce:8e:2b:01',
            'eth2': 'fe:c5:ce:8e:2b:02',
            'eth3': 'fe:c5:ce:8e:2b:03',
        }
        self.machine_nics = {
            'eth0': ['192.168.0.1'],
            'eth1': ['192.168.0.2'],
            'eth2': [],
            'eth3': [],
        }
        self.absent_macs = "aa:a5:ae:ae:ab:a4 "

    def test_no_ext_port(self):
        self.config.return_value = None
        self.assertEquals(quantum_contexts.ExternalPortContext()(),
                          None)
        self.assertIsNone(quantum_contexts.ExternalPortContext()())

    def test_ext_port_eth(self):
        self.config.return_value = 'eth1010'
        self.assertEquals(quantum_contexts.ExternalPortContext()(),
                          {'ext_port': 'eth1010'})

    def test_ext_port_mac(self):
        machine_macs = {
            'eth0': 'fe:c5:ce:8e:2b:00',
            'eth1': 'fe:c5:ce:8e:2b:01',
            'eth2': 'fe:c5:ce:8e:2b:02',
            'eth3': 'fe:c5:ce:8e:2b:03',
        }
        absent_macs = "aa:a5:ae:ae:ab:a4 "
        config_macs = absent_macs + " " + machine_macs['eth2']
    def _fake_get_hwaddr(self, arg):
        return self.machine_macs[arg]

        def get_hwaddr(arg):
            return machine_macs[arg]
    def _fake_get_ipv4(self, arg, fatal=False):
        return self.machine_nics[arg]

    def test_ext_port_mac(self):
        config_macs = self.absent_macs + " " + self.machine_macs['eth2']
        self.get_ipv4_addr.side_effect = self._fake_get_ipv4
        self.get_ipv6_addr.return_value = []
        self.config.return_value = config_macs
        self.list_nics.return_value = machine_macs.keys()
        self.get_nic_hwaddr.side_effect = get_hwaddr
        self.list_nics.return_value = self.machine_macs.keys()
        self.get_nic_hwaddr.side_effect = self._fake_get_hwaddr
        self.assertEquals(quantum_contexts.ExternalPortContext()(),
                          {'ext_port': 'eth2'})
        self.config.return_value = absent_macs
        self.config.return_value = self.absent_macs
        self.assertIsNone(quantum_contexts.ExternalPortContext()())

    def test_ext_port_mac_one_used_nic(self):
        config_macs = self.machine_macs['eth1'] + " " + \
            self.machine_macs['eth2']
        self.get_ipv4_addr.side_effect = self._fake_get_ipv4
        self.get_ipv6_addr.return_value = []
        self.config.return_value = config_macs
        self.list_nics.return_value = self.machine_macs.keys()
        self.get_nic_hwaddr.side_effect = self._fake_get_hwaddr
        self.assertEquals(quantum_contexts.ExternalPortContext()(),
                          None)
                          {'ext_port': 'eth2'})


class TestL3AgentContext(CharmTestCase):
@@ -43,6 +43,7 @@ TO_PATCH = [
    'service_restart',
    'remap_plugin',
    'is_relation_made',
    'lsb_release'
]


@@ -62,6 +63,7 @@ class TestQuantumUtils(CharmTestCase):
        super(TestQuantumUtils, self).setUp(quantum_utils, TO_PATCH)
        self.networking_name.return_value = 'neutron'
        self.headers_package.return_value = 'linux-headers-2.6.18'
        self._set_distrib_codename('trusty')

    def noop(value):
        return value
@@ -71,6 +73,9 @@ class TestQuantumUtils(CharmTestCase):
        # Reset cached cache
        hookenv.cache = {}

    def _set_distrib_codename(self, newcodename):
        self.lsb_release.return_value = {'DISTRIB_CODENAME': newcodename}

    def test_valid_plugin(self):
        self.config.return_value = 'ovs'
        self.assertTrue(quantum_utils.valid_plugin())
@@ -113,6 +118,19 @@ class TestQuantumUtils(CharmTestCase):
        self.assertTrue('neutron-vpn-agent' in quantum_utils.get_packages())
        self.assertFalse('neutron-l3-agent' in quantum_utils.get_packages())

    def test_get_packages_ovs_juno_utopic(self):
        self.config.return_value = 'ovs'
        self.get_os_codename_install_source.return_value = 'juno'
        self._set_distrib_codename('utopic')
        self.assertFalse('neutron-vpn-agent' in quantum_utils.get_packages())
        self.assertTrue('neutron-l3-agent' in quantum_utils.get_packages())

    def test_get_packages_ovs_juno_trusty(self):
        self.config.return_value = 'ovs'
        self.get_os_codename_install_source.return_value = 'juno'
        self.assertTrue('neutron-vpn-agent' in quantum_utils.get_packages())
        self.assertFalse('neutron-l3-agent' in quantum_utils.get_packages())

    def test_configure_ovs_starts_service_if_required(self):
        self.config.return_value = 'ovs'
        self.service_running.return_value = False