[james-page,r=coreycb] Add option to disable security groups like nova-compute

commit ff27468326

 Makefile | 4
@@ -6,7 +6,7 @@ lint:
 	@flake8 --exclude hooks/charmhelpers unit_tests
 	@charm proof
 
-test:
+unit_test:
 	@echo Starting tests...
 	@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
 
@@ -18,6 +18,6 @@ bin/charm_helpers_sync.py:
 sync: bin/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
 
-publish: lint test
-	bzr push lp:charms/neutron-openvswitch
+publish: lint unit_test
+	bzr push lp:charms/trusty/neutron-openvswitch
 README.md | 14
@@ -27,3 +27,17 @@ The neutron-api and neutron-openvswitch charms must be related to the same insta
 # Restrictions
 
 It should only be used with OpenStack Icehouse and above and requires a separate neutron-api service to have been deployed.
+
+# Disabling security group management
+
+WARNING: this feature allows you to effectively disable security on your cloud!
+
+This charm has a configuration option to allow users to disable any per-instance security group management; this must be used with neutron-security-groups enabled in the neutron-api charm and could be used to turn off security on a selected set of compute nodes:
+
+    juju deploy neutron-openvswitch neutron-openvswitch-insecure
+    juju set neutron-openvswitch-insecure disable-security-groups=True
+    juju deploy nova-compute nova-compute-insecure
+    juju add-relation nova-compute-insecure neutron-openvswitch-insecure
+    ...
+
+These compute nodes could then be accessed by cloud users via use of host aggregates with specific flavors to target instances to hypervisors with no per-instance security.
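The host-aggregate targeting mentioned above is outside the scope of this change; purely as an illustrative sketch (aggregate, flavor, host and credential values are all assumed, not from this commit), the matching scheduling setup via python-novaclient might look like:

    # Hypothetical follow-up: pin a flavor to the insecure hypervisors
    # via a host aggregate. All names below are illustrative.
    from novaclient.v1_1 import client

    nova = client.Client('admin', 'password', 'admin',
                         'http://keystone.example.com:5000/v2.0')
    agg = nova.aggregates.create('insecure', 'nova')        # aggregate + AZ
    nova.aggregates.add_host(agg.id, 'nova-compute-insecure-0')
    nova.aggregates.set_metadata(agg.id, {'insecure': 'true'})
    flavor = nova.flavors.find(name='m1.small')
    flavor.set_keys({'insecure': 'true'})                   # scheduler matches on this key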
@@ -27,6 +27,15 @@ options:
     description: |
       The data port will be added to br-data and will allow usage of flat or VLAN
       network types
+  disable-security-groups:
+    type: boolean
+    default: false
+    description: |
+      Disable neutron based security groups - setting this configuration option
+      will override any settings configured via the neutron-api charm.
+      .
+      BE CAREFUL - this option allows you to disable all port level security within
+      an OpenStack cloud.
   # Network configuration options
   # by default all access is over 'private-address'
   os-data-network:
@@ -20,20 +20,27 @@ from charmhelpers.core.hookenv import (
 )
 
 
-def get_cert():
+def get_cert(cn=None):
+    # TODO: deal with multiple https endpoints via charm config
     cert = config_get('ssl_cert')
     key = config_get('ssl_key')
     if not (cert and key):
         log("Inspecting identity-service relations for SSL certificate.",
             level=INFO)
         cert = key = None
+        if cn:
+            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
+            ssl_key_attr = 'ssl_key_{}'.format(cn)
+        else:
+            ssl_cert_attr = 'ssl_cert'
+            ssl_key_attr = 'ssl_key'
         for r_id in relation_ids('identity-service'):
             for unit in relation_list(r_id):
                 if not cert:
-                    cert = relation_get('ssl_cert',
+                    cert = relation_get(ssl_cert_attr,
                                         rid=r_id, unit=unit)
                 if not key:
-                    key = relation_get('ssl_key',
+                    key = relation_get(ssl_key_attr,
                                        rid=r_id, unit=unit)
     return (cert, key)
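With the new cn parameter, callers can ask for a certificate pair published for a specific canonical name, falling back to the legacy un-suffixed relation keys. A minimal usage sketch (the hostname is illustrative, not from this commit):

    from charmhelpers.contrib.hahelpers.apache import get_cert

    # Looks up ssl_cert_<cn>/ssl_key_<cn> on the identity-service relation.
    cert, key = get_cert(cn='keystone.internal.example.com')
    if not (cert and key):
        cert, key = get_cert()  # plain ssl_cert/ssl_key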
@@ -6,6 +6,11 @@
 # Adam Gandelman <adamg@ubuntu.com>
 #
 
+"""
+Helpers for clustering and determining "cluster leadership" and other
+clustering-related helpers.
+"""
+
 import subprocess
 import os
 
@@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import (
     config as config_get,
     INFO,
     ERROR,
+    WARNING,
     unit_get,
 )
 
@@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception):
     pass
 
 
+def is_elected_leader(resource):
+    """
+    Returns True if the charm executing this is the elected cluster leader.
+
+    It relies on two mechanisms to determine leadership:
+        1. If the charm is part of a corosync cluster, call corosync to
+           determine leadership.
+        2. If the charm is not part of a corosync cluster, the leader is
+           determined as being "the alive unit with the lowest unit number". In
+           other words, the oldest surviving unit.
+    """
+    if is_clustered():
+        if not is_crm_leader(resource):
+            log('Deferring action to CRM leader.', level=INFO)
+            return False
+    else:
+        peers = peer_units()
+        if peers and not oldest_peer(peers):
+            log('Deferring action to oldest service unit.', level=INFO)
+            return False
+    return True
+
+
 def is_clustered():
     for r_id in (relation_ids('ha') or []):
         for unit in (relation_list(r_id) or []):
@@ -38,7 +67,11 @@ def is_clustered():
     return False
 
 
-def is_leader(resource):
+def is_crm_leader(resource):
+    """
+    Returns True if the charm calling this is the elected corosync leader,
+    as returned by calling the external "crm" command.
+    """
     cmd = [
         "crm", "resource",
         "show", resource
@@ -54,15 +87,31 @@ def is_leader(resource):
     return False
 
 
-def peer_units():
+def is_leader(resource):
+    log("is_leader is deprecated. Please consider using is_crm_leader "
+        "instead.", level=WARNING)
+    return is_crm_leader(resource)
+
+
+def peer_units(peer_relation="cluster"):
     peers = []
-    for r_id in (relation_ids('cluster') or []):
+    for r_id in (relation_ids(peer_relation) or []):
         for unit in (relation_list(r_id) or []):
             peers.append(unit)
     return peers
 
 
+def peer_ips(peer_relation='cluster', addr_key='private-address'):
+    '''Return a dict of peers and their private-address'''
+    peers = {}
+    for r_id in relation_ids(peer_relation):
+        for unit in relation_list(r_id):
+            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
+    return peers
+
+
 def oldest_peer(peers):
     """Determines who the oldest peer is by comparing unit numbers."""
     local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
     for peer in peers:
         remote_unit_no = int(peer.split('/')[1])
@@ -72,16 +121,9 @@ def oldest_peer(peers):
 
 
 def eligible_leader(resource):
-    if is_clustered():
-        if not is_leader(resource):
-            log('Deferring action to CRM leader.', level=INFO)
-            return False
-    else:
-        peers = peer_units()
-        if peers and not oldest_peer(peers):
-            log('Deferring action to oldest service unit.', level=INFO)
-            return False
-    return True
+    log("eligible_leader is deprecated. Please consider using "
+        "is_elected_leader instead.", level=WARNING)
+    return is_elected_leader(resource)
 
 
 def https():
@@ -97,10 +139,9 @@ def https():
         return True
     for r_id in relation_ids('identity-service'):
         for unit in relation_list(r_id):
+            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
-                relation_get('ssl_cert', rid=r_id, unit=unit),
-                relation_get('ssl_key', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
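is_elected_leader replaces eligible_leader as the public entry point for "should this unit act" checks. A usage sketch (the resource name and hook body are illustrative, not from this commit):

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    def migrate_database():
        # Only one unit should run schema migrations; followers defer,
        # whether leadership comes from corosync or from unit seniority.
        if not is_elected_leader('res_ks_vip'):
            return
        print('running migrations as the elected leader')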
@@ -1,10 +1,16 @@
 import glob
+import re
+import subprocess
 import sys
 
 from functools import partial
 
+from charmhelpers.core.hookenv import unit_get
 from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
-    ERROR, log,
+    WARNING,
+    ERROR,
+    log
 )
 
 try:
@@ -51,6 +57,8 @@ def get_address_in_network(network, fallback=None, fatal=False):
         else:
             if fatal:
                 not_found_error_out()
+            else:
+                return None
 
     _validate_cidr(network)
     network = netaddr.IPNetwork(network)
@@ -132,7 +140,8 @@ def _get_for_address(address, key):
         if address.version == 4 and netifaces.AF_INET in addresses:
             addr = addresses[netifaces.AF_INET][0]['addr']
             netmask = addresses[netifaces.AF_INET][0]['netmask']
-            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+            cidr = network.cidr
             if address in cidr:
                 if key == 'iface':
                     return iface
@@ -141,11 +150,14 @@ def _get_for_address(address, key):
         if address.version == 6 and netifaces.AF_INET6 in addresses:
             for addr in addresses[netifaces.AF_INET6]:
                 if not addr['addr'].startswith('fe80'):
-                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
-                                                        addr['netmask']))
+                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                           addr['netmask']))
+                    cidr = network.cidr
                     if address in cidr:
                         if key == 'iface':
                             return iface
+                        elif key == 'netmask' and cidr:
+                            return str(cidr).split('/')[1]
                         else:
                             return addr[key]
     return None
@@ -154,3 +166,184 @@ def _get_for_address(address, key):
 get_iface_for_address = partial(_get_for_address, key='iface')
 
 get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def format_ipv6_addr(address):
+    """
+    IPv6 needs to be wrapped with [] in url link to parse correctly.
+    """
+    if is_ipv6(address):
+        address = "[%s]" % address
+    else:
+        log("Not a valid ipv6 address: %s" % address, level=WARNING)
+        address = None
+
+    return address
+
+
+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
+                   fatal=True, exc_list=None):
+    """
+    Return the assigned IP address for a given interface, if any, or [].
+    """
+    # Extract nic if passed /dev/ethX
+    if '/' in iface:
+        iface = iface.split('/')[-1]
+    if not exc_list:
+        exc_list = []
+    try:
+        inet_num = getattr(netifaces, inet_type)
+    except AttributeError:
+        raise Exception('Unknown inet type ' + str(inet_type))
+
+    interfaces = netifaces.interfaces()
+    if inc_aliases:
+        ifaces = []
+        for _iface in interfaces:
+            if iface == _iface or _iface.split(':')[0] == iface:
+                ifaces.append(_iface)
+        if fatal and not ifaces:
+            raise Exception("Invalid interface '%s'" % iface)
+        ifaces.sort()
+    else:
+        if iface not in interfaces:
+            if fatal:
+                raise Exception("%s not found " % (iface))
+            else:
+                return []
+        else:
+            ifaces = [iface]
+
+    addresses = []
+    for netiface in ifaces:
+        net_info = netifaces.ifaddresses(netiface)
+        if inet_num in net_info:
+            for entry in net_info[inet_num]:
+                if 'addr' in entry and entry['addr'] not in exc_list:
+                    addresses.append(entry['addr'])
+    if fatal and not addresses:
+        raise Exception("Interface '%s' doesn't have any %s addresses." %
+                        (iface, inet_type))
+    return addresses
+
+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
+
+
+def get_iface_from_addr(addr):
+    """Work out on which interface the provided address is configured."""
+    for iface in netifaces.interfaces():
+        addresses = netifaces.ifaddresses(iface)
+        for inet_type in addresses:
+            for _addr in addresses[inet_type]:
+                _addr = _addr['addr']
+                # link local
+                ll_key = re.compile("(.+)%.*")
+                raw = re.match(ll_key, _addr)
+                if raw:
+                    _addr = raw.group(1)
+                if _addr == addr:
+                    log("Address '%s' is configured on iface '%s'" %
+                        (addr, iface))
+                    return iface
+
+    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
+    raise Exception(msg)
+
+
+def sniff_iface(f):
+    """If no iface provided, inject net iface inferred from unit private
+    address.
+    """
+    def iface_sniffer(*args, **kwargs):
+        if not kwargs.get('iface', None):
+            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
+
+        return f(*args, **kwargs)
+
+    return iface_sniffer
+
+
+@sniff_iface
+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
+                  dynamic_only=True):
+    """Get assigned IPv6 address for a given interface.
+
+    Returns list of addresses found. If no address found, returns empty list.
+
+    If iface is None, we infer the current primary interface by doing a reverse
+    lookup on the unit private-address.
+
+    We currently only support scope global IPv6 addresses i.e. non-temporary
+    addresses. If no global IPv6 address is found, return the first one found
+    in the ipv6 address list.
+    """
+    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
+                               inc_aliases=inc_aliases, fatal=fatal,
+                               exc_list=exc_list)
+
+    if addresses:
+        global_addrs = []
+        for addr in addresses:
+            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
+            m = re.match(key_scope_link_local, addr)
+            if m:
+                eui_64_mac = m.group(1)
+                iface = m.group(2)
+            else:
+                global_addrs.append(addr)
+
+        if global_addrs:
+            # Make sure any found global addresses are not temporary
+            cmd = ['ip', 'addr', 'show', iface]
+            out = subprocess.check_output(cmd)
+            if dynamic_only:
+                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
+            else:
+                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
+
+            addrs = []
+            for line in out.split('\n'):
+                line = line.strip()
+                m = re.match(key, line)
+                if m and 'temporary' not in line:
+                    # Return the first valid address we find
+                    for addr in global_addrs:
+                        if m.group(1) == addr:
+                            if not dynamic_only or \
+                                    m.group(1).endswith(eui_64_mac):
+                                addrs.append(addr)
+
+            if addrs:
+                return addrs
+
+    if fatal:
+        raise Exception("Interface '%s' doesn't have a scope global "
+                        "non-temporary ipv6 address." % iface)
+
+    return []
+
+
+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
+    """
+    Return a list of bridges on the system or []
+    """
+    b_rgex = vnic_dir + '/*/bridge'
+    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
+
+
+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
+    """
+    Return a list of nics comprising a given bridge on the system or []
+    """
+    brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
+    return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
+
+
+def is_bridge_member(nic):
+    """
+    Check if a given nic is a member of a bridge
+    """
+    for bridge in get_bridges():
+        if nic in get_bridge_nics(bridge):
+            return True
+    return False
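A sketch of how the new IPv6 helpers compose (interface name and address are illustrative); get_ipv6_addr() with no iface argument infers the interface from the unit's private-address via the sniff_iface decorator:

    from charmhelpers.contrib.network.ip import get_ipv6_addr, format_ipv6_addr

    addrs = get_ipv6_addr(iface='eth0', dynamic_only=False)
    if addrs:
        # format_ipv6_addr() returns e.g. '[2001:db8::10]' so the address
        # can be embedded directly in a URL.
        url = "http://%s:8776/v1" % format_ipv6_addr(addrs[0])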
@@ -4,35 +4,68 @@ from charmhelpers.contrib.amulet.deployment import (
 
 
 class OpenStackAmuletDeployment(AmuletDeployment):
-    """This class inherits from AmuletDeployment and has additional support
-    that is specifically for use by OpenStack charms."""
+    """OpenStack amulet deployment.
 
-    def __init__(self, series=None, openstack=None, source=None):
+    This class inherits from AmuletDeployment and has additional support
+    that is specifically for use by OpenStack charms.
+    """
+
+    def __init__(self, series=None, openstack=None, source=None, stable=True):
         """Initialize the deployment environment."""
         super(OpenStackAmuletDeployment, self).__init__(series)
         self.openstack = openstack
         self.source = source
+        self.stable = stable
+        # Note(coreycb): this needs to be changed when new next branches come
+        # out.
+        self.current_next = "trusty"
+
+    def _determine_branch_locations(self, other_services):
+        """Determine the branch locations for the other services.
+
+        Determine if the local branch being tested is derived from its
+        stable or next (dev) branch, and based on this, use the corresponding
+        stable or next branches for the other_services."""
+        base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
+
+        if self.stable:
+            for svc in other_services:
+                temp = 'lp:charms/{}'
+                svc['location'] = temp.format(svc['name'])
+        else:
+            for svc in other_services:
+                if svc['name'] in base_charms:
+                    temp = 'lp:charms/{}'
+                    svc['location'] = temp.format(svc['name'])
+                else:
+                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
+                    svc['location'] = temp.format(self.current_next,
+                                                  svc['name'])
+        return other_services
 
     def _add_services(self, this_service, other_services):
-        """Add services to the deployment and set openstack-origin."""
+        """Add services to the deployment and set openstack-origin/source."""
+        other_services = self._determine_branch_locations(other_services)
+
         super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                              other_services)
-        name = 0
+
         services = other_services
         services.append(this_service)
-        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
+        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
+                      'ceph-osd', 'ceph-radosgw']
 
         if self.openstack:
             for svc in services:
-                if svc[name] not in use_source:
+                if svc['name'] not in use_source:
                     config = {'openstack-origin': self.openstack}
-                    self.d.configure(svc[name], config)
+                    self.d.configure(svc['name'], config)
 
         if self.source:
             for svc in services:
-                if svc[name] in use_source:
+                if svc['name'] in use_source:
                     config = {'source': self.source}
-                    self.d.configure(svc[name], config)
+                    self.d.configure(svc['name'], config)
 
     def _configure_services(self, configs):
         """Configure all of the services."""
@@ -40,11 +73,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):
-        """Return an integer representing the enum value of the openstack
-        release."""
-        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
-            self.precise_havana, self.precise_icehouse, \
-            self.trusty_icehouse = range(6)
+        """Get openstack release.
+
+        Return an integer representing the enum value of the openstack
+        release.
+        """
+        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
+         self.precise_havana, self.precise_icehouse,
+         self.trusty_icehouse) = range(6)
         releases = {
             ('precise', None): self.precise_essex,
             ('precise', 'cloud:precise-folsom'): self.precise_folsom,
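The new stable flag selects which charm branches the test harness deploys against. A sketch of a subclass opting into the next (development) branches (class and series names are illustrative):

    # Hypothetical test deployment running against development branches.
    class NeutronOVSBasicDeployment(OpenStackAmuletDeployment):

        def __init__(self):
            super(NeutronOVSBasicDeployment, self).__init__(series='trusty',
                                                            stable=False)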
@@ -16,8 +16,11 @@ ERROR = logging.ERROR
 
 
 class OpenStackAmuletUtils(AmuletUtils):
-    """This class inherits from AmuletUtils and has additional support
-    that is specifically for use by OpenStack charms."""
+    """OpenStack amulet utilities.
+
+    This class inherits from AmuletUtils and has additional support
+    that is specifically for use by OpenStack charms.
+    """
 
     def __init__(self, log_level=ERROR):
         """Initialize the deployment environment."""
@@ -25,13 +28,17 @@ class OpenStackAmuletUtils(AmuletUtils):
 
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                                public_port, expected):
-        """Validate actual endpoint data vs expected endpoint data. The ports
-        are used to find the matching endpoint."""
+        """Validate endpoint data.
+
+        Validate actual endpoint data vs expected endpoint data. The ports
+        are used to find the matching endpoint.
+        """
         found = False
         for ep in endpoints:
             self.log.debug('endpoint: {}'.format(repr(ep)))
-            if admin_port in ep.adminurl and internal_port in ep.internalurl \
-                    and public_port in ep.publicurl:
+            if (admin_port in ep.adminurl and
+                    internal_port in ep.internalurl and
+                    public_port in ep.publicurl):
                 found = True
                 actual = {'id': ep.id,
                           'region': ep.region,
@@ -47,8 +54,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return 'endpoint not found'
 
     def validate_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate a list of actual service catalog endpoints vs a list of
-        expected service catalog endpoints."""
+        """Validate service catalog endpoint data.
+
+        Validate a list of actual service catalog endpoints vs a list of
+        expected service catalog endpoints.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for k, v in expected.iteritems():
             if k in actual:
@@ -60,8 +70,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_tenant_data(self, expected, actual):
-        """Validate a list of actual tenant data vs list of expected tenant
-        data."""
+        """Validate tenant data.
+
+        Validate a list of actual tenant data vs list of expected tenant
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -78,8 +91,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_role_data(self, expected, actual):
-        """Validate a list of actual role data vs a list of expected role
-        data."""
+        """Validate role data.
+
+        Validate a list of actual role data vs a list of expected role
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -95,8 +111,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_user_data(self, expected, actual):
-        """Validate a list of actual user data vs a list of expected user
-        data."""
+        """Validate user data.
+
+        Validate a list of actual user data vs a list of expected user
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -114,21 +133,24 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_flavor_data(self, expected, actual):
-        """Validate a list of actual flavors vs a list of expected flavors."""
+        """Validate flavor data.
+
+        Validate a list of actual flavors vs a list of expected flavors.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         act = [a.name for a in actual]
         return self._validate_list_data(expected, act)
 
     def tenant_exists(self, keystone, tenant):
-        """Return True if tenant exists"""
+        """Return True if tenant exists."""
         return tenant in [t.name for t in keystone.tenants.list()]
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant):
         """Authenticates admin user with the keystone admin endpoint."""
-        service_ip = \
-            keystone_sentry.relation('shared-db',
-                                     'mysql:shared-db')['private-address']
+        unit = keystone_sentry
+        service_ip = unit.relation('shared-db',
+                                   'mysql:shared-db')['private-address']
         ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
         return keystone_client.Client(username=user, password=password,
                                       tenant_name=tenant, auth_url=ep)
@@ -165,24 +187,53 @@ class OpenStackAmuletUtils(AmuletUtils):
 
         f = opener.open("http://download.cirros-cloud.net/version/released")
         version = f.read().strip()
-        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
+        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
+        local_path = os.path.join('tests', cirros_img)
 
-        if not os.path.exists(cirros_img):
+        if not os.path.exists(local_path):
             cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                   version, cirros_img)
-            opener.retrieve(cirros_url, cirros_img)
+            opener.retrieve(cirros_url, local_path)
         f.close()
 
-        with open(cirros_img) as f:
+        with open(local_path) as f:
             image = glance.images.create(name=image_name, is_public=True,
                                          disk_format='qcow2',
                                          container_format='bare', data=f)
+        count = 1
+        status = image.status
+        while status != 'active' and count < 10:
+            time.sleep(3)
+            image = glance.images.get(image.id)
+            status = image.status
+            self.log.debug('image status: {}'.format(status))
+            count += 1
+
+        if status != 'active':
+            self.log.error('image creation timed out')
+            return None
+
         return image
 
     def delete_image(self, glance, image):
         """Delete the specified image."""
+        num_before = len(list(glance.images.list()))
         glance.images.delete(image)
 
+        count = 1
+        num_after = len(list(glance.images.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(glance.images.list()))
+            self.log.debug('number of images: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('image deletion timed out')
+            return False
+
+        return True
+
     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
         image = nova.images.find(name=image_name)
@@ -199,11 +250,27 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('instance status: {}'.format(status))
             count += 1
 
-        if status == 'BUILD':
+        if status != 'ACTIVE':
             self.log.error('instance creation timed out')
             return None
 
         return instance
 
     def delete_instance(self, nova, instance):
         """Delete the specified instance."""
+        num_before = len(list(nova.servers.list()))
         nova.servers.delete(instance)
 
+        count = 1
+        num_after = len(list(nova.servers.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(nova.servers.list()))
+            self.log.debug('number of instances: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('instance deletion timed out')
+            return False
+
+        return True
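The create/delete helpers above all share a bounded-poll idiom; distilled into a standalone sketch (the tries/delay values mirror the 10 x 3s loops used above, the helper name is ours):

    import time

    def wait_until(check, tries=10, delay=3):
        """Poll check() until it returns True or the attempts run out."""
        for _ in range(tries):
            if check():
                return True
            time.sleep(delay)
        return False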
@@ -8,7 +8,6 @@ from subprocess import (
     check_call
 )
 
-
 from charmhelpers.fetch import (
     apt_install,
     filter_installed_packages,
@@ -28,6 +27,11 @@ from charmhelpers.core.hookenv import (
     INFO
 )
 
+from charmhelpers.core.host import (
+    mkdir,
+    write_file
+)
+
 from charmhelpers.contrib.hahelpers.cluster import (
     determine_apache_port,
     determine_api_port,
@@ -38,13 +42,22 @@ from charmhelpers.contrib.hahelpers.cluster import (
 from charmhelpers.contrib.hahelpers.apache import (
     get_cert,
     get_ca_cert,
+    install_ca_cert,
 )
 
 from charmhelpers.contrib.openstack.neutron import (
     neutron_plugin_attribute,
 )
 
-from charmhelpers.contrib.network.ip import get_address_in_network
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    get_ipv6_addr,
+    get_netmask_for_address,
+    format_ipv6_addr,
+    is_address_in_network
+)
+
+from charmhelpers.contrib.openstack.utils import get_host_ip
 
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 
@@ -165,8 +178,10 @@ class SharedDBContext(OSContextGenerator):
         for rid in relation_ids('shared-db'):
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
+                host = rdata.get('db_host')
+                host = format_ipv6_addr(host) or host
                 ctxt = {
-                    'database_host': rdata.get('db_host'),
+                    'database_host': host,
                     'database': self.database,
                     'database_user': self.user,
                     'database_password': rdata.get(password_setting),
@@ -242,10 +257,15 @@ class IdentityServiceContext(OSContextGenerator):
         for rid in relation_ids('identity-service'):
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
+                serv_host = rdata.get('service_host')
+                serv_host = format_ipv6_addr(serv_host) or serv_host
+                auth_host = rdata.get('auth_host')
+                auth_host = format_ipv6_addr(auth_host) or auth_host
+
                 ctxt = {
                     'service_port': rdata.get('service_port'),
-                    'service_host': rdata.get('service_host'),
-                    'auth_host': rdata.get('auth_host'),
+                    'service_host': serv_host,
+                    'auth_host': auth_host,
                     'auth_port': rdata.get('auth_port'),
                     'admin_tenant_name': rdata.get('service_tenant'),
                     'admin_user': rdata.get('service_username'),
@@ -294,11 +314,13 @@ class AMQPContext(OSContextGenerator):
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
                     ctxt['clustered'] = True
-                    ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
-                                                         unit=unit)
+                    vip = relation_get('vip', rid=rid, unit=unit)
+                    vip = format_ipv6_addr(vip) or vip
+                    ctxt['rabbitmq_host'] = vip
                 else:
-                    ctxt['rabbitmq_host'] = relation_get('private-address',
-                                                         rid=rid, unit=unit)
+                    host = relation_get('private-address', rid=rid, unit=unit)
+                    host = format_ipv6_addr(host) or host
+                    ctxt['rabbitmq_host'] = host
                 ctxt.update({
                     'rabbitmq_user': username,
                     'rabbitmq_password': relation_get('password', rid=rid,
@@ -337,8 +359,9 @@ class AMQPContext(OSContextGenerator):
                     and len(related_units(rid)) > 1:
                 rabbitmq_hosts = []
                 for unit in related_units(rid):
-                    rabbitmq_hosts.append(relation_get('private-address',
-                                                       rid=rid, unit=unit))
+                    host = relation_get('private-address', rid=rid, unit=unit)
+                    host = format_ipv6_addr(host) or host
+                    rabbitmq_hosts.append(host)
                 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
         if not context_complete(ctxt):
             return {}
@@ -367,6 +390,7 @@ class CephContext(OSContextGenerator):
                 ceph_addr = \
                     relation_get('ceph-public-address', rid=rid, unit=unit) or \
                     relation_get('private-address', rid=rid, unit=unit)
+                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                 mon_hosts.append(ceph_addr)
 
             ctxt = {
@@ -387,6 +411,9 @@ class CephContext(OSContextGenerator):
         return ctxt
 
 
+ADDRESS_TYPES = ['admin', 'internal', 'public']
+
+
 class HAProxyContext(OSContextGenerator):
     interfaces = ['cluster']
 
@@ -399,27 +426,79 @@ class HAProxyContext(OSContextGenerator):
         if not relation_ids('cluster'):
             return {}
 
-        cluster_hosts = {}
         l_unit = local_unit().replace('/', '-')
-        cluster_hosts[l_unit] = \
-            get_address_in_network(config('os-internal-network'),
-                                   unit_get('private-address'))
-
-        for rid in relation_ids('cluster'):
-            for unit in related_units(rid):
-                _unit = unit.replace('/', '-')
-                addr = relation_get('private-address', rid=rid, unit=unit)
-                cluster_hosts[_unit] = addr
+        if config('prefer-ipv6'):
+            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
+        else:
+            addr = get_host_ip(unit_get('private-address'))
+
+        cluster_hosts = {}
+
+        # NOTE(jamespage): build out map of configured network endpoints
+        # and associated backends
+        for addr_type in ADDRESS_TYPES:
+            laddr = get_address_in_network(
+                config('os-{}-network'.format(addr_type)))
+            if laddr:
+                cluster_hosts[laddr] = {}
+                cluster_hosts[laddr]['network'] = "{}/{}".format(
+                    laddr,
+                    get_netmask_for_address(laddr)
+                )
+                cluster_hosts[laddr]['backends'] = {}
+                cluster_hosts[laddr]['backends'][l_unit] = laddr
+                for rid in relation_ids('cluster'):
+                    for unit in related_units(rid):
+                        _unit = unit.replace('/', '-')
+                        _laddr = relation_get('{}-address'.format(addr_type),
+                                              rid=rid, unit=unit)
+                        if _laddr:
+                            cluster_hosts[laddr]['backends'][_unit] = _laddr
+
+        # NOTE(jamespage) no split configurations found, just use
+        # private addresses
+        if not cluster_hosts:
+            cluster_hosts[addr] = {}
+            cluster_hosts[addr]['network'] = "{}/{}".format(
+                addr,
+                get_netmask_for_address(addr)
+            )
+            cluster_hosts[addr]['backends'] = {}
+            cluster_hosts[addr]['backends'][l_unit] = addr
+            for rid in relation_ids('cluster'):
+                for unit in related_units(rid):
+                    _unit = unit.replace('/', '-')
+                    _laddr = relation_get('private-address',
+                                          rid=rid, unit=unit)
+                    if _laddr:
+                        cluster_hosts[addr]['backends'][_unit] = _laddr
 
         ctxt = {
-            'units': cluster_hosts,
+            'frontends': cluster_hosts,
         }
-        if len(cluster_hosts.keys()) > 1:
-            # Enable haproxy when we have enough peers.
-            log('Ensuring haproxy enabled in /etc/default/haproxy.')
-            with open('/etc/default/haproxy', 'w') as out:
-                out.write('ENABLED=1\n')
-            return ctxt
+
+        if config('haproxy-server-timeout'):
+            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
+        if config('haproxy-client-timeout'):
+            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
+
+        if config('prefer-ipv6'):
+            ctxt['local_host'] = 'ip6-localhost'
+            ctxt['haproxy_host'] = '::'
+            ctxt['stat_port'] = ':::8888'
+        else:
+            ctxt['local_host'] = '127.0.0.1'
+            ctxt['haproxy_host'] = '0.0.0.0'
+            ctxt['stat_port'] = ':8888'
+
+        for frontend in cluster_hosts:
+            if len(cluster_hosts[frontend]['backends']) > 1:
+                # Enable haproxy when we have enough peers.
+                log('Ensuring haproxy enabled in /etc/default/haproxy.')
+                with open('/etc/default/haproxy', 'w') as out:
+                    out.write('ENABLED=1\n')
+                return ctxt
         log('HAProxy context is incomplete, this unit has no peers.')
         return {}
 
@@ -474,22 +553,36 @@ class ApacheSSLContext(OSContextGenerator):
         cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
         check_call(cmd)
 
-    def configure_cert(self):
-        if not os.path.isdir('/etc/apache2/ssl'):
-            os.mkdir('/etc/apache2/ssl')
+    def configure_cert(self, cn=None):
         ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
-        if not os.path.isdir(ssl_dir):
-            os.mkdir(ssl_dir)
-        cert, key = get_cert()
-        with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
-            cert_out.write(b64decode(cert))
-        with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
-            key_out.write(b64decode(key))
+        mkdir(path=ssl_dir)
+        cert, key = get_cert(cn)
+        if cn:
+            cert_filename = 'cert_{}'.format(cn)
+            key_filename = 'key_{}'.format(cn)
+        else:
+            cert_filename = 'cert'
+            key_filename = 'key'
+        write_file(path=os.path.join(ssl_dir, cert_filename),
+                   content=b64decode(cert))
+        write_file(path=os.path.join(ssl_dir, key_filename),
+                   content=b64decode(key))
 
     def configure_ca(self):
         ca_cert = get_ca_cert()
         if ca_cert:
-            with open(CA_CERT_PATH, 'w') as ca_out:
-                ca_out.write(b64decode(ca_cert))
-            check_call(['update-ca-certificates'])
+            install_ca_cert(b64decode(ca_cert))
+
+    def canonical_names(self):
+        '''Figure out which canonical names clients will access this service'''
+        cns = []
+        for r_id in relation_ids('identity-service'):
+            for unit in related_units(r_id):
+                rdata = relation_get(rid=r_id, unit=unit)
+                for k in rdata:
+                    if k.startswith('ssl_key_'):
+                        cns.append(k.lstrip('ssl_key_'))
+        return list(set(cns))
 
     def __call__(self):
         if isinstance(self.external_ports, basestring):
@@ -497,21 +590,47 @@ class ApacheSSLContext(OSContextGenerator):
         if (not self.external_ports or not https()):
             return {}
 
-        self.configure_cert()
         self.configure_ca()
         self.enable_modules()
 
         ctxt = {
             'namespace': self.service_namespace,
-            'private_address': unit_get('private-address'),
-            'endpoints': []
+            'endpoints': [],
+            'ext_ports': []
         }
-        if is_clustered():
-            ctxt['private_address'] = config('vip')
-        for api_port in self.external_ports:
-            ext_port = determine_apache_port(api_port)
-            int_port = determine_api_port(api_port)
-            portmap = (int(ext_port), int(int_port))
-            ctxt['endpoints'].append(portmap)
+
+        for cn in self.canonical_names():
+            self.configure_cert(cn)
+
+        addresses = []
+        vips = []
+        if config('vip'):
+            vips = config('vip').split()
+
+        for network_type in ['os-internal-network',
+                             'os-admin-network',
+                             'os-public-network']:
+            address = get_address_in_network(config(network_type),
+                                             unit_get('private-address'))
+            if len(vips) > 0 and is_clustered():
+                for vip in vips:
+                    if is_address_in_network(config(network_type),
+                                             vip):
+                        addresses.append((address, vip))
+                        break
+            elif is_clustered():
+                addresses.append((address, config('vip')))
+            else:
+                addresses.append((address, address))
+
+        for address, endpoint in set(addresses):
+            for api_port in self.external_ports:
+                ext_port = determine_apache_port(api_port)
+                int_port = determine_api_port(api_port)
+                portmap = (address, endpoint, int(ext_port), int(int_port))
+                ctxt['endpoints'].append(portmap)
+                ctxt['ext_ports'].append(int(ext_port))
+        ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
         return ctxt
 
 
@@ -641,22 +760,22 @@ class NeutronContext(OSContextGenerator):
 
 class OSConfigFlagContext(OSContextGenerator):
 
-        """
-        Responsible for adding user-defined config-flags in charm config to a
-        template context.
+    """
+    Responsible for adding user-defined config-flags in charm config to a
+    template context.
 
-        NOTE: the value of config-flags may be a comma-separated list of
-              key=value pairs and some Openstack config files support
-              comma-separated lists as values.
-        """
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some Openstack config files support
+          comma-separated lists as values.
+    """
 
-        def __call__(self):
-            config_flags = config('config-flags')
-            if not config_flags:
-                return {}
+    def __call__(self):
+        config_flags = config('config-flags')
+        if not config_flags:
+            return {}
 
-            flags = config_flags_parser(config_flags)
-            return {'user_config_flags': flags}
+        flags = config_flags_parser(config_flags)
+        return {'user_config_flags': flags}
 
 
 class SubordinateConfigContext(OSContextGenerator):
@@ -753,6 +872,17 @@ class SubordinateConfigContext(OSContextGenerator):
         return ctxt
 
 
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+        return ctxt
+
+
 class SyslogContext(OSContextGenerator):
 
     def __call__(self):
@@ -760,3 +890,35 @@ class SyslogContext(OSContextGenerator):
             'use_syslog': config('use-syslog')
         }
         return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+
+    def __call__(self):
+        if config('prefer-ipv6'):
+            return {
+                'bind_host': '::'
+            }
+        else:
+            return {
+                'bind_host': '0.0.0.0'
+            }
+
+
+class WorkerConfigContext(OSContextGenerator):
+
+    @property
+    def num_cpus(self):
+        try:
+            from psutil import NUM_CPUS
+        except ImportError:
+            apt_install('python-psutil', fatal=True)
+            from psutil import NUM_CPUS
+        return NUM_CPUS
+
+    def __call__(self):
+        multiplier = config('worker-multiplier') or 1
+        ctxt = {
+            "workers": self.num_cpus * multiplier
+        }
+        return ctxt
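A sketch of the data shape HAProxyContext now emits (addresses and unit names are illustrative): one entry per configured network, each carrying its own backend map, which the haproxy template later in this diff turns into ACL-routed frontends:

    ctxt = {
        'frontends': {
            '10.10.10.2': {
                'network': '10.10.10.2/24',
                'backends': {
                    'unit-api-0': '10.10.10.2',
                    'unit-api-1': '10.10.10.3',
                },
            },
        },
    }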
@@ -7,6 +7,7 @@ from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
+    get_ipv6_addr,
 )
 
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -64,10 +65,13 @@ def resolve_address(endpoint_type=PUBLIC):
                                      vip):
                 resolved_address = vip
     else:
+        if config('prefer-ipv6'):
+            fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
+        else:
+            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
         resolved_address = get_address_in_network(
-            config(_address_map[endpoint_type]['config']),
-            unit_get(_address_map[endpoint_type]['fallback'])
-        )
+            config(_address_map[endpoint_type]['config']), fallback_addr)
 
     if resolved_address is None:
         raise ValueError('Unable to resolve a suitable IP address'
                          ' based on charm state and configuration')
@@ -1,6 +1,6 @@
 global
-    log 127.0.0.1 local0
-    log 127.0.0.1 local1 notice
+    log {{ local_host }} local0
+    log {{ local_host }} local1 notice
     maxconn 20000
     user haproxy
     group haproxy
@@ -14,10 +14,19 @@ defaults
     retries 3
     timeout queue 1000
     timeout connect 1000
+{% if haproxy_client_timeout -%}
+    timeout client {{ haproxy_client_timeout }}
+{% else -%}
     timeout client 30000
-    timeout server 30000
+{% endif -%}
 
-listen stats :8888
+{% if haproxy_server_timeout -%}
+    timeout server {{ haproxy_server_timeout }}
+{% else -%}
+    timeout server 30000
+{% endif -%}
+
+listen stats {{ stat_port }}
     mode http
     stats enable
     stats hide-version
@@ -25,17 +34,21 @@ listen stats :8888
     stats uri /
     stats auth admin:password
 
-{% if units -%}
+{% if frontends -%}
 {% for service, ports in service_ports.iteritems() -%}
-listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
-    balance roundrobin
-    {% for unit, address in units.iteritems() -%}
-    server {{ unit }} {{ address }}:{{ ports[1] }} check
+frontend tcp-in_{{ service }}
+    bind *:{{ ports[0] }}
+    bind :::{{ ports[0] }}
+    {% for frontend in frontends -%}
+    acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
+    use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
     {% endfor %}
-listen {{ service }}_ipv6 :::{{ ports[0] }}
-    balance roundrobin
-    {% for unit, address in units.iteritems() -%}
+{% for frontend in frontends -%}
+backend {{ service }}_{{ frontend }}
+    balance leastconn
+    {% for unit, address in frontends[frontend]['backends'].iteritems() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
     {% endfor %}
 {% endfor -%}
+{% endfor -%}
 {% endif -%}
@@ -1,16 +1,18 @@
 {% if endpoints -%}
-{% for ext, int in endpoints -%}
-Listen {{ ext }}
-NameVirtualHost *:{{ ext }}
-<VirtualHost *:{{ ext }}>
-    ServerName {{ private_address }}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
     SSLEngine on
-    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
-    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
     ProxyPass / http://localhost:{{ int }}/
     ProxyPassReverse / http://localhost:{{ int }}/
     ProxyPreserveHost on
 </VirtualHost>
+{% endfor -%}
 <Proxy *>
     Order deny,allow
     Allow from all
@@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }}
     Order allow,deny
     Allow from all
 </Location>
-{% endfor -%}
 {% endif -%}
@@ -1,16 +1,18 @@
 {% if endpoints -%}
-{% for ext, int in endpoints -%}
-Listen {{ ext }}
-NameVirtualHost *:{{ ext }}
-<VirtualHost *:{{ ext }}>
-    ServerName {{ private_address }}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+    ServerName {{ endpoint }}
     SSLEngine on
-    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
-    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
     ProxyPass / http://localhost:{{ int }}/
     ProxyPassReverse / http://localhost:{{ int }}/
     ProxyPreserveHost on
 </VirtualHost>
+{% endfor -%}
 <Proxy *>
     Order deny,allow
     Allow from all
@@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }}
     Order allow,deny
     Allow from all
 </Location>
-{% endfor -%}
 {% endif -%}
@@ -4,6 +4,7 @@
 from collections import OrderedDict
 
 import subprocess
+import json
 import os
 import socket
 import sys
@@ -13,7 +14,9 @@ from charmhelpers.core.hookenv import (
     log as juju_log,
     charm_dir,
     ERROR,
-    INFO
+    INFO,
+    relation_ids,
+    relation_set
 )
 
 from charmhelpers.contrib.storage.linux.lvm import (
@@ -22,8 +25,12 @@ from charmhelpers.contrib.storage.linux.lvm import (
     remove_lvm_physical_volume,
 )
 
+from charmhelpers.contrib.network.ip import (
+    get_ipv6_addr
+)
+
 from charmhelpers.core.host import lsb_release, mounts, umount
-from charmhelpers.fetch import apt_install
+from charmhelpers.fetch import apt_install, apt_cache
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 
@@ -70,6 +77,9 @@ SWIFT_CODENAMES = OrderedDict([
     ('1.13.0', 'icehouse'),
     ('1.12.0', 'icehouse'),
     ('1.11.0', 'icehouse'),
+    ('2.0.0', 'juno'),
+    ('2.1.0', 'juno'),
+    ('2.2.0', 'juno'),
 ])
 
 DEFAULT_LOOPBACK_SIZE = '5G'
@@ -134,13 +144,8 @@ def get_os_version_codename(codename):
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
-    import apt_pkg as apt
-    apt.init()
-
-    # Tell apt to build an in-memory cache to prevent race conditions (if
-    # another process is already building the cache).
-    apt.config.set("Dir::Cache::pkgcache", "")
-
-    cache = apt.Cache()
+    cache = apt_cache()
 
     try:
         pkg = cache[package]
@@ -461,3 +466,21 @@ def get_hostname(address, fqdn=True):
         return result
     else:
         return result.split('.')[0]
+
+
+def sync_db_with_multi_ipv6_addresses(database, database_user,
+                                      relation_prefix=None):
+    hosts = get_ipv6_addr(dynamic_only=False)
+
+    kwargs = {'database': database,
+              'username': database_user,
+              'hostname': json.dumps(hosts)}
+
+    if relation_prefix:
+        keys = kwargs.keys()
+        for key in keys:
+            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
+            del kwargs[key]
+
+    for rid in relation_ids('shared-db'):
+        relation_set(relation_id=rid, **kwargs)
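An illustration (all values assumed) of the relation data the new helper publishes from inside a hook: every IPv6 address of the unit, JSON-encoded, optionally namespaced with a per-service prefix:

    sync_db_with_multi_ipv6_addresses('nova', 'nova', relation_prefix='nova')
    # Roughly equivalent to, on each shared-db relation:
    #   relation_set(nova_database='nova', nova_username='nova',
    #                nova_hostname='["2001:db8::10", "2001:db8::11"]')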
@@ -46,5 +46,8 @@ def is_device_mounted(device):
     :returns: boolean: True if the path represents a mounted device, False if
         it doesn't.
     '''
+    is_partition = bool(re.search(r".*[0-9]+\b", device))
     out = check_output(['mount'])
+    if is_partition:
+        return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))
@@ -156,12 +156,15 @@ def hook_name():
 
 
 class Config(dict):
-    """A Juju charm config dictionary that can write itself to
-    disk (as json) and track which values have changed since
-    the previous hook invocation.
+    """A dictionary representation of the charm's config.yaml, with some
+    extra features:
 
-    Do not instantiate this object directly - instead call
-    ``hookenv.config()``
+    - See which values in the dictionary have changed since the previous hook.
+    - For values that have changed, see what the previous value was.
+    - Store arbitrary data for use in a later hook.
+
+    NOTE: Do not instantiate this object directly - instead call
+    ``hookenv.config()``, which will return an instance of :class:`Config`.
 
     Example usage::
 
@@ -170,8 +173,8 @@ class Config(dict):
         >>> config = hookenv.config()
         >>> config['foo']
         'bar'
+        >>> # store a new key/value for later use
         >>> config['mykey'] = 'myval'
-        >>> config.save()
 
 
         >>> # user runs `juju set mycharm foo=baz`
@@ -188,22 +191,34 @@ class Config(dict):
         >>> # keys/values that we add are preserved across hooks
         >>> config['mykey']
         'myval'
-        >>> # don't forget to save at the end of hook!
-        >>> config.save()
 
     """
     CONFIG_FILE_NAME = '.juju-persistent-config'
 
     def __init__(self, *args, **kw):
         super(Config, self).__init__(*args, **kw)
+        self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
         if os.path.exists(self.path):
             self.load_previous()
 
+    def __getitem__(self, key):
+        """For regular dict lookups, check the current juju config first,
+        then the previous (saved) copy. This ensures that user-saved values
+        will be returned by a dict lookup.
+
+        """
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            return (self._prev_dict or {})[key]
+
     def load_previous(self, path=None):
-        """Load previous copy of config from disk so that current values
-        can be compared to previous values.
+        """Load previous copy of config from disk.
+
+        In normal usage you don't need to call this method directly - it
+        is called automatically at object initialization.
 
         :param path:
 
@@ -218,8 +233,8 @@ class Config(dict):
             self._prev_dict = json.load(f)
 
     def changed(self, key):
-        """Return true if the value for this key has changed since
-        the last save.
+        """Return True if the current value for this key is different from
+        the previous value.
 
         """
         if self._prev_dict is None:
@@ -228,7 +243,7 @@ class Config(dict):
 
     def previous(self, key):
         """Return previous value for this key, or None if there
-        is no "previous" value.
+        is no previous value.
 
         """
         if self._prev_dict:
@@ -238,7 +253,13 @@ class Config(dict):
     def save(self):
         """Save this config to disk.
 
-        Preserves items in _prev_dict that do not exist in self.
+        If the charm is using the :mod:`Services Framework <services.base>`
+        or :meth:`@hook <Hooks.hook>` decorator, this
+        is called automatically at the end of successful hook execution.
+        Otherwise, it should be called directly by user code.
+
+        To disable automatic saves, set ``implicit_save=False`` on this
+        instance.
 
         """
         if self._prev_dict:
@@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None):
             raise
 
 
-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
@@ -464,9 +486,10 @@ class Hooks(object):
         hooks.execute(sys.argv)
     """
 
-    def __init__(self):
+    def __init__(self, config_save=True):
         super(Hooks, self).__init__()
         self._hooks = {}
+        self._config_save = config_save
 
     def register(self, name, function):
         """Register a hook"""
@@ -477,6 +500,10 @@ class Hooks(object):
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
             self._hooks[hook_name]()
+            if self._config_save:
+                cfg = config()
+                if cfg.implicit_save:
+                    cfg.save()
         else:
             raise UnregisteredHookError(hook_name)
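A sketch of a charm opting out of the new implicit config save and persisting explicitly (the hook body is illustrative):

    hooks = Hooks(config_save=False)

    @hooks.hook('config-changed')
    def config_changed():
        cfg = config()
        cfg['mykey'] = 'myval'  # stored for later hooks
        cfg.save()              # explicit, since implicit save is off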
@ -12,6 +12,8 @@ import random
|
||||
import string
|
||||
import subprocess
|
||||
import hashlib
|
||||
import shutil
|
||||
from contextlib import contextmanager
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
@ -52,7 +54,7 @@ def service(action, service_name):
|
||||
def service_running(service):
|
||||
"""Determine whether a system service is running"""
|
||||
try:
|
||||
output = subprocess.check_output(['service', service, 'status'])
|
||||
output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
@ -62,6 +64,16 @@ def service_running(service):
|
||||
return False
|
||||
|
||||
|
||||
def service_available(service_name):
|
||||
"""Determine whether a system service is available"""
|
||||
try:
|
||||
subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
|
||||
except subprocess.CalledProcessError as e:
|
||||
return 'unrecognized service' not in e.output
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
||||
"""Add a user to the system"""
|
||||
try:
|
||||
@ -197,10 +209,15 @@ def mounts():
|
||||
return system_mounts
|
||||
|
||||
|
||||
def file_hash(path):
|
||||
"""Generate a md5 hash of the contents of 'path' or None if not found """
|
||||
def file_hash(path, hash_type='md5'):
|
||||
"""
|
||||
Generate a hash checksum of the contents of 'path' or None if not found.
|
||||
|
||||
:param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
|
||||
such as md5, sha1, sha256, sha512, etc.
|
||||
"""
|
||||
if os.path.exists(path):
|
||||
h = hashlib.md5()
|
||||
h = getattr(hashlib, hash_type)()
|
||||
with open(path, 'r') as source:
|
||||
h.update(source.read()) # IGNORE:E1101 - it does have update
|
||||
return h.hexdigest()
|
||||
@ -208,6 +225,26 @@ def file_hash(path):
|
||||
return None
|
||||


def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.

:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum

"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))


class ChecksumError(ValueError):
pass
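`check_hash` raises instead of returning a boolean, so callers can simply let a bad download abort the hook. A sketch with a placeholder digest:

    from charmhelpers.core.host import check_hash, ChecksumError

    try:
        # 'deadbeef' stands in for a real hex digest.
        check_hash('/tmp/archive.tgz', 'deadbeef', hash_type='sha256')
    except ChecksumError as e:
        print('validation failed: %s' % e)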


def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing

@ -320,12 +357,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None):

'''
import apt_pkg
from charmhelpers.fetch import apt_cache
if not pkgcache:
apt_pkg.init()
# Force Apt to build its cache in memory. That way we avoid race
# conditions with other applications building the cache in the same
# place.
apt_pkg.config.set("Dir::Cache::pkgcache", "")
pkgcache = apt_pkg.Cache()
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)


@contextmanager
def chdir(d):
cur = os.getcwd()
try:
yield os.chdir(d)
finally:
os.chdir(cur)


def chownr(path, owner, group):
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid

for root, dirs, files in os.walk(path):
for name in dirs + files:
full = os.path.join(root, name)
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
if not broken_symlink:
os.chown(full, uid, gid)
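Both new helpers are small conveniences: `chdir` restores the previous working directory even if the block raises, and `chownr` walks a tree while skipping broken symlinks. A sketch with hypothetical paths and owners:

    from charmhelpers.core.host import chdir, chownr

    with chdir('/var/lib/example'):   # hypothetical path
        pass  # previous cwd is restored on exit, even after an exception

    chownr('/var/lib/example', owner='ubuntu', group='ubuntu')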

@ -1,4 +1,5 @@
import importlib
from tempfile import NamedTemporaryFile
import time
from yaml import safe_load
from charmhelpers.core.host import (
@ -116,14 +117,7 @@ class BaseFetchHandler(object):

def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
import apt_pkg
apt_pkg.init()

# Tell apt to build an in-memory cache to prevent race conditions (if
# another process is already building the cache).
apt_pkg.config.set("Dir::Cache::pkgcache", "")

cache = apt_pkg.Cache()
cache = apt_cache()
_pkgs = []
for package in packages:
try:
@ -136,6 +130,16 @@ def filter_installed_packages(packages):
return _pkgs


def apt_cache(in_memory=True):
"""Build and return an apt cache"""
import apt_pkg
apt_pkg.init()
if in_memory:
apt_pkg.config.set("Dir::Cache::pkgcache", "")
apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
return apt_pkg.Cache()
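Routing all cache construction through `apt_cache` gives every caller the same race protection: with `in_memory=True`, apt is told not to write its binary cache files, so two processes building the cache at once cannot corrupt each other. A hedged sketch of a caller, mirroring how `cmp_pkgrevno` uses it:

    from charmhelpers.fetch import apt_cache

    cache = apt_cache()               # in-memory cache by default
    pkg = cache['openvswitch-switch']
    if pkg.current_ver:
        print(pkg.current_ver.ver_str)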


def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
if options is None:
@ -201,6 +205,28 @@ def apt_hold(packages, fatal=False):


def add_source(source, key=None):
"""Add a package source to this system.

@param source: a URL or sources.list entry, as supported by
add-apt-repository(1). Examples::

ppa:charmers/example
deb https://stub:key@private.example.com/ubuntu trusty main

In addition:
'proposed:' may be used to enable the standard 'proposed'
pocket for the release.
'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse'

@param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver,
placing your Juju environment at risk. ppa and cloud archive keys
are securely added automatically, so should not be provided.
"""
if source is None:
log('Source is not present. Skipping')
return
@ -225,10 +251,23 @@ def add_source(source, key=None):
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
else:
raise SourceConfigError("Unknown source: {!r}".format(source))

if key:
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv',
key])
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile() as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
else:
# Note that hkp: is in no way a secure protocol. Using a
# GPG key id is pointless from a security POV unless you
# absolutely trust your network and DNS.
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv',
key])
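The key handling now branches on the key's form: a full ASCII-armoured block is piped straight into `apt-key add -` with no network lookup, and only a bare key id falls back to the insecure hkp keyserver fetch. A sketch of both call styles (URLs and key values are illustrative):

    from charmhelpers.fetch import add_source

    # Preferred: ship the full ASCII-armoured public key.
    add_source('deb https://private.example.com/ubuntu trusty main',
               key='-----BEGIN PGP PUBLIC KEY BLOCK-----\n'
                   '...\n'
                   '-----END PGP PUBLIC KEY BLOCK-----')

    # Fallback: a bare key id is fetched over hkp, which the code
    # itself notes is insecure.
    add_source('deb https://mirror.example.com/ubuntu trusty main',
               key='A1B2C3D4')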


def configure_sources(update=False,
@ -238,7 +277,8 @@ def configure_sources(update=False,
Configure multiple sources from charm configuration.

The lists are encoded as yaml fragments in the configuration.
The frament needs to be included as a string.
The fragment needs to be included as a string. Sources and their
corresponding keys are of the types supported by add_source().

Example config:
install_sources: |
@ -272,22 +312,35 @@ def configure_sources(update=False,
apt_update(fatal=True)
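`configure_sources` is the usual consumer of `add_source`: it reads YAML-encoded lists from charm configuration and registers each source/key pair. A minimal sketch, assuming the default config option names shown in its docstring:

    from charmhelpers.fetch import configure_sources

    # Reads the 'install_sources' and 'install_keys' charm config
    # options, registers each pair via add_source(), then refreshes
    # the package index because update=True.
    configure_sources(update=True)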


def install_remote(source):
def install_remote(source, *args, **kwargs):
"""
Install a file tree from a remote source

The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]

Schemes supported are based on this modules submodules
Options supported are submodule-specific"""
Schemes supported are based on this module's submodules.
Options supported are submodule-specific.
Additional arguments are passed through to the submodule.

For example::

dest = install_remote('http://example.com/archive.tgz',
checksum='deadbeef',
hash_type='sha1')

This will download `archive.tgz`, validate it using SHA1 and, if
the file is ok, extract it and return the directory in which it
was extracted. If the checksum fails, it will raise
:class:`charmhelpers.core.host.ChecksumError`.
"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source)
installed_to = handler.install(source, *args, **kwargs)
except UnhandledSource:
pass
if not installed_to:
@ -1,6 +1,8 @@
import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib

from charmhelpers.fetch import (
BaseFetchHandler,
@ -10,11 +12,19 @@ from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir
from charmhelpers.core.host import mkdir, check_hash


class ArchiveUrlFetchHandler(BaseFetchHandler):
"""Handler for archives via generic URLs"""
"""
Handler to download archive files from arbitrary URLs.

Can fetch from http, https, ftp, and file URLs.

Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.

Installs the contents of the archive in $CHARM_DIR/fetched/.
"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
return False
def download(self, source, dest):
"""
Download an archive file.

:param str source: URL pointing to an archive file.
:param str dest: Local path location to download archive file to.
"""
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@ -48,7 +64,30 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
os.unlink(dest)
raise e

def install(self, source):
# Mandatory file validation via SHA1 or MD5 hashing.
def download_and_validate(self, url, hashsum, validate="sha1"):
tempfile, headers = urlretrieve(url)
check_hash(tempfile, hashsum, validate)
return tempfile

def install(self, source, dest=None, checksum=None, hash_type='sha1'):
"""
Download and install an archive file, with optional checksum validation.

The checksum can also be given on the `source` URL's fragment.
For example::

handler.install('http://example.com/file.tgz#sha1=deadbeef')

:param str source: URL pointing to an archive file.
:param str dest: Local destination path to install to. If not given,
installs to `$CHARM_DIR/archives/archive_file_name`.
:param str checksum: If given, validate the archive file after download.
:param str hash_type: Algorithm used to generate `checksum`.
Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.

"""
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
@ -60,4 +99,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
return extract(dld_file)
options = urlparse.parse_qs(url_parts.fragment)
for key, value in options.items():
if key in hashlib.algorithms:
check_hash(dld_file, value, key)
if checksum:
check_hash(dld_file, checksum, hash_type)
return extract(dld_file, dest)
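The fragment support leans on `urlparse.parse_qs`, which turns `#sha1=deadbeef` into `{'sha1': ['deadbeef']}`; any fragment key naming a member of `hashlib.algorithms` is treated as a checksum to verify. The parsing step in isolation (Python 2, matching the module's imports):

    import urlparse
    import hashlib

    url = 'http://example.com/file.tgz#sha1=deadbeef'
    options = urlparse.parse_qs(urlparse.urlparse(url).fragment)
    # parse_qs yields {'sha1': ['deadbeef']}; keys naming a hashlib
    # algorithm are checked against the downloaded file.
    assert [k for k in options if k in hashlib.algorithms] == ['sha1']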

@ -37,6 +37,9 @@ def _neutron_api_settings():
'neutron_security_groups': rdata['neutron-security-groups'],
'overlay_network_type': rdata['overlay-network-type'],
}
# Override with configuration if set to true
if config('disable-security-groups'):
neutron_settings['neutron_security_groups'] = False
return neutron_settings
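This is the core of the new option: the relation value normally wins, but a local `disable-security-groups=True` forces security groups off for just the units of this application. A toy Python mirror of that precedence (illustrative only, not the charm's own code):

    def effective_security_groups(relation_value, disable_config):
        """Illustrative mirror of the override logic."""
        return False if disable_config else relation_value

    assert effective_security_groups(True, False) is True
    assert effective_security_groups(True, True) is False
    assert effective_security_groups(False, False) is False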

@ -31,7 +31,7 @@ tunnel_types = {{ overlay_network_type }}
l2_population = {{ l2_population }}

[securitygroup]
{% if neutron_security_groups == 'True' -%}
{% if neutron_security_groups -%}
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
{% else -%}
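The template change is required because the context now carries a real boolean: in Jinja2, `True == 'True'` is false, so the old string comparison would render the insecure branch even when security groups were enabled. A small demonstration, assuming the `jinja2` package:

    from jinja2 import Template

    tmpl = Template(
        "{% if v == 'True' %}cmp{% elif v %}bool{% else %}off{% endif %}")
    assert tmpl.render(v='True') == 'cmp'   # old relation data: strings
    assert tmpl.render(v=True) == 'bool'    # new context: real booleans
    assert tmpl.render(v=False) == 'off'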

@ -88,7 +88,7 @@ class OVSPluginContextTest(CharmTestCase):
_is_clus.return_value = False
self.related_units.return_value = ['unit1']
self.relation_ids.return_value = ['rid2']
self.test_relation.set({'neutron-security-groups': 'yes',
self.test_relation.set({'neutron-security-groups': True,
'l2-population': True,
'overlay-network-type': 'gre',
})
@ -97,7 +97,60 @@ class OVSPluginContextTest(CharmTestCase):
napi_ctxt = context.OVSPluginContext()
expect = {
'neutron_alchemy_flags': {},
'neutron_security_groups': 'yes',
'neutron_security_groups': True,
'verbose': True,
'local_ip': '127.0.0.15',
'config': 'neutron.randomconfig',
'use_syslog': True,
'network_manager': 'neutron',
'debug': True,
'core_plugin': 'neutron.randomdriver',
'neutron_plugin': 'ovs',
'neutron_url': 'https://127.0.0.13:9696',
'l2_population': True,
'overlay_network_type': 'gre',
}
self.assertEquals(expect, napi_ctxt())
self.service_start.assertCalled()

@patch.object(charmhelpers.contrib.openstack.context, 'config')
@patch.object(charmhelpers.contrib.openstack.context, 'unit_get')
@patch.object(charmhelpers.contrib.openstack.context, 'is_clustered')
@patch.object(charmhelpers.contrib.openstack.context, 'https')
@patch.object(context.OVSPluginContext, '_save_flag_file')
@patch.object(context.OVSPluginContext, '_ensure_packages')
@patch.object(charmhelpers.contrib.openstack.context,
'neutron_plugin_attribute')
@patch.object(charmhelpers.contrib.openstack.context, 'unit_private_ip')
def test_neutroncc_context_api_rel_disable_security(self,
_unit_priv_ip, _npa,
_ens_pkgs, _save_ff,
_https, _is_clus,
_unit_get,
_config):
def mock_npa(plugin, section, manager):
if section == "driver":
return "neutron.randomdriver"
if section == "config":
return "neutron.randomconfig"
_npa.side_effect = mock_npa
_config.return_value = 'ovs'
_unit_get.return_value = '127.0.0.13'
_unit_priv_ip.return_value = '127.0.0.14'
_is_clus.return_value = False
self.test_config.set('disable-security-groups', True)
self.related_units.return_value = ['unit1']
self.relation_ids.return_value = ['rid2']
self.test_relation.set({'neutron-security-groups': True,
'l2-population': True,
'overlay-network-type': 'gre',
})
self.get_host_ip.return_value = '127.0.0.15'
self.service_running.return_value = False
napi_ctxt = context.OVSPluginContext()
expect = {
'neutron_alchemy_flags': {},
'neutron_security_groups': False,
'verbose': True,
'local_ip': '127.0.0.15',
'config': 'neutron.randomconfig',

@ -38,6 +38,7 @@ class NeutronOVSHooksTests(CharmTestCase):
super(NeutronOVSHooksTests, self).setUp(hooks, TO_PATCH)

self.config.side_effect = self.test_config.get
hooks.hooks._config_save = False

def _call_hook(self, hookname):
hooks.hooks.execute([