[hopem,r=]

Ceilometer api HA
This commit is contained in:
Edward Hope-Morley
2014-10-21 19:49:06 +01:00
parent 2507541dad
commit 0b13902584
26 changed files with 1022 additions and 156 deletions

View File

@@ -84,4 +84,24 @@ options:
192.168.0.0/24) 192.168.0.0/24)
. .
This network will be used for public endpoints. This network will be used for public endpoints.
# HA configuration settings
vip:
type: string
default:
description: |
Virtual IP(s) to use to front API services in HA configuration.
.
If multiple networks are being used, a VIP should be provided for each
network, separated by spaces.
ha-bindiface:
type: string
default: eth0
description: |
Default network interface on which the HA cluster will bind for
communication with the other members of the HA cluster.
ha-mcastport:
type: int
default: 5403
description: |
Default multicast port number that will be used to communicate between
HA Cluster nodes.

View File

@@ -8,6 +8,7 @@ from charmhelpers.fetch import (
) )
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
open_port, open_port,
relation_get,
relation_set, relation_set,
relation_ids, relation_ids,
config, config,
@@ -37,6 +38,13 @@ from charmhelpers.contrib.openstack.ip import (
canonical_url, canonical_url,
PUBLIC, INTERNAL, ADMIN PUBLIC, INTERNAL, ADMIN
) )
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address
)
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config
)
hooks = Hooks() hooks = Hooks()
CONFIGS = register_configs() CONFIGS = register_configs()
@@ -101,6 +109,65 @@ def upgrade_charm():
any_changed() any_changed()
@hooks.hook('ha-relation-joined')
def ha_joined():
    """Hand HA resource definitions to the hacluster subordinate.

    Registers the haproxy LSB resource plus one IPaddr2 resource per
    configured VIP (grouped so they move together), then pushes the full
    resource map onto the ha relation.
    """
    cluster_config = get_hacluster_config()

    resources = {'res_ceilometer_haproxy': 'lsb:haproxy'}
    resource_params = {'res_ceilometer_haproxy': 'op monitor interval="5s"'}

    vip_group = []
    for vip in cluster_config['vip'].split():
        iface = get_iface_for_address(vip)
        # VIPs that do not map onto a local interface are skipped.
        if iface is not None:
            vip_key = 'res_ceilometer_{}_vip'.format(iface)
            resources[vip_key] = 'ocf:heartbeat:IPaddr2'
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip='ip',
                                        vip=vip,
                                        iface=iface,
                                        netmask=get_netmask_for_address(vip))
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_ceilometer_vips': ' '.join(vip_group)})

    relation_set(init_services={'res_ceilometer_haproxy': 'haproxy'},
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones={'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'})
@hooks.hook('ha-relation-changed')
def ha_changed():
    """React to the hacluster subordinate reporting cluster state.

    Once clustering is complete, re-fire keystone_joined for each
    identity-service relation so endpoints are registered against the VIP.
    """
    clustered = relation_get('clustered')
    # Missing, empty or literal 'None' values mean "not clustered yet".
    if not clustered or clustered in [None, 'None', '']:
        log('ha_changed: hacluster subordinate not fully clustered.')
    else:
        log('Cluster configured, notifying other services and updating '
            'keystone endpoint configuration')
        for rid in relation_ids('identity-service'):
            # BUG FIX: keystone_joined() takes 'relid', not 'rid' -- the
            # original keystone_joined(rid=rid) raised TypeError.
            keystone_joined(relid=rid)
@hooks.hook("identity-service-relation-joined") @hooks.hook("identity-service-relation-joined")
def keystone_joined(relid=None): def keystone_joined(relid=None):
public_url = "{}:{}".format( public_url = "{}:{}".format(

View File

@@ -10,7 +10,7 @@ from ceilometer_contexts import (
ApacheSSLContext, ApacheSSLContext,
LoggingConfigContext, LoggingConfigContext,
MongoDBContext, MongoDBContext,
CeilometerContext, CeilometerContext
) )
from charmhelpers.contrib.openstack.utils import ( from charmhelpers.contrib.openstack.utils import (
get_os_codename_package, get_os_codename_package,
@@ -21,11 +21,13 @@ from charmhelpers.core.hookenv import config, log
from charmhelpers.fetch import apt_update, apt_install, apt_upgrade from charmhelpers.fetch import apt_update, apt_install, apt_upgrade
from copy import deepcopy from copy import deepcopy
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
CEILOMETER_CONF_DIR = "/etc/ceilometer" CEILOMETER_CONF_DIR = "/etc/ceilometer"
CEILOMETER_CONF = "%s/ceilometer.conf" % CEILOMETER_CONF_DIR CEILOMETER_CONF = "%s/ceilometer.conf" % CEILOMETER_CONF_DIR
HTTPS_APACHE_CONF = "/etc/apache2/sites-available/openstack_https_frontend" HTTPS_APACHE_CONF = "/etc/apache2/sites-available/openstack_https_frontend"
HTTPS_APACHE_24_CONF = "/etc/apache2/sites-available/" \ HTTPS_APACHE_24_CONF = "/etc/apache2/sites-available/" \
"openstack_https_frontend.conf" "openstack_https_frontend.conf"
CLUSTER_RES = 'grp_ceilometer_vips'
CEILOMETER_SERVICES = [ CEILOMETER_SERVICES = [
'ceilometer-agent-central', 'ceilometer-agent-central',
@@ -37,6 +39,7 @@ CEILOMETER_DB = "ceilometer"
CEILOMETER_SERVICE = "ceilometer" CEILOMETER_SERVICE = "ceilometer"
CEILOMETER_PACKAGES = [ CEILOMETER_PACKAGES = [
'haproxy',
'apache2', 'apache2',
'ceilometer-agent-central', 'ceilometer-agent-central',
'ceilometer-collector', 'ceilometer-collector',
@@ -62,6 +65,10 @@ CONFIG_FILES = OrderedDict([
context.SyslogContext()], context.SyslogContext()],
'services': CEILOMETER_SERVICES 'services': CEILOMETER_SERVICES
}), }),
(HAPROXY_CONF, {
'hook_contexts': [context.HAProxyContext()],
'services': ['haproxy'],
}),
(HTTPS_APACHE_CONF, { (HTTPS_APACHE_CONF, {
'hook_contexts': [ApacheSSLContext()], 'hook_contexts': [ApacheSSLContext()],
'services': ['apache2'], 'services': ['apache2'],

View File

@@ -20,20 +20,27 @@ from charmhelpers.core.hookenv import (
) )
def get_cert(): def get_cert(cn=None):
# TODO: deal with multiple https endpoints via charm config
cert = config_get('ssl_cert') cert = config_get('ssl_cert')
key = config_get('ssl_key') key = config_get('ssl_key')
if not (cert and key): if not (cert and key):
log("Inspecting identity-service relations for SSL certificate.", log("Inspecting identity-service relations for SSL certificate.",
level=INFO) level=INFO)
cert = key = None cert = key = None
if cn:
ssl_cert_attr = 'ssl_cert_{}'.format(cn)
ssl_key_attr = 'ssl_key_{}'.format(cn)
else:
ssl_cert_attr = 'ssl_cert'
ssl_key_attr = 'ssl_key'
for r_id in relation_ids('identity-service'): for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id): for unit in relation_list(r_id):
if not cert: if not cert:
cert = relation_get('ssl_cert', cert = relation_get(ssl_cert_attr,
rid=r_id, unit=unit) rid=r_id, unit=unit)
if not key: if not key:
key = relation_get('ssl_key', key = relation_get(ssl_key_attr,
rid=r_id, unit=unit) rid=r_id, unit=unit)
return (cert, key) return (cert, key)

View File

@@ -139,10 +139,9 @@ def https():
return True return True
for r_id in relation_ids('identity-service'): for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id): for unit in relation_list(r_id):
# TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
rel_state = [ rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit), relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit),
] ]
# NOTE: works around (LP: #1203241) # NOTE: works around (LP: #1203241)

View File

@@ -1,10 +1,15 @@
import glob
import re
import subprocess
import sys import sys
from functools import partial from functools import partial
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
ERROR, log, config, ERROR,
log
) )
try: try:
@@ -51,6 +56,8 @@ def get_address_in_network(network, fallback=None, fatal=False):
else: else:
if fatal: if fatal:
not_found_error_out() not_found_error_out()
else:
return None
_validate_cidr(network) _validate_cidr(network)
network = netaddr.IPNetwork(network) network = netaddr.IPNetwork(network)
@@ -132,7 +139,8 @@ def _get_for_address(address, key):
if address.version == 4 and netifaces.AF_INET in addresses: if address.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr'] addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask'] netmask = addresses[netifaces.AF_INET][0]['netmask']
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
cidr = network.cidr
if address in cidr: if address in cidr:
if key == 'iface': if key == 'iface':
return iface return iface
@@ -141,11 +149,14 @@ def _get_for_address(address, key):
if address.version == 6 and netifaces.AF_INET6 in addresses: if address.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]: for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'): if not addr['addr'].startswith('fe80'):
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask'])) addr['netmask']))
cidr = network.cidr
if address in cidr: if address in cidr:
if key == 'iface': if key == 'iface':
return iface return iface
elif key == 'netmask' and cidr:
return str(cidr).split('/')[1]
else: else:
return addr[key] return addr[key]
return None return None
@@ -156,19 +167,181 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask') get_netmask_for_address = partial(_get_for_address, key='netmask')
def get_ipv6_addr(iface="eth0"): def format_ipv6_addr(address):
"""
IPv6 needs to be wrapped with [] in url link to parse correctly.
"""
if is_ipv6(address):
address = "[%s]" % address
else:
address = None
return address
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None):
"""
Return the assigned IP address for a given interface, if any, or [].
"""
# Extract nic if passed /dev/ethX
if '/' in iface:
iface = iface.split('/')[-1]
if not exc_list:
exc_list = []
try: try:
iface_addrs = netifaces.ifaddresses(iface) inet_num = getattr(netifaces, inet_type)
if netifaces.AF_INET6 not in iface_addrs: except AttributeError:
raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) raise Exception('Unknown inet type ' + str(inet_type))
addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] interfaces = netifaces.interfaces()
ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') if inc_aliases:
and config('vip') != a['addr']] ifaces = []
if not ipv6_addr: for _iface in interfaces:
raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) if iface == _iface or _iface.split(':')[0] == iface:
ifaces.append(_iface)
if fatal and not ifaces:
raise Exception("Invalid interface '%s'" % iface)
ifaces.sort()
else:
if iface not in interfaces:
if fatal:
raise Exception("%s not found " % (iface))
else:
return []
else:
ifaces = [iface]
return ipv6_addr[0] addresses = []
for netiface in ifaces:
net_info = netifaces.ifaddresses(netiface)
if inet_num in net_info:
for entry in net_info[inet_num]:
if 'addr' in entry and entry['addr'] not in exc_list:
addresses.append(entry['addr'])
if fatal and not addresses:
raise Exception("Interface '%s' doesn't have any %s addresses." %
(iface, inet_type))
return addresses
except ValueError: get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
raise ValueError("Invalid interface '%s'" % iface)
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured.

    :param addr: IP address to search for (link-local scope suffixes such
                 as 'fe80::1%eth0' on candidate addresses are stripped
                 before comparison).
    :returns: name of the interface carrying ``addr``.
    :raises Exception: if no local interface carries the address.
    """
    # Compile once instead of once per candidate address (the original
    # recompiled inside a triple-nested loop).
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # Strip link-local scope suffix, e.g. 'fe80::1%eth0'.
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)

                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
def sniff_iface(f):
    """Decorator: if no 'iface' kwarg is provided, inject the net iface
    inferred from the unit's private-address.
    """
    # Function-scope import keeps the module's dependency block unchanged.
    from functools import wraps

    @wraps(f)  # preserve the wrapped function's name/docstring
    def iface_sniffer(*args, **kwargs):
        # Only infer when the caller did not supply an iface explicitly.
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address(es) for a given interface.

    Returns list of addresses found. If no address found, returns empty
    list. If iface is None, the primary interface is inferred from the unit
    private-address (via @sniff_iface).

    We currently only support scope global IPv6 addresses i.e.
    non-temporary addresses.

    :param iface: interface to inspect.
    :param inc_aliases: also consider interface aliases (e.g. eth0:1).
    :param fatal: raise instead of returning [] when nothing is found.
    :param exc_list: addresses to exclude from the result.
    :param dynamic_only: only accept addresses shown as 'scope global
                         dynamic' by iproute2 and matching the EUI-64
                         suffix of the link-local address.
    :raises Exception: when fatal is True and no address qualifies.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)
    if addresses:
        global_addrs = []
        # EUI-64 suffix of the link-local address, if any is seen; used to
        # recognise SLAAC-derived global addresses below. BUG FIX: the
        # original left this unbound when no link-local address existed,
        # raising UnboundLocalError in the dynamic_only path.
        eui_64_mac = None
        key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
        for addr in addresses:
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd)
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if (not dynamic_only or
                                    (eui_64_mac is not None and
                                     m.group(1).endswith(eui_64_mac))):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' doesn't have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of software bridges on the system, or [].

    A directory '<vnic_dir>/<name>/bridge' exists for each bridge device.
    """
    pattern = vnic_dir + '/*/bridge'
    bridges = []
    for entry in glob.glob(pattern):
        # '<vnic_dir>/<bridge>/bridge' -> '<bridge>'
        bridges.append(entry.replace(vnic_dir, '').split('/')[1])
    return bridges
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics enslaved to *bridge* on this system, or []."""
    pattern = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [entry.split('/')[-1] for entry in glob.glob(pattern)]
def is_bridge_member(nic):
    """Return True if *nic* is a member of any bridge on the system."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())

View File

@@ -10,32 +10,62 @@ class OpenStackAmuletDeployment(AmuletDeployment):
that is specifically for use by OpenStack charms. that is specifically for use by OpenStack charms.
""" """
def __init__(self, series=None, openstack=None, source=None): def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment.""" """Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series) super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack self.openstack = openstack
self.source = source self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
    """Determine the branch locations for the other services.

    Determine if the local branch being tested is derived from its
    stable or next (dev) branch, and based on this, use the corresponding
    stable or next branches for the other_services."""
    # Charms with no 'next' development branch; always taken from stable.
    base_charms = ['mysql', 'mongodb', 'rabbitmq-server']

    for svc in other_services:
        if self.stable or svc['name'] in base_charms:
            svc['location'] = 'lp:charms/{}'.format(svc['name'])
        else:
            svc['location'] = ('lp:~openstack-charmers/charms/{}/{}/next'
                               .format(self.current_next, svc['name']))

    return other_services
def _add_services(self, this_service, other_services): def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin.""" """Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service, super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services) other_services)
name = 0
services = other_services services = other_services
services.append(this_service) services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
if self.openstack: if self.openstack:
for svc in services: for svc in services:
if svc[name] not in use_source: if svc['name'] not in use_source:
config = {'openstack-origin': self.openstack} config = {'openstack-origin': self.openstack}
self.d.configure(svc[name], config) self.d.configure(svc['name'], config)
if self.source: if self.source:
for svc in services: for svc in services:
if svc[name] in use_source: if svc['name'] in use_source:
config = {'source': self.source} config = {'source': self.source}
self.d.configure(svc[name], config) self.d.configure(svc['name'], config)
def _configure_services(self, configs): def _configure_services(self, configs):
"""Configure all of the services.""" """Configure all of the services."""

View File

@@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils):
f = opener.open("http://download.cirros-cloud.net/version/released") f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip() version = f.read().strip()
cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(cirros_img): if not os.path.exists(local_path):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img) version, cirros_img)
opener.retrieve(cirros_url, cirros_img) opener.retrieve(cirros_url, local_path)
f.close() f.close()
with open(cirros_img) as f: with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True, image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2', disk_format='qcow2',
container_format='bare', data=f) container_format='bare', data=f)

View File

@@ -8,7 +8,6 @@ from subprocess import (
check_call check_call
) )
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_install, apt_install,
filter_installed_packages, filter_installed_packages,
@@ -16,6 +15,7 @@ from charmhelpers.fetch import (
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
config, config,
is_relation_made,
local_unit, local_unit,
log, log,
relation_get, relation_get,
@@ -25,7 +25,12 @@ from charmhelpers.core.hookenv import (
unit_get, unit_get,
unit_private_ip, unit_private_ip,
ERROR, ERROR,
INFO DEBUG
)
from charmhelpers.core.host import (
mkdir,
write_file
) )
from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.cluster import (
@@ -38,6 +43,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
from charmhelpers.contrib.hahelpers.apache import ( from charmhelpers.contrib.hahelpers.apache import (
get_cert, get_cert,
get_ca_cert, get_ca_cert,
install_ca_cert,
) )
from charmhelpers.contrib.openstack.neutron import ( from charmhelpers.contrib.openstack.neutron import (
@@ -47,8 +53,14 @@ from charmhelpers.contrib.openstack.neutron import (
from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.network.ip import (
get_address_in_network, get_address_in_network,
get_ipv6_addr, get_ipv6_addr,
get_netmask_for_address,
format_ipv6_addr,
is_address_in_network
) )
from charmhelpers.contrib.openstack.utils import (
get_host_ip,
)
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@@ -168,8 +180,10 @@ class SharedDBContext(OSContextGenerator):
for rid in relation_ids('shared-db'): for rid in relation_ids('shared-db'):
for unit in related_units(rid): for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit) rdata = relation_get(rid=rid, unit=unit)
host = rdata.get('db_host')
host = format_ipv6_addr(host) or host
ctxt = { ctxt = {
'database_host': rdata.get('db_host'), 'database_host': host,
'database': self.database, 'database': self.database,
'database_user': self.user, 'database_user': self.user,
'database_password': rdata.get(password_setting), 'database_password': rdata.get(password_setting),
@@ -245,10 +259,15 @@ class IdentityServiceContext(OSContextGenerator):
for rid in relation_ids('identity-service'): for rid in relation_ids('identity-service'):
for unit in related_units(rid): for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit) rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
ctxt = { ctxt = {
'service_port': rdata.get('service_port'), 'service_port': rdata.get('service_port'),
'service_host': rdata.get('service_host'), 'service_host': serv_host,
'auth_host': rdata.get('auth_host'), 'auth_host': auth_host,
'auth_port': rdata.get('auth_port'), 'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('service_tenant'), 'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'), 'admin_user': rdata.get('service_username'),
@@ -297,11 +316,13 @@ class AMQPContext(OSContextGenerator):
for unit in related_units(rid): for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit): if relation_get('clustered', rid=rid, unit=unit):
ctxt['clustered'] = True ctxt['clustered'] = True
ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, vip = relation_get('vip', rid=rid, unit=unit)
unit=unit) vip = format_ipv6_addr(vip) or vip
ctxt['rabbitmq_host'] = vip
else: else:
ctxt['rabbitmq_host'] = relation_get('private-address', host = relation_get('private-address', rid=rid, unit=unit)
rid=rid, unit=unit) host = format_ipv6_addr(host) or host
ctxt['rabbitmq_host'] = host
ctxt.update({ ctxt.update({
'rabbitmq_user': username, 'rabbitmq_user': username,
'rabbitmq_password': relation_get('password', rid=rid, 'rabbitmq_password': relation_get('password', rid=rid,
@@ -340,8 +361,9 @@ class AMQPContext(OSContextGenerator):
and len(related_units(rid)) > 1: and len(related_units(rid)) > 1:
rabbitmq_hosts = [] rabbitmq_hosts = []
for unit in related_units(rid): for unit in related_units(rid):
rabbitmq_hosts.append(relation_get('private-address', host = relation_get('private-address', rid=rid, unit=unit)
rid=rid, unit=unit)) host = format_ipv6_addr(host) or host
rabbitmq_hosts.append(host)
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
if not context_complete(ctxt): if not context_complete(ctxt):
return {} return {}
@@ -370,6 +392,7 @@ class CephContext(OSContextGenerator):
ceph_addr = \ ceph_addr = \
relation_get('ceph-public-address', rid=rid, unit=unit) or \ relation_get('ceph-public-address', rid=rid, unit=unit) or \
relation_get('private-address', rid=rid, unit=unit) relation_get('private-address', rid=rid, unit=unit)
ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
mon_hosts.append(ceph_addr) mon_hosts.append(ceph_addr)
ctxt = { ctxt = {
@@ -390,6 +413,9 @@ class CephContext(OSContextGenerator):
return ctxt return ctxt
ADDRESS_TYPES = ['admin', 'internal', 'public']
class HAProxyContext(OSContextGenerator): class HAProxyContext(OSContextGenerator):
interfaces = ['cluster'] interfaces = ['cluster']
@@ -402,25 +428,63 @@ class HAProxyContext(OSContextGenerator):
if not relation_ids('cluster'): if not relation_ids('cluster'):
return {} return {}
cluster_hosts = {}
l_unit = local_unit().replace('/', '-') l_unit = local_unit().replace('/', '-')
if config('prefer-ipv6'):
addr = get_ipv6_addr()
else:
addr = unit_get('private-address')
cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
addr)
for rid in relation_ids('cluster'): if config('prefer-ipv6'):
for unit in related_units(rid): addr = get_ipv6_addr(exc_list=[config('vip')])[0]
_unit = unit.replace('/', '-') else:
addr = relation_get('private-address', rid=rid, unit=unit) addr = get_host_ip(unit_get('private-address'))
cluster_hosts[_unit] = addr
cluster_hosts = {}
# NOTE(jamespage): build out map of configured network endpoints
# and associated backends
for addr_type in ADDRESS_TYPES:
laddr = get_address_in_network(
config('os-{}-network'.format(addr_type)))
if laddr:
cluster_hosts[laddr] = {}
cluster_hosts[laddr]['network'] = "{}/{}".format(
laddr,
get_netmask_for_address(laddr)
)
cluster_hosts[laddr]['backends'] = {}
cluster_hosts[laddr]['backends'][l_unit] = laddr
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
_laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit)
if _laddr:
cluster_hosts[laddr]['backends'][_unit] = _laddr
# NOTE(jamespage) no split configurations found, just use
# private addresses
if not cluster_hosts:
cluster_hosts[addr] = {}
cluster_hosts[addr]['network'] = "{}/{}".format(
addr,
get_netmask_for_address(addr)
)
cluster_hosts[addr]['backends'] = {}
cluster_hosts[addr]['backends'][l_unit] = addr
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
_laddr = relation_get('private-address',
rid=rid, unit=unit)
if _laddr:
cluster_hosts[addr]['backends'][_unit] = _laddr
ctxt = { ctxt = {
'units': cluster_hosts, 'frontends': cluster_hosts,
} }
if config('haproxy-server-timeout'):
ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
if config('haproxy-client-timeout'):
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
if config('prefer-ipv6'): if config('prefer-ipv6'):
ctxt['local_host'] = 'ip6-localhost' ctxt['local_host'] = 'ip6-localhost'
ctxt['haproxy_host'] = '::' ctxt['haproxy_host'] = '::'
@@ -430,12 +494,13 @@ class HAProxyContext(OSContextGenerator):
ctxt['haproxy_host'] = '0.0.0.0' ctxt['haproxy_host'] = '0.0.0.0'
ctxt['stat_port'] = ':8888' ctxt['stat_port'] = ':8888'
if len(cluster_hosts.keys()) > 1: for frontend in cluster_hosts:
# Enable haproxy when we have enough peers. if len(cluster_hosts[frontend]['backends']) > 1:
log('Ensuring haproxy enabled in /etc/default/haproxy.') # Enable haproxy when we have enough peers.
with open('/etc/default/haproxy', 'w') as out: log('Ensuring haproxy enabled in /etc/default/haproxy.')
out.write('ENABLED=1\n') with open('/etc/default/haproxy', 'w') as out:
return ctxt out.write('ENABLED=1\n')
return ctxt
log('HAProxy context is incomplete, this unit has no peers.') log('HAProxy context is incomplete, this unit has no peers.')
return {} return {}
@@ -490,22 +555,36 @@ class ApacheSSLContext(OSContextGenerator):
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
check_call(cmd) check_call(cmd)
def configure_cert(self): def configure_cert(self, cn=None):
if not os.path.isdir('/etc/apache2/ssl'):
os.mkdir('/etc/apache2/ssl')
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
if not os.path.isdir(ssl_dir): mkdir(path=ssl_dir)
os.mkdir(ssl_dir) cert, key = get_cert(cn)
cert, key = get_cert() if cn:
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: cert_filename = 'cert_{}'.format(cn)
cert_out.write(b64decode(cert)) key_filename = 'key_{}'.format(cn)
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: else:
key_out.write(b64decode(key)) cert_filename = 'cert'
key_filename = 'key'
write_file(path=os.path.join(ssl_dir, cert_filename),
content=b64decode(cert))
write_file(path=os.path.join(ssl_dir, key_filename),
content=b64decode(key))
def configure_ca(self):
ca_cert = get_ca_cert() ca_cert = get_ca_cert()
if ca_cert: if ca_cert:
with open(CA_CERT_PATH, 'w') as ca_out: install_ca_cert(b64decode(ca_cert))
ca_out.write(b64decode(ca_cert))
check_call(['update-ca-certificates']) def canonical_names(self):
'''Figure out which canonical names clients will access this service'''
cns = []
for r_id in relation_ids('identity-service'):
for unit in related_units(r_id):
rdata = relation_get(rid=r_id, unit=unit)
for k in rdata:
if k.startswith('ssl_key_'):
cns.append(k.lstrip('ssl_key_'))
return list(set(cns))
def __call__(self): def __call__(self):
if isinstance(self.external_ports, basestring): if isinstance(self.external_ports, basestring):
@@ -513,21 +592,47 @@ class ApacheSSLContext(OSContextGenerator):
if (not self.external_ports or not https()): if (not self.external_ports or not https()):
return {} return {}
self.configure_cert() self.configure_ca()
self.enable_modules() self.enable_modules()
ctxt = { ctxt = {
'namespace': self.service_namespace, 'namespace': self.service_namespace,
'private_address': unit_get('private-address'), 'endpoints': [],
'endpoints': [] 'ext_ports': []
} }
if is_clustered():
ctxt['private_address'] = config('vip') for cn in self.canonical_names():
for api_port in self.external_ports: self.configure_cert(cn)
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port) addresses = []
portmap = (int(ext_port), int(int_port)) vips = []
ctxt['endpoints'].append(portmap) if config('vip'):
vips = config('vip').split()
for network_type in ['os-internal-network',
'os-admin-network',
'os-public-network']:
address = get_address_in_network(config(network_type),
unit_get('private-address'))
if len(vips) > 0 and is_clustered():
for vip in vips:
if is_address_in_network(config(network_type),
vip):
addresses.append((address, vip))
break
elif is_clustered():
addresses.append((address, config('vip')))
else:
addresses.append((address, address))
for address, endpoint in set(addresses):
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (address, endpoint, int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
ctxt['ext_ports'].append(int(ext_port))
ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
return ctxt return ctxt
@@ -657,22 +762,22 @@ class NeutronContext(OSContextGenerator):
class OSConfigFlagContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator):
""" """
Responsible for adding user-defined config-flags in charm config to a Responsible for adding user-defined config-flags in charm config to a
template context. template context.
NOTE: the value of config-flags may be a comma-separated list of NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some Openstack config files support key=value pairs and some Openstack config files support
comma-separated lists as values. comma-separated lists as values.
""" """
def __call__(self): def __call__(self):
config_flags = config('config-flags') config_flags = config('config-flags')
if not config_flags: if not config_flags:
return {} return {}
flags = config_flags_parser(config_flags) flags = config_flags_parser(config_flags)
return {'user_config_flags': flags} return {'user_config_flags': flags}
class SubordinateConfigContext(OSContextGenerator): class SubordinateConfigContext(OSContextGenerator):
@@ -764,7 +869,7 @@ class SubordinateConfigContext(OSContextGenerator):
else: else:
ctxt[k] = v ctxt[k] = v
log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
return ctxt return ctxt
@@ -787,3 +892,66 @@ class SyslogContext(OSContextGenerator):
'use_syslog': config('use-syslog') 'use_syslog': config('use-syslog')
} }
return ctxt return ctxt
class BindHostContext(OSContextGenerator):
def __call__(self):
if config('prefer-ipv6'):
return {
'bind_host': '::'
}
else:
return {
'bind_host': '0.0.0.0'
}
class WorkerConfigContext(OSContextGenerator):
@property
def num_cpus(self):
try:
from psutil import NUM_CPUS
except ImportError:
apt_install('python-psutil', fatal=True)
from psutil import NUM_CPUS
return NUM_CPUS
def __call__(self):
multiplier = config('worker-multiplier') or 1
ctxt = {
"workers": self.num_cpus * multiplier
}
return ctxt
class ZeroMQContext(OSContextGenerator):
interfaces = ['zeromq-configuration']
def __call__(self):
ctxt = {}
if is_relation_made('zeromq-configuration', 'host'):
for rid in relation_ids('zeromq-configuration'):
for unit in related_units(rid):
ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
ctxt['zmq_host'] = relation_get('host', unit, rid)
return ctxt
class NotificationDriverContext(OSContextGenerator):
def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'):
"""
:param zmq_relation : Name of Zeromq relation to check
"""
self.zmq_relation = zmq_relation
self.amqp_relation = amqp_relation
def __call__(self):
ctxt = {
'notifications': 'False',
}
if is_relation_made(self.amqp_relation):
ctxt['notifications'] = "True"
return ctxt

View File

@@ -66,7 +66,7 @@ def resolve_address(endpoint_type=PUBLIC):
resolved_address = vip resolved_address = vip
else: else:
if config('prefer-ipv6'): if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr() fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
else: else:
fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
resolved_address = get_address_in_network( resolved_address = get_address_in_network(

View File

@@ -2,8 +2,10 @@
# Common python helper functions used for OpenStack charms. # Common python helper functions used for OpenStack charms.
from collections import OrderedDict from collections import OrderedDict
from functools import wraps
import subprocess import subprocess
import json
import os import os
import socket import socket
import sys import sys
@@ -13,7 +15,9 @@ from charmhelpers.core.hookenv import (
log as juju_log, log as juju_log,
charm_dir, charm_dir,
ERROR, ERROR,
INFO INFO,
relation_ids,
relation_set
) )
from charmhelpers.contrib.storage.linux.lvm import ( from charmhelpers.contrib.storage.linux.lvm import (
@@ -22,6 +26,10 @@ from charmhelpers.contrib.storage.linux.lvm import (
remove_lvm_physical_volume, remove_lvm_physical_volume,
) )
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
from charmhelpers.core.host import lsb_release, mounts, umount from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache from charmhelpers.fetch import apt_install, apt_cache
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@@ -70,6 +78,9 @@ SWIFT_CODENAMES = OrderedDict([
('1.13.0', 'icehouse'), ('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'), ('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'), ('1.11.0', 'icehouse'),
('2.0.0', 'juno'),
('2.1.0', 'juno'),
('2.2.0', 'juno'),
]) ])
DEFAULT_LOOPBACK_SIZE = '5G' DEFAULT_LOOPBACK_SIZE = '5G'
@@ -456,3 +467,44 @@ def get_hostname(address, fqdn=True):
return result return result
else: else:
return result.split('.')[0] return result.split('.')[0]
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
mm_map = {}
if os.path.isfile(mm_file):
with open(mm_file, 'r') as f:
mm_map = json.load(f)
return mm_map
def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False)
kwargs = {'database': database,
'username': database_user,
'hostname': json.dumps(hosts)}
if relation_prefix:
keys = kwargs.keys()
for key in keys:
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
del kwargs[key]
for rid in relation_ids('shared-db'):
relation_set(relation_id=rid, **kwargs)
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap

View File

@@ -113,7 +113,7 @@ def get_osds(service):
return None return None
def create_pool(service, name, replicas=2): def create_pool(service, name, replicas=3):
''' Create a new RADOS pool ''' ''' Create a new RADOS pool '''
if pool_exists(service, name): if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name), log("Ceph pool {} already exists, skipping creation".format(name),
@@ -300,7 +300,8 @@ def copy_files(src, dst, symlinks=False, ignore=None):
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]): blk_device, fstype, system_services=[],
replicas=3):
""" """
NOTE: This function must only be called from a single service unit for NOTE: This function must only be called from a single service unit for
the same rbd_img otherwise data loss will occur. the same rbd_img otherwise data loss will occur.
@@ -317,7 +318,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
# Ensure pool, RBD image, RBD mappings are in place. # Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool): if not pool_exists(service, pool):
log('ceph: Creating new pool {}.'.format(pool)) log('ceph: Creating new pool {}.'.format(pool))
create_pool(service, pool) create_pool(service, pool, replicas=replicas)
if not rbd_exists(service, pool, rbd_img): if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image ({}).'.format(rbd_img)) log('ceph: Creating RBD image ({}).'.format(rbd_img))

View File

@@ -156,12 +156,15 @@ def hook_name():
class Config(dict): class Config(dict):
"""A Juju charm config dictionary that can write itself to """A dictionary representation of the charm's config.yaml, with some
disk (as json) and track which values have changed since extra features:
the previous hook invocation.
Do not instantiate this object directly - instead call - See which values in the dictionary have changed since the previous hook.
``hookenv.config()`` - For values that have changed, see what the previous value was.
- Store arbitrary data for use in a later hook.
NOTE: Do not instantiate this object directly - instead call
``hookenv.config()``, which will return an instance of :class:`Config`.
Example usage:: Example usage::
@@ -170,8 +173,8 @@ class Config(dict):
>>> config = hookenv.config() >>> config = hookenv.config()
>>> config['foo'] >>> config['foo']
'bar' 'bar'
>>> # store a new key/value for later use
>>> config['mykey'] = 'myval' >>> config['mykey'] = 'myval'
>>> config.save()
>>> # user runs `juju set mycharm foo=baz` >>> # user runs `juju set mycharm foo=baz`
@@ -188,22 +191,40 @@ class Config(dict):
>>> # keys/values that we add are preserved across hooks >>> # keys/values that we add are preserved across hooks
>>> config['mykey'] >>> config['mykey']
'myval' 'myval'
>>> # don't forget to save at the end of hook!
>>> config.save()
""" """
CONFIG_FILE_NAME = '.juju-persistent-config' CONFIG_FILE_NAME = '.juju-persistent-config'
def __init__(self, *args, **kw): def __init__(self, *args, **kw):
super(Config, self).__init__(*args, **kw) super(Config, self).__init__(*args, **kw)
self.implicit_save = True
self._prev_dict = None self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path): if os.path.exists(self.path):
self.load_previous() self.load_previous()
def __getitem__(self, key):
"""For regular dict lookups, check the current juju config first,
then the previous (saved) copy. This ensures that user-saved values
will be returned by a dict lookup.
"""
try:
return dict.__getitem__(self, key)
except KeyError:
return (self._prev_dict or {})[key]
def keys(self):
prev_keys = []
if self._prev_dict is not None:
prev_keys = self._prev_dict.keys()
return list(set(prev_keys + dict.keys(self)))
def load_previous(self, path=None): def load_previous(self, path=None):
"""Load previous copy of config from disk so that current values """Load previous copy of config from disk.
can be compared to previous values.
In normal usage you don't need to call this method directly - it
is called automatically at object initialization.
:param path: :param path:
@@ -218,8 +239,8 @@ class Config(dict):
self._prev_dict = json.load(f) self._prev_dict = json.load(f)
def changed(self, key): def changed(self, key):
"""Return true if the value for this key has changed since """Return True if the current value for this key is different from
the last save. the previous value.
""" """
if self._prev_dict is None: if self._prev_dict is None:
@@ -228,7 +249,7 @@ class Config(dict):
def previous(self, key): def previous(self, key):
"""Return previous value for this key, or None if there """Return previous value for this key, or None if there
is no "previous" value. is no previous value.
""" """
if self._prev_dict: if self._prev_dict:
@@ -238,7 +259,13 @@ class Config(dict):
def save(self): def save(self):
"""Save this config to disk. """Save this config to disk.
Preserves items in _prev_dict that do not exist in self. If the charm is using the :mod:`Services Framework <services.base>`
or :meth:'@hook <Hooks.hook>' decorator, this
is called automatically at the end of successful hook execution.
Otherwise, it should be called directly by user code.
To disable automatic saves, set ``implicit_save=False`` on this
instance.
""" """
if self._prev_dict: if self._prev_dict:
@@ -465,9 +492,10 @@ class Hooks(object):
hooks.execute(sys.argv) hooks.execute(sys.argv)
""" """
def __init__(self): def __init__(self, config_save=True):
super(Hooks, self).__init__() super(Hooks, self).__init__()
self._hooks = {} self._hooks = {}
self._config_save = config_save
def register(self, name, function): def register(self, name, function):
"""Register a hook""" """Register a hook"""
@@ -478,6 +506,10 @@ class Hooks(object):
hook_name = os.path.basename(args[0]) hook_name = os.path.basename(args[0])
if hook_name in self._hooks: if hook_name in self._hooks:
self._hooks[hook_name]() self._hooks[hook_name]()
if self._config_save:
cfg = config()
if cfg.implicit_save:
cfg.save()
else: else:
raise UnregisteredHookError(hook_name) raise UnregisteredHookError(hook_name)

View File

@@ -6,13 +6,13 @@
# Matthew Wedgwood <matthew.wedgwood@canonical.com> # Matthew Wedgwood <matthew.wedgwood@canonical.com>
import os import os
import re
import pwd import pwd
import grp import grp
import random import random
import string import string
import subprocess import subprocess
import hashlib import hashlib
import shutil
from contextlib import contextmanager from contextlib import contextmanager
from collections import OrderedDict from collections import OrderedDict
@@ -68,8 +68,8 @@ def service_available(service_name):
"""Determine whether a system service is available""" """Determine whether a system service is available"""
try: try:
subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError: except subprocess.CalledProcessError as e:
return False return 'unrecognized service' not in e.output
else: else:
return True return True
@@ -209,10 +209,15 @@ def mounts():
return system_mounts return system_mounts
def file_hash(path): def file_hash(path, hash_type='md5'):
"""Generate a md5 hash of the contents of 'path' or None if not found """ """
Generate a hash checksum of the contents of 'path' or None if not found.
:param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
if os.path.exists(path): if os.path.exists(path):
h = hashlib.md5() h = getattr(hashlib, hash_type)()
with open(path, 'r') as source: with open(path, 'r') as source:
h.update(source.read()) # IGNORE:E1101 - it does have update h.update(source.read()) # IGNORE:E1101 - it does have update
return h.hexdigest() return h.hexdigest()
@@ -220,6 +225,26 @@ def file_hash(path):
return None return None
def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
Can be any hash alrgorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
class ChecksumError(ValueError):
pass
def restart_on_change(restart_map, stopstart=False): def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing """Restart services based on configuration files changing
@@ -292,7 +317,13 @@ def list_nics(nic_type):
ip_output = (line for line in ip_output if line) ip_output = (line for line in ip_output if line)
for line in ip_output: for line in ip_output:
if line.split()[1].startswith(int_type): if line.split()[1].startswith(int_type):
interfaces.append(line.split()[1].replace(":", "")) matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
if matched:
interface = matched.groups()[0]
else:
interface = line.split()[1].replace(":", "")
interfaces.append(interface)
return interfaces return interfaces

View File

@@ -1,2 +1,2 @@
from .base import * from .base import * # NOQA
from .helpers import * from .helpers import * # NOQA

View File

@@ -118,6 +118,9 @@ class ServiceManager(object):
else: else:
self.provide_data() self.provide_data()
self.reconfigure_services() self.reconfigure_services()
cfg = hookenv.config()
if cfg.implicit_save:
cfg.save()
def provide_data(self): def provide_data(self):
""" """

View File

@@ -1,3 +1,5 @@
import os
import yaml
from charmhelpers.core import hookenv from charmhelpers.core import hookenv
from charmhelpers.core import templating from charmhelpers.core import templating
@@ -19,15 +21,21 @@ class RelationContext(dict):
the `name` attribute that are complete will used to populate the dictionary the `name` attribute that are complete will used to populate the dictionary
values (see `get_data`, below). values (see `get_data`, below).
The generated context will be namespaced under the interface type, to prevent The generated context will be namespaced under the relation :attr:`name`,
potential naming conflicts. to prevent potential naming conflicts.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
""" """
name = None name = None
interface = None interface = None
required_keys = [] required_keys = []
def __init__(self, *args, **kwargs): def __init__(self, name=None, additional_required_keys=None):
super(RelationContext, self).__init__(*args, **kwargs) if name is not None:
self.name = name
if additional_required_keys is not None:
self.required_keys.extend(additional_required_keys)
self.get_data() self.get_data()
def __bool__(self): def __bool__(self):
@@ -101,9 +109,115 @@ class RelationContext(dict):
return {} return {}
class MysqlRelation(RelationContext):
"""
Relation context for the `mysql` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'db'
interface = 'mysql'
required_keys = ['host', 'user', 'password', 'database']
class HttpRelation(RelationContext):
"""
Relation context for the `http` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'website'
interface = 'http'
required_keys = ['host', 'port']
def provide_data(self):
return {
'host': hookenv.unit_get('private-address'),
'port': 80,
}
class RequiredConfig(dict):
"""
Data context that loads config options with one or more mandatory options.
Once the required options have been changed from their default values, all
config options will be available, namespaced under `config` to prevent
potential naming conflicts (for example, between a config option and a
relation property).
:param list *args: List of options that must be changed from their default values.
"""
def __init__(self, *args):
self.required_options = args
self['config'] = hookenv.config()
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
self.config = yaml.load(fp).get('options', {})
def __bool__(self):
for option in self.required_options:
if option not in self['config']:
return False
current_value = self['config'][option]
default_value = self.config[option].get('default')
if current_value == default_value:
return False
if current_value in (None, '') and default_value in (None, ''):
return False
return True
def __nonzero__(self):
return self.__bool__()
class StoredContext(dict):
"""
A data context that always returns the data that it was first created with.
This is useful to do a one-time generation of things like passwords, that
will thereafter use the same value that was originally generated, instead
of generating a new value each time it is run.
"""
def __init__(self, file_name, config_data):
"""
If the file exists, populate `self` with the data from the file.
Otherwise, populate with the given data and persist it to the file.
"""
if os.path.exists(file_name):
self.update(self.read_context(file_name))
else:
self.store_context(file_name, config_data)
self.update(config_data)
def store_context(self, file_name, config_data):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'w') as file_stream:
os.fchmod(file_stream.fileno(), 0600)
yaml.dump(config_data, file_stream)
def read_context(self, file_name):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'r') as file_stream:
data = yaml.load(file_stream)
if not data:
raise OSError("%s is empty" % file_name)
return data
class TemplateCallback(ManagerCallback): class TemplateCallback(ManagerCallback):
""" """
Callback class that will render a template, for use as a ready action. Callback class that will render a Jinja2 template, for use as a ready action.
:param str source: The template source file, relative to `$CHARM_DIR/templates`
:param str target: The target to write the rendered template to
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
""" """
def __init__(self, source, target, owner='root', group='root', perms=0444): def __init__(self, source, target, owner='root', group='root', perms=0444):
self.source = source self.source = source

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import yaml
from subprocess import check_call
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 }
:type sysctl_dict: dict
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
:returns: None
"""
sysctl_dict = yaml.load(sysctl_dict)
with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict.items():
fd.write("{}={}\n".format(key, value))
log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict),
level=DEBUG)
check_call(["sysctl", "-p", sysctl_file])

View File

@@ -72,6 +72,7 @@ CLOUD_ARCHIVE_POCKETS = {
FETCH_HANDLERS = ( FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
) )
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
@@ -208,7 +209,8 @@ def add_source(source, key=None):
"""Add a package source to this system. """Add a package source to this system.
@param source: a URL or sources.list entry, as supported by @param source: a URL or sources.list entry, as supported by
add-apt-repository(1). Examples: add-apt-repository(1). Examples::
ppa:charmers/example ppa:charmers/example
deb https://stub:key@private.example.com/ubuntu trusty main deb https://stub:key@private.example.com/ubuntu trusty main
@@ -217,6 +219,7 @@ def add_source(source, key=None):
pocket for the release. pocket for the release.
'cloud:' may be used to activate official cloud archive pockets, 'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse' such as 'cloud:icehouse'
'distro' may be used as a noop
@param key: A key to be added to the system's APT keyring and used @param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an to verify the signatures on packages. Ideally, this should be an
@@ -250,6 +253,8 @@ def add_source(source, key=None):
release = lsb_release()['DISTRIB_CODENAME'] release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release)) apt.write(PROPOSED_POCKET.format(release))
elif source == 'distro':
pass
else: else:
raise SourceConfigError("Unknown source: {!r}".format(source)) raise SourceConfigError("Unknown source: {!r}".format(source))
@@ -311,22 +316,35 @@ def configure_sources(update=False,
apt_update(fatal=True) apt_update(fatal=True)
def install_remote(source): def install_remote(source, *args, **kwargs):
""" """
Install a file tree from a remote source Install a file tree from a remote source
The specified source should be a url of the form: The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]] scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this modules submodules Schemes supported are based on this modules submodules.
Options supported are submodule-specific""" Options supported are submodule-specific.
Additional arguments are passed through to the submodule.
For example::
dest = install_remote('http://example.com/archive.tgz',
checksum='deadbeef',
hash_type='sha1')
This will download `archive.tgz`, validate it using SHA1 and, if
the file is ok, extract it and return the directory in which it
was extracted. If the checksum fails, it will raise
:class:`charmhelpers.core.host.ChecksumError`.
"""
# We ONLY check for True here because can_handle may return a string # We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source. # explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True] handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None installed_to = None
for handler in handlers: for handler in handlers:
try: try:
installed_to = handler.install(source) installed_to = handler.install(source, *args, **kwargs)
except UnhandledSource: except UnhandledSource:
pass pass
if not installed_to: if not installed_to:

View File

@@ -1,6 +1,8 @@
import os import os
import urllib2 import urllib2
from urllib import urlretrieve
import urlparse import urlparse
import hashlib
from charmhelpers.fetch import ( from charmhelpers.fetch import (
BaseFetchHandler, BaseFetchHandler,
@@ -10,11 +12,19 @@ from charmhelpers.payload.archive import (
get_archive_handler, get_archive_handler,
extract, extract,
) )
from charmhelpers.core.host import mkdir from charmhelpers.core.host import mkdir, check_hash
class ArchiveUrlFetchHandler(BaseFetchHandler): class ArchiveUrlFetchHandler(BaseFetchHandler):
"""Handler for archives via generic URLs""" """
Handler to download archive files from arbitrary URLs.
Can fetch from http, https, ftp, and file URLs.
Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
Installs the contents of the archive in $CHARM_DIR/fetched/.
"""
def can_handle(self, source): def can_handle(self, source):
url_parts = self.parse_url(source) url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
return False return False
def download(self, source, dest): def download(self, source, dest):
"""
Download an archive file.
:param str source: URL pointing to an archive file.
:param str dest: Local path location to download archive file to.
"""
# propogate all exceptions # propogate all exceptions
# URLError, OSError, etc # URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source) proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@@ -48,7 +64,30 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
os.unlink(dest) os.unlink(dest)
raise e raise e
def install(self, source): # Mandatory file validation via Sha1 or MD5 hashing.
def download_and_validate(self, url, hashsum, validate="sha1"):
tempfile, headers = urlretrieve(url)
check_hash(tempfile, hashsum, validate)
return tempfile
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
"""
Download and install an archive file, with optional checksum validation.
The checksum can also be given on the `source` URL's fragment.
For example::
handler.install('http://example.com/file.tgz#sha1=deadbeef')
:param str source: URL pointing to an archive file.
:param str dest: Local destination path to install to. If not given,
installs to `$CHARM_DIR/archives/archive_file_name`.
:param str checksum: If given, validate the archive file after download.
:param str hash_type: Algorithm used to generate `checksum`.
Can be any hash alrgorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
url_parts = self.parse_url(source) url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir): if not os.path.exists(dest_dir):
@@ -60,4 +99,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
raise UnhandledSource(e.reason) raise UnhandledSource(e.reason)
except OSError as e: except OSError as e:
raise UnhandledSource(e.strerror) raise UnhandledSource(e.strerror)
return extract(dld_file) options = urlparse.parse_qs(url_parts.fragment)
for key, value in options.items():
if key in hashlib.algorithms:
check_hash(dld_file, value, key)
if checksum:
check_hash(dld_file, checksum, hash_type)
return extract(dld_file, dest)

View File

@@ -0,0 +1,44 @@
import os
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.core.host import mkdir
try:
from git import Repo
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-git")
from git import Repo
class GitUrlFetchHandler(BaseFetchHandler):
"""Handler for git branches via generic and github URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
#TODO (mattyw) no support for ssh git@ yet
if url_parts.scheme not in ('http', 'https', 'git'):
return False
else:
return True
def clone(self, source, dest, branch):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
repo = Repo.clone_from(source, dest)
repo.git.checkout(branch)
def install(self, source, branch="master"):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
try:
self.clone(source, dest_dir, branch)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir

1
hooks/ha-relation-changed Symbolic link
View File

@@ -0,0 +1 @@
ceilometer_hooks.py

1
hooks/ha-relation-joined Symbolic link
View File

@@ -0,0 +1 @@
ceilometer_hooks.py

View File

@@ -21,3 +21,9 @@ requires:
interface: rabbitmq interface: rabbitmq
identity-service: identity-service:
interface: keystone interface: keystone
ha:
interface: hacluster
scope: container
peers:
cluster:
interface: ceilometer-ha

View File

@@ -42,13 +42,15 @@ class CeilometerHooksTest(CharmTestCase):
ceilometer_utils.CEILOMETER_PACKAGES ceilometer_utils.CEILOMETER_PACKAGES
self.lsb_release.return_value = {'DISTRIB_CODENAME': 'precise'} self.lsb_release.return_value = {'DISTRIB_CODENAME': 'precise'}
def test_configure_source(self): @patch("charmhelpers.core.hookenv.config")
def test_configure_source(self, mcok_config):
self.test_config.set('openstack-origin', 'cloud:precise-havana') self.test_config.set('openstack-origin', 'cloud:precise-havana')
hooks.hooks.execute(['hooks/install']) hooks.hooks.execute(['hooks/install'])
self.configure_installation_source.\ self.configure_installation_source.\
assert_called_with('cloud:precise-havana') assert_called_with('cloud:precise-havana')
def test_install_hook_precise(self): @patch("charmhelpers.core.hookenv.config")
def test_install_hook_precise(self, mock_config):
hooks.hooks.execute(['hooks/install']) hooks.hooks.execute(['hooks/install'])
self.configure_installation_source.\ self.configure_installation_source.\
assert_called_with('cloud:precise-grizzly') assert_called_with('cloud:precise-grizzly')
@@ -59,7 +61,8 @@ class CeilometerHooksTest(CharmTestCase):
fatal=True fatal=True
) )
def test_install_hook_distro(self): @patch("charmhelpers.core.hookenv.config")
def test_install_hook_distro(self, mock_config):
self.lsb_release.return_value = {'DISTRIB_CODENAME': 'saucy'} self.lsb_release.return_value = {'DISTRIB_CODENAME': 'saucy'}
hooks.hooks.execute(['hooks/install']) hooks.hooks.execute(['hooks/install'])
self.configure_installation_source.\ self.configure_installation_source.\
@@ -71,32 +74,37 @@ class CeilometerHooksTest(CharmTestCase):
fatal=True fatal=True
) )
def test_amqp_joined(self): @patch("charmhelpers.core.hookenv.config")
def test_amqp_joined(self, mock_config):
hooks.hooks.execute(['hooks/amqp-relation-joined']) hooks.hooks.execute(['hooks/amqp-relation-joined'])
self.relation_set.assert_called_with( self.relation_set.assert_called_with(
username=self.test_config.get('rabbit-user'), username=self.test_config.get('rabbit-user'),
vhost=self.test_config.get('rabbit-vhost')) vhost=self.test_config.get('rabbit-vhost'))
def test_db_joined(self): @patch("charmhelpers.core.hookenv.config")
def test_db_joined(self, mock_config):
hooks.hooks.execute(['hooks/shared-db-relation-joined']) hooks.hooks.execute(['hooks/shared-db-relation-joined'])
self.relation_set.assert_called_with( self.relation_set.assert_called_with(
ceilometer_database='ceilometer') ceilometer_database='ceilometer')
@patch("charmhelpers.core.hookenv.config")
@patch.object(hooks, 'ceilometer_joined') @patch.object(hooks, 'ceilometer_joined')
def test_any_changed(self, joined): def test_any_changed(self, joined, mock_config):
hooks.hooks.execute(['hooks/shared-db-relation-changed']) hooks.hooks.execute(['hooks/shared-db-relation-changed'])
self.assertTrue(self.CONFIGS.write_all.called) self.assertTrue(self.CONFIGS.write_all.called)
self.assertTrue(joined.called) self.assertTrue(joined.called)
@patch("charmhelpers.core.hookenv.config")
@patch.object(hooks, 'install') @patch.object(hooks, 'install')
@patch.object(hooks, 'any_changed') @patch.object(hooks, 'any_changed')
def test_upgrade_charm(self, changed, install): def test_upgrade_charm(self, changed, install, mock_config):
hooks.hooks.execute(['hooks/upgrade-charm']) hooks.hooks.execute(['hooks/upgrade-charm'])
self.assertTrue(changed.called) self.assertTrue(changed.called)
self.assertTrue(install.called) self.assertTrue(install.called)
@patch("charmhelpers.core.hookenv.config")
@patch.object(hooks, 'ceilometer_joined') @patch.object(hooks, 'ceilometer_joined')
def test_config_changed_no_upgrade(self, joined): def test_config_changed_no_upgrade(self, joined, mock_config):
self.openstack_upgrade_available.return_value = False self.openstack_upgrade_available.return_value = False
hooks.hooks.execute(['hooks/config-changed']) hooks.hooks.execute(['hooks/config-changed'])
self.openstack_upgrade_available.\ self.openstack_upgrade_available.\
@@ -105,8 +113,9 @@ class CeilometerHooksTest(CharmTestCase):
self.assertTrue(self.CONFIGS.write_all.called) self.assertTrue(self.CONFIGS.write_all.called)
self.assertTrue(joined.called) self.assertTrue(joined.called)
@patch("charmhelpers.core.hookenv.config")
@patch.object(hooks, 'ceilometer_joined') @patch.object(hooks, 'ceilometer_joined')
def test_config_changed_upgrade(self, joined): def test_config_changed_upgrade(self, joined, mock_config):
self.openstack_upgrade_available.return_value = True self.openstack_upgrade_available.return_value = True
hooks.hooks.execute(['hooks/config-changed']) hooks.hooks.execute(['hooks/config-changed'])
self.openstack_upgrade_available.\ self.openstack_upgrade_available.\
@@ -115,7 +124,8 @@ class CeilometerHooksTest(CharmTestCase):
self.assertTrue(self.CONFIGS.write_all.called) self.assertTrue(self.CONFIGS.write_all.called)
self.assertTrue(joined.called) self.assertTrue(joined.called)
def test_keystone_joined(self): @patch("charmhelpers.core.hookenv.config")
def test_keystone_joined(self, mock_config):
self.canonical_url.return_value = "http://thishost" self.canonical_url.return_value = "http://thishost"
self.test_config.set('region', 'myregion') self.test_config.set('region', 'myregion')
hooks.hooks.execute(['hooks/identity-service-relation-joined']) hooks.hooks.execute(['hooks/identity-service-relation-joined'])
@@ -126,7 +136,8 @@ class CeilometerHooksTest(CharmTestCase):
requested_roles=hooks.CEILOMETER_ROLE, requested_roles=hooks.CEILOMETER_ROLE,
region='myregion', relation_id=None) region='myregion', relation_id=None)
def test_ceilometer_joined(self): @patch("charmhelpers.core.hookenv.config")
def test_ceilometer_joined(self, mock_config):
self.relation_ids.return_value = ['ceilometer:0'] self.relation_ids.return_value = ['ceilometer:0']
self.get_ceilometer_context.return_value = {'test': 'data'} self.get_ceilometer_context.return_value = {'test': 'data'}
hooks.hooks.execute(['hooks/ceilometer-service-relation-joined']) hooks.hooks.execute(['hooks/ceilometer-service-relation-joined'])

View File

@@ -45,6 +45,7 @@ class CeilometerUtilsTest(CharmTestCase):
'ceilometer-agent-central', 'ceilometer-agent-central',
'ceilometer-collector', 'ceilometer-collector',
'ceilometer-api'], 'ceilometer-api'],
'/etc/haproxy/haproxy.cfg': ['haproxy'],
"/etc/apache2/sites-available/openstack_https_frontend": [ "/etc/apache2/sites-available/openstack_https_frontend": [
'apache2'], 'apache2'],
"/etc/apache2/sites-available/openstack_https_frontend.conf": [ "/etc/apache2/sites-available/openstack_https_frontend.conf": [