Redux to use agreed structure and OS templating

This commit is contained in:
James Page 2013-07-19 10:46:25 +01:00
parent f3ed95e621
commit 62a5561e69
39 changed files with 1169 additions and 1311 deletions

1
.bzrignore Normal file
View File

@ -0,0 +1 @@
.coverage

6
.coveragerc Normal file
View File

@ -0,0 +1,6 @@
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
if __name__ == .__main__.:
include=
hooks/quantum_*

View File

@ -4,6 +4,6 @@
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/quantum-gateway/hooks</path>
<path>/quantum-gateway/templates</path>
<path>/quantum-gateway/unit_tests</path>
</pydev_pathproperty>
</pydev_project>

View File

@ -1,8 +1,14 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks
@flake8 --exclude hooks/charmhelpers unit_tests
@charm proof
test:
@echo Starting tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
sync:
@charm-helper-sync -c charm-helpers-sync.yaml

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -0,0 +1,58 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert():
    """Return an (ssl_cert, ssl_key) pair for this service.

    Charm config takes precedence; when either half is missing there,
    both are discarded and all identity-service relations are scanned
    for the first cert/key advertised by a remote unit.
    """
    cert, key = config_get('ssl_cert'), config_get('ssl_key')
    if cert and key:
        return (cert, key)
    log("Inspecting identity-service relations for SSL certificate.",
        level=INFO)
    cert = key = None
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            cert = cert or relation_get('ssl_cert', rid=r_id, unit=unit)
            key = key or relation_get('ssl_key', rid=r_id, unit=unit)
    return (cert, key)
def get_ca_cert():
    """Return the first CA certificate advertised on any
    identity-service relation, or None when none is set."""
    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    ca_cert = None
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            ca_cert = ca_cert or relation_get('ca_cert', rid=r_id, unit=unit)
    return ca_cert
def install_ca_cert(ca_cert):
    """Install a CA certificate into the system trust store and refresh
    the certificate bundle. No-op when ca_cert is empty/None."""
    if not ca_cert:
        return
    path = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
    with open(path, 'w') as crt:
        crt.write(ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])

View File

@ -1,196 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from utils import (
relation_ids,
relation_list,
relation_get,
render_template,
juju_log,
config_get,
install,
get_host_ip,
restart
)
from cluster_utils import https
import os
import subprocess
from base64 import b64decode
APACHE_SITE_DIR = "/etc/apache2/sites-available"
SITE_TEMPLATE = "apache2_site.tmpl"
RELOAD_CHECK = "To activate the new configuration"
def get_cert():
    # Prefer the SSL certificate/key pair from charm config; when either
    # half is missing, discard both and fall back to scanning the
    # identity-service relations for values published by remote units.
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        juju_log('INFO',
                 "Inspecting identity-service relations for SSL certificate.")
        cert = key = None
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                # keep the first cert/key seen across all relations
                if not cert:
                    cert = relation_get('ssl_cert',
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get('ssl_key',
                                       rid=r_id, unit=unit)
    return (cert, key)
def get_ca_cert():
    # Return the first CA certificate published on any identity-service
    # relation, or None when no unit has set one.
    ca_cert = None
    juju_log('INFO',
             "Inspecting identity-service relations for CA SSL certificate.")
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if not ca_cert:
                ca_cert = relation_get('ca_cert',
                                       rid=r_id, unit=unit)
    return ca_cert
def install_ca_cert(ca_cert):
    # Install the CA cert system-wide so local services trust keystone's
    # SSL endpoint; no-op when ca_cert is empty/None.
    if ca_cert:
        with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
                  'w') as crt:
            crt.write(ca_cert)
        subprocess.check_call(['update-ca-certificates', '--fresh'])
def enable_https(port_maps, namespace, cert, key, ca_cert=None):
    '''
    For a given number of port mappings, configures apache2
    HTTPs local reverse proxying using certificates and keys provided in
    either configuration data (preferred) or relation data. Assumes ports
    are not in use (calling charm should ensure that).

    port_maps: dict: external to internal port mappings
    namespace: str: name of charm
    '''
    def _write_if_changed(path, new_content):
        # Only rewrite path when its current content differs; returns
        # True when a write happened (drives the apache restart below).
        content = None
        if os.path.exists(path):
            with open(path, 'r') as f:
                content = f.read().strip()
        if content != new_content:
            with open(path, 'w') as f:
                f.write(new_content)
            return True
        else:
            return False

    juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
    http_restart = False

    # cert/key/ca_cert arrive base64-encoded via config or relation data.
    if cert:
        cert = b64decode(cert)
    if key:
        key = b64decode(key)
    if ca_cert:
        ca_cert = b64decode(ca_cert)
    if not cert and not key:
        juju_log('ERROR',
                 "Expected but could not find SSL certificate data, not "
                 "configuring HTTPS!")
        return False
    install('apache2')
    # a2enmod prints RELOAD_CHECK when a module was newly enabled.
    if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
                                                'proxy', 'proxy_http']):
        http_restart = True

    ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
    if not os.path.exists(ssl_dir):
        os.makedirs(ssl_dir)

    if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
        http_restart = True
    if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
        http_restart = True
    # private key must not be world-readable
    os.chmod(os.path.join(ssl_dir, 'key'), 0600)

    install_ca_cert(ca_cert)

    sites_dir = '/etc/apache2/sites-available'
    for ext_port, int_port in port_maps.items():
        juju_log('INFO',
                 'Creating apache2 reverse proxy vhost'
                 ' for {}:{}'.format(ext_port,
                                     int_port))
        site = "{}_{}".format(namespace, ext_port)
        site_path = os.path.join(sites_dir, site)
        with open(site_path, 'w') as fsite:
            context = {
                "ext": ext_port,
                "int": int_port,
                "namespace": namespace,
                "private_address": get_host_ip()
            }
            fsite.write(render_template(SITE_TEMPLATE,
                                        context))

        # a2ensite prints RELOAD_CHECK when the site was newly enabled.
        if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
            http_restart = True

    if http_restart:
        restart('apache2')

    return True
def disable_https(port_maps, namespace):
    '''
    Ensure HTTPS reverse proxying is disabled for given port mappings

    port_maps: dict: of ext -> int port mappings
    namespace: str: name of charm
    '''
    juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))

    # Nothing to do when apache was never configured for this namespace.
    if (not os.path.exists('/etc/apache2') or
            not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
        return

    http_restart = False
    for ext_port in port_maps.keys():
        if os.path.exists(os.path.join(APACHE_SITE_DIR,
                                       "{}_{}".format(namespace,
                                                      ext_port))):
            juju_log('INFO',
                     "Disabling HTTPS reverse proxy"
                     " for {} {}.".format(namespace,
                                          ext_port))
            # a2dissite prints RELOAD_CHECK when the site was active.
            if (RELOAD_CHECK in
                    subprocess.check_output(['a2dissite',
                                             '{}_{}'.format(namespace,
                                                            ext_port)])):
                http_restart = True

    if http_restart:
        # BUG FIX: restart() takes *services; passing a list produced an
        # invalid ['service', ['apache2'], 'restart'] command line.
        restart('apache2')
def setup_https(port_maps, namespace, cert, key, ca_cert=None):
    '''
    Ensures HTTPS is either enabled or disabled for given port
    mapping.

    port_maps: dict: of ext -> int port mappings
    namespace: str: name of charm
    '''
    # BUG FIX: 'https' is a function imported from cluster_utils; the
    # previous 'if not https:' tested the function object itself (always
    # truthy), so enable_https() ran unconditionally. Call it instead.
    if not https():
        disable_https(port_maps, namespace)
    else:
        enable_https(port_maps, namespace, cert, key, ca_cert)

View File

@ -9,10 +9,31 @@
#
import commands
import subprocess
import os
import shutil
import utils
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
)
from charmhelpers.core.host import (
apt_install,
mount,
mounts,
service_start,
service_stop,
umount,
)
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'
@ -24,23 +45,30 @@ CEPH_CONF = """[global]
"""
def execute(cmd):
subprocess.check_call(cmd)
def execute_shell(cmd):
subprocess.check_call(cmd, shell=True)
def running(service):
# this local util can be dropped as soon the following branch lands
# in lp:charm-helpers
# https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
try:
output = check_output(['service', service, 'status'])
except CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def install():
ceph_dir = "/etc/ceph"
if not os.path.isdir(ceph_dir):
os.mkdir(ceph_dir)
utils.install('ceph-common')
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
(service, pool))
return rbd_img in out
@ -56,8 +84,8 @@ def create_rbd_image(service, pool, image, sizemb):
service,
'--pool',
pool
]
execute(cmd)
]
check_call(cmd)
def pool_exists(service, name):
@ -72,8 +100,8 @@ def create_pool(service, name):
service,
'mkpool',
name
]
execute(cmd)
]
check_call(cmd)
def keyfile_path(service):
@ -87,35 +115,34 @@ def keyring_path(service):
def create_keyring(service, key):
keyring = keyring_path(service)
if os.path.exists(keyring):
utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
log('ceph: Keyring exists at %s.' % keyring, level=INFO)
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.%s' % service,
'--add-key=%s' % key
]
execute(cmd)
utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
]
check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
# create a file containing the key
keyfile = keyfile_path(service)
if os.path.exists(keyfile):
utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
fd = open(keyfile, 'w')
fd.write(key)
fd.close()
utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
hosts = []
for r_id in utils.relation_ids('ceph'):
for unit in utils.relation_list(r_id):
hosts.append(utils.relation_get('private-address',
unit=unit, rid=r_id))
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
@ -144,26 +171,24 @@ def map_block_storage(service, pool, image):
service,
'--secret',
keyfile_path(service),
]
execute(cmd)
]
check_call(cmd)
def filesystem_mounted(fs):
return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
return fs in [f for m, f in mounts()]
def make_filesystem(blk_device, fstype='ext4'):
utils.juju_log('INFO',
'ceph: Formatting block device %s as filesystem %s.' %\
(blk_device, fstype))
log('ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
cmd = ['mkfs', '-t', fstype, blk_device]
execute(cmd)
check_call(cmd)
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
# mount block device into /mnt
cmd = ['mount', '-t', fstype, blk_device, '/mnt']
execute(cmd)
mount(blk_device, '/mnt')
# copy data to /mnt
try:
@ -172,29 +197,27 @@ def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
pass
# umount block device
cmd = ['umount', '/mnt']
execute(cmd)
umount('/mnt')
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
execute(cmd)
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
execute(cmd)
check_call(cmd)
# TODO: re-use
def modprobe_kernel_module(module):
utils.juju_log('INFO', 'Loading kernel module')
log('ceph: Loading kernel module', level=INFO)
cmd = ['modprobe', module]
execute(cmd)
check_call(cmd)
cmd = 'echo %s >> /etc/modules' % module
execute_shell(cmd)
check_call(cmd, shell=True)
def copy_files(src, dst, symlinks=False, ignore=None):
@ -222,15 +245,15 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
log('ceph: Creating new pool %s.' % pool, level=INFO)
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
map_block_storage(service, pool, rbd_img)
# make file system
@ -244,13 +267,12 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
make_filesystem(blk_device, fstype)
for svc in system_services:
if utils.running(svc):
utils.juju_log('INFO',
'Stopping services %s prior to migrating '\
'data' % svc)
utils.stop(svc)
if running(svc):
log('Stopping services %s prior to migrating data.' % svc,
level=INFO)
service_stop(svc)
place_data_on_ceph(service, blk_device, mount_point, fstype)
for svc in system_services:
utils.start(svc)
service_start(svc)

View File

@ -1,24 +1,26 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from utils import (
juju_log,
relation_ids,
relation_list,
relation_get,
get_unit_hostname,
config_get
)
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
)
class HAIncompleteConfig(Exception):
pass
@ -39,7 +41,7 @@ def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
@ -71,12 +73,12 @@ def oldest_peer(peers):
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
juju_log('INFO', 'Deferring action to CRM leader.')
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
juju_log('INFO', 'Deferring action to oldest service unit.')
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
@ -153,7 +155,7 @@ def get_hacluster_config():
missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None]
if missing:
juju_log('Insufficient config data to configure hacluster.')
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
return conf

View File

@ -1,55 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from utils import (
relation_ids,
relation_list,
relation_get,
unit_get,
reload,
render_template
)
import os
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
HAPROXY_DEFAULT = '/etc/default/haproxy'
def configure_haproxy(service_ports):
    '''
    Configure HAProxy based on the current peers in the service
    cluster using the provided port map:
        "swift": [ 8080, 8070 ]
    HAproxy will also be reloaded/started if required

    service_ports: dict: dict of lists of [ frontend, backend ]
    '''
    # Collect each peer's private-address keyed by a haproxy-friendly
    # unit name ('unit/N' -> 'unit-N'), starting with this unit itself.
    cluster_hosts = {}
    cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
        unit_get('private-address')
    for r_id in relation_ids('cluster'):
        for unit in relation_list(r_id):
            cluster_hosts[unit.replace('/', '-')] = \
                relation_get(attribute='private-address',
                             rid=r_id,
                             unit=unit)
    context = {
        'units': cluster_hosts,
        'service_ports': service_ports
    }
    # Render the config from the template named after the conf file,
    # enable haproxy in /etc/default, then reload it.
    with open(HAPROXY_CONF, 'w') as f:
        f.write(render_template(os.path.basename(HAPROXY_CONF),
                                context))
    with open(HAPROXY_DEFAULT, 'w') as f:
        f.write('ENABLED=1')
    reload('haproxy')

View File

@ -1,333 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Paul Collins <paul.collins@canonical.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import json
import os
import subprocess
import socket
import sys
def do_hooks(hooks):
    """Dispatch to the hook function registered for the currently
    executing hook (the script's basename), logging a notice for
    unknown hooks instead of failing."""
    hook = os.path.basename(sys.argv[0])
    if hook in hooks:
        hooks[hook]()
    else:
        juju_log('INFO',
                 "This charm doesn't know how to handle '{}'.".format(hook))
def install(*pkgs):
    """apt-get install the given packages non-interactively."""
    subprocess.check_call(['apt-get', '-y', 'install'] + list(pkgs))
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
install('python-jinja2')
import jinja2
try:
import dns.resolver
except ImportError:
install('python-dnspython')
import dns.resolver
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    """Render the named jinja2 template from template_dir with the
    given context dict and return the resulting string."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir)
    )
    return env.get_template(template_name).render(context)
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
CLOUD_ARCHIVE_POCKETS = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly'
}
def configure_source():
    """Configure the apt source for OpenStack packages from the
    'openstack-origin' charm config option, then run apt-get update.

    Supported option formats:
      ppa:<ppa-name>
      cloud:<series>-<openstack-release>[/pocket]
      deb <apt line>[|<gpg key id>]
    """
    source = str(config_get('openstack-origin'))
    if not source:
        return
    if source.startswith('ppa:'):
        cmd = [
            'add-apt-repository',
            source
        ]
        subprocess.check_call(cmd)
    if source.startswith('cloud:'):
        # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
        # cloud:precise-folsom/updates or cloud:precise-folsom/proposed
        install('ubuntu-cloud-keyring')
        pocket = source.split(':')[1]
        pocket = pocket.split('-')[1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
    if source.startswith('deb'):
        num_parts = len(source.split('|'))
        if num_parts == 2:
            (apt_line, key) = source.split('|')
            # BUG FIX: '--keyserver' and its value must be separate argv
            # elements; they were previously joined into one string,
            # which apt-key/gpg rejects as an unknown option.
            cmd = [
                'apt-key',
                'adv', '--keyserver', 'keyserver.ubuntu.com',
                '--recv-keys', key
            ]
            subprocess.check_call(cmd)
        elif num_parts == 1:
            apt_line = source
        with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
            apt.write(apt_line + "\n")
    cmd = [
        'apt-get',
        'update'
    ]
    subprocess.check_call(cmd)
# Protocols
TCP = 'TCP'
UDP = 'UDP'
def expose(port, protocol='TCP'):
    """Open port/protocol to external traffic via the open-port tool."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
def juju_log(severity, message):
    """Emit message to the juju log at the given severity level."""
    subprocess.check_call(['juju-log', '--log-level', severity, message])
# Module-level store for memoised hook-tool results; one entry per
# (func, args, kwargs) combination.
cache = {}


def cached(func):
    """Memoise func by its stringified (func, args, kwargs) key.

    Hook tools fork a subprocess per call; caching their results keeps
    repeated lookups within a single hook execution cheap.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        key = str((func, args, kwargs))
        try:
            return cache[key]
        except KeyError:
            res = func(*args, **kwargs)
            cache[key] = res
            return res
    return wrapper
@cached
def relation_ids(relation):
    """Return the relation ids for the named relation as a list
    (possibly empty).

    NOTE: the previous code compared the split() result (a list)
    against "" intending to return None when there are no relations;
    that branch could never fire, and callers iterate the result
    directly, so the empty list is kept as the contract and the dead
    branch removed.
    """
    cmd = [
        'relation-ids',
        relation
    ]
    return str(subprocess.check_output(cmd)).split()
@cached
def relation_list(rid):
    """Return the unit names on relation rid as a list (possibly
    empty).

    NOTE: the previous code compared the split() result (a list)
    against "" intending to return None for an empty relation; that
    branch could never fire, and callers iterate the result directly,
    so the empty list is kept as the contract and the dead branch
    removed.
    """
    cmd = [
        'relation-list',
        '-r', rid,
    ]
    return str(subprocess.check_output(cmd)).split()
@cached
def relation_get(attribute, unit=None, rid=None):
    """Fetch attribute from relation data via relation-get; returns
    None when the value is unset/empty."""
    cmd = ['relation-get']
    if rid:
        cmd.extend(['-r', rid])
    cmd.append(attribute)
    if unit:
        cmd.append(unit)
    value = subprocess.check_output(cmd).strip()  # IGNORE:E1103
    return value if value != "" else None
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
    """Obtain all relation data as dict by way of JSON.

    BUG FIX: JUJU_REMOTE_UNIT is now always restored after the
    override — removed entirely when it was previously unset, and
    restored even if relation-get fails. The old code leaked the
    override whenever no prior value existed or an exception was
    raised.
    """
    cmd = [
        'relation-get', '--format=json'
    ]
    if relation_id:
        cmd.append('-r')
        cmd.append(relation_id)
    remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
    if remote_unit:
        os.environ['JUJU_REMOTE_UNIT'] = remote_unit
    try:
        j = subprocess.check_output(cmd)
    finally:
        if remote_unit:
            if remote_unit_orig is None:
                del os.environ['JUJU_REMOTE_UNIT']
            else:
                os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
    d = json.loads(j)
    settings = {}
    # convert unicode to strings
    for k, v in d.iteritems():
        settings[str(k)] = str(v)
    return settings
def relation_set(**kwargs):
    """Publish relation values via relation-set. The special 'rid'
    kwarg selects the relation id; all other kwargs become key=value
    settings."""
    cmd = ['relation-set']
    settings = []
    for name, value in kwargs.items():
        if name == 'rid':
            if value:
                cmd.extend(['-r', value])
        else:
            settings.append('{}={}'.format(name, value))
    subprocess.check_call(cmd + settings)
@cached
def unit_get(attribute):
    """Return this unit's attribute via unit-get, or None when the
    value is empty."""
    value = subprocess.check_output(['unit-get',
                                     attribute]).strip()  # IGNORE:E1103
    return None if value == "" else value
@cached
def config_get(attribute):
    """Return the charm config value for attribute, or None when the
    option is not defined."""
    output = subprocess.check_output(
        ['config-get', '--format', 'json']).strip()  # IGNORE:E1103
    try:
        return json.loads(output)[attribute]
    except KeyError:
        return None
@cached
def get_unit_hostname():
    """Return this machine's hostname (socket.gethostname)."""
    return socket.gethostname()
@cached
def get_host_ip(hostname=None):
    """Resolve hostname (default: this unit's private-address) to an
    IPv4 address. Returns the input unchanged when it is already a
    dotted-quad, or None when DNS yields no answer."""
    hostname = hostname or unit_get('private-address')
    try:
        socket.inet_aton(hostname)  # already an IPv4 literal?
    except socket.error:
        answers = dns.resolver.query(hostname, 'A')
        return answers[0].address if answers else None
    return hostname
def _svc_control(service, action):
    # Thin wrapper over the system 'service' tool.
    subprocess.check_call(['service', service, action])


def restart(*services):
    """Restart each of the named system services."""
    for service in services:
        _svc_control(service, 'restart')


def stop(*services):
    """Stop each of the named system services."""
    for service in services:
        _svc_control(service, 'stop')


def start(*services):
    """Start each of the named system services."""
    for service in services:
        _svc_control(service, 'start')


def reload(*services):
    """Reload each named service, falling back to restart on failure."""
    for service in services:
        try:
            _svc_control(service, 'reload')
        except subprocess.CalledProcessError:
            # Reload failed - either service does not support reload
            # or it was not running - restart will fixup most things
            _svc_control(service, 'restart')
def running(service):
    """Return True when 'service <name> status' reports the service as
    running; False otherwise, including when the status command fails."""
    try:
        output = subprocess.check_output(['service', service, 'status'])
    except subprocess.CalledProcessError:
        return False
    return "start/running" in output or "is running" in output
def is_relation_made(relation, key='private-address'):
    """Return True when any remote unit on the named relation has
    published a truthy value for key."""
    return any(
        relation_get(key, rid=r_id, unit=unit)
        for r_id in (relation_ids(relation) or [])
        for unit in (relation_list(r_id) or [])
    )

View File

@ -0,0 +1,271 @@
import os
from base64 import b64decode
from subprocess import (
check_call
)
from charmhelpers.core.hookenv import (
config,
local_unit,
log,
relation_get,
relation_ids,
related_units,
unit_get,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_api_port,
determine_haproxy_port,
https,
is_clustered,
peer_units,
)
from charmhelpers.contrib.hahelpers.apache import (
get_cert,
get_ca_cert,
)
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
class OSContextError(Exception):
pass
def context_complete(ctxt):
    """Return True when every value in ctxt is usable; log the missing
    keys and return False when any entry is None or the empty string."""
    _missing = [k for k, v in ctxt.iteritems() if v is None or v == '']
    if _missing:
        log('Missing required data: %s' % ' '.join(_missing), level='INFO')
        return False
    return True
class OSContextGenerator(object):
    """Base class for template context generators.

    Subclasses set 'interfaces' to the relation interfaces they depend
    on and implement __call__ to return a context dict (empty when the
    required relation data is not yet complete).
    """
    # relation interfaces this context depends upon
    interfaces = []

    def __call__(self):
        raise NotImplementedError
class SharedDBContext(OSContextGenerator):
    """Generates database connection context from the shared-db
    relation plus the charm's 'database'/'database-user' config."""
    interfaces = ['shared-db']

    def __call__(self):
        log('Generating template context for shared-db')
        conf = config()
        try:
            database = conf['database']
            username = conf['database-user']
        except KeyError as e:
            log('Could not generate shared_db context. '
                'Missing required charm config options: %s.' % e)
            raise OSContextError
        ctxt = {}
        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                # later units overwrite earlier ones; the last relation
                # seen provides the context
                ctxt = {
                    'database_host': relation_get('db_host', rid=rid,
                                                  unit=unit),
                    'database': database,
                    'database_user': username,
                    'database_password': relation_get('password', rid=rid,
                                                      unit=unit)
                }
        if not context_complete(ctxt):
            return {}
        return ctxt
class IdentityServiceContext(OSContextGenerator):
    """Generates keystone endpoint/credential context from the
    identity-service relation."""
    interfaces = ['identity-service']

    def __call__(self):
        log('Generating template context for identity-service')
        ctxt = {}
        for rid in relation_ids('identity-service'):
            for unit in related_units(rid):
                # later units overwrite earlier ones; the last relation
                # seen provides the context
                ctxt = {
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'service_host': relation_get('service_host', rid=rid,
                                                 unit=unit),
                    'auth_host': relation_get('auth_host', rid=rid, unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'admin_tenant_name': relation_get('service_tenant',
                                                      rid=rid, unit=unit),
                    'admin_user': relation_get('service_username', rid=rid,
                                               unit=unit),
                    'admin_password': relation_get('service_password', rid=rid,
                                                   unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
        if not context_complete(ctxt):
            return {}
        return ctxt
class AMQPContext(OSContextGenerator):
    """Generates rabbitmq connection context from the amqp relation
    plus the 'rabbit-user'/'rabbit-vhost' charm config options."""
    interfaces = ['amqp']

    def __call__(self):
        log('Generating template context for amqp')
        conf = config()
        try:
            username = conf['rabbit-user']
            vhost = conf['rabbit-vhost']
        except KeyError as e:
            # BUG FIX: message previously said 'shared_db context'
            # (copy/paste from SharedDBContext).
            log('Could not generate amqp context. '
                'Missing required charm config options: %s.' % e)
            raise OSContextError
        ctxt = {}
        for rid in relation_ids('amqp'):
            for unit in related_units(rid):
                # a clustered rabbitmq advertises its VIP rather than a
                # per-unit private address
                if relation_get('clustered', rid=rid, unit=unit):
                    rabbitmq_host = relation_get('vip', rid=rid, unit=unit)
                else:
                    rabbitmq_host = relation_get('private-address',
                                                 rid=rid, unit=unit)
                ctxt = {
                    'rabbitmq_host': rabbitmq_host,
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                }
        if not context_complete(ctxt):
            return {}
        return ctxt
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates from the
    ceph relation (monitor hosts and auth mode)."""
    interfaces = ['ceph']

    def __call__(self):
        '''This generates context for /etc/ceph/ceph.conf templates'''
        # BUG FIX: log message previously read 'tmeplate'.
        log('Generating template context for ceph')
        mon_hosts = []
        auth = None
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                mon_hosts.append(relation_get('private-address', rid=rid,
                                              unit=unit))
                # last unit seen wins for the auth mode
                auth = relation_get('auth', rid=rid, unit=unit)
        ctxt = {
            'mon_hosts': ' '.join(mon_hosts),
            'auth': auth,
        }
        if not context_complete(ctxt):
            return {}
        return ctxt
class HAProxyContext(OSContextGenerator):
    """Generates the peer half of the haproxy template context; charms
    provide their own generator for the port mapping half."""
    interfaces = ['cluster']

    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster. Each charm needs to include
        its own context generator that describes the port mapping.
        '''
        if not relation_ids('cluster'):
            return {}

        # Map juju unit names ('unit/N') to haproxy-friendly names
        # ('unit-N'), starting with this unit itself.
        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {
            'units': cluster_hosts,
        }
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
class ApacheSSLContext(OSContextGenerator):
    """
    Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like:
    {
        'namespace': 'cinder',
        'private_address': 'iscsi.mycinderhost.com',
        'endpoints': [(8776, 8766), (8777, 8767)]
    }
    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        # Enable the apache modules needed for SSL reverse proxying.
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self):
        # Write the base64-encoded cert/key (from config or the
        # identity-service relation) into this charm's ssl dir, and
        # install any CA certificate system-wide.
        if not os.path.isdir('/etc/apache2/ssl'):
            os.mkdir('/etc/apache2/ssl')
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        if not os.path.isdir(ssl_dir):
            os.mkdir(ssl_dir)
        cert, key = get_cert()
        with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
            cert_out.write(b64decode(cert))
        with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
            key_out.write(b64decode(key))
        ca_cert = get_ca_cert()
        if ca_cert:
            with open(CA_CERT_PATH, 'w') as ca_out:
                ca_out.write(b64decode(ca_cert))

    def __call__(self):
        # allow subclasses to set a single port as a plain string
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if (not self.external_ports or not https()):
            return {}
        self.configure_cert()
        self.enable_modules()
        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': []
        }
        for ext_port in self.external_ports:
            # proxy to haproxy when clustered, else straight to the API
            if peer_units() or is_clustered():
                int_port = determine_haproxy_port(ext_port)
            else:
                int_port = determine_api_port(ext_port)
            portmap = (int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
        return ctxt

View File

@ -0,0 +1,2 @@
# dummy __init__.py to fool syncer into thinking this is a syncable python
# module

View File

@ -0,0 +1,261 @@
import os
from charmhelpers.core.host import apt_install
from charmhelpers.core.hookenv import (
log,
ERROR,
INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
from jinja2 import FileSystemLoader, ChoiceLoader, Environment
except ImportError:
# python-jinja2 may not be installed yet, or we're running unittests.
FileSystemLoader = ChoiceLoader = Environment = None
class OSConfigException(Exception):
pass
def get_loader(templates_dir, os_release):
    """
    Create a jinja2.ChoiceLoader containing template dirs up to
    and including os_release. If directory template directory
    is missing at templates_dir, it will be omitted from the loader.
    templates_dir is added to the bottom of the search list as a base
    loading dir.

    A charm may also ship a templates dir with this module
    and it will be appended to the bottom of the search list, eg:
    hooks/charmhelpers/contrib/openstack/templates.

    :param templates_dir: str: Base template directory containing release
                               sub-directories.
    :param os_release   : str: OpenStack release codename to construct template
                               loader.

    :returns            : jinja2.ChoiceLoader constructed with a list of
                          jinja2.FilesystemLoaders, ordered in descending
                          order by OpenStack release.
    """
    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                 for rel in OPENSTACK_CODENAMES.itervalues()]

    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException

    # the bottom contains templates_dir and possibly a common templates dir
    # shipped with the helper.
    loaders = [FileSystemLoader(templates_dir)]
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(helper_templates):
        loaders.append(FileSystemLoader(helper_templates))

    # release-specific dirs are pushed to the front of the search list,
    # stopping once os_release itself has been added
    for rel, tmpl_dir in tmpl_dirs:
        if os.path.isdir(tmpl_dir):
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break
    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)
class OSConfigTemplate(object):
    """
    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those generators.
    """
    def __init__(self, config_file, contexts):
        # target config file path this template renders to
        self.config_file = config_file

        # accept either a single callable or a list of generators
        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        # interfaces whose contexts were complete on the last render
        self._complete_contexts = []

    def context(self):
        """Merge the dicts produced by all registered context
        generators, recording which interfaces were complete."""
        ctxt = {}
        for context in self.contexts:
            _ctxt = context()
            if _ctxt:
                ctxt.update(_ctxt)
                # track interfaces for every complete context.
                [self._complete_contexts.append(interface)
                 for interface in context.interfaces
                 if interface not in self._complete_contexts]
        return ctxt

    def complete_contexts(self):
        '''
        Return a list of interfaces that have satisfied contexts.
        '''
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts
class OSConfigRenderer(object):
    """
    This class provides a common templating system to be used by OpenStack
    charms.  It is intended to help charms share common code and templates,
    and ease the burden of managing config templates across multiple OpenStack
    releases.

    Basic usage:
        # import some common context generators from charmhelpers
        from charmhelpers.contrib.openstack import context

        # Create a renderer object for a specific OS release.
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        # register some config files with context generators.
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.register(config_file='/etc/nova/api-paste.ini',
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs
        configs.write_all()

    Details:

    OpenStack Releases and template loading
    ---------------------------------------
    When the object is instantiated, it is associated with a specific OS
    release.  This dictates how the template loader will be constructed.

    The constructed loader attempts to load the template from several places
    in the following order:
        - from the most recent OS release-specific template dir (if one exists)
        - the base templates_dir
        - a template directory shipped in the charm with this helper file.

    For the example above, '/tmp/templates' contains the following structure:
        /tmp/templates/nova.conf
        /tmp/templates/api-paste.ini
        /tmp/templates/grizzly/api-paste.ini
        /tmp/templates/havana/api-paste.ini

    Since it was registered with the grizzly release, it first searches
    the grizzly directory for nova.conf, then the templates dir.

    When writing api-paste.ini, it will find the template in the grizzly
    directory.

    If the object were created with folsom, it would fall back to the
    base templates dir for its api-paste.ini template.

    This system should help manage changes in config files through
    openstack releases, allowing charms to fall back to the most recently
    updated config template for a given release.

    The haproxy.conf, since it is not shipped in the templates dir, will
    be loaded from the module directory's template directory, eg
    $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
    us to ship common templates (haproxy, apache) with the helpers.

    Context generators
    ---------------------------------------
    Context generators are used to generate template contexts during hook
    execution.  Doing so may require inspecting service relations, charm
    config, etc.  When registered, a config file is associated with a list
    of generators.  When a template is rendered and written, all context
    generators are called in a chain to generate the context dictionary
    passed to the jinja2 template.  See context.py for more info.
    """
    def __init__(self, templates_dir, openstack_release):
        """
        :param templates_dir: base directory containing config templates.
        :param openstack_release: OS release codename templates are resolved
                                  against (eg, 'grizzly').
        :raises OSConfigException: if templates_dir does not exist.
        """
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException

        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        # maps config file path -> OSConfigTemplate
        self.templates = {}
        self._tmpl_env = None

        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # if this code is running, the object is created pre-install hook.
            # jinja2 shouldn't get touched until the module is reloaded on next
            # hook execution, with proper jinja2 bits successfully imported.
            apt_install('python-jinja2')

    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)

    def _get_tmpl_env(self):
        # Lazily construct the jinja2 environment so jinja2 is only touched
        # once it is known to be importable (see __init__).
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)

    def _get_template(self, template):
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template

    def render(self, config_file):
        """
        Render a registered config file to a string, raises if the config
        file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()
        # templates are looked up by the config file's basename within the
        # loader search path.
        _tmpl = os.path.basename(config_file)
        log('Rendering from template: %s' % _tmpl, level=INFO)
        template = self._get_template(_tmpl)
        return template.render(ctxt)

    def write(self, config_file):
        """
        Write a single config file, raises if config file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        with open(config_file, 'wb') as out:
            out.write(self.render(config_file))
        log('Wrote template %s.' % config_file, level=INFO)

    def write_all(self):
        """
        Write out all registered config files.
        """
        for config_file in self.templates:
            self.write(config_file)

    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template loader
        based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()

    def complete_contexts(self):
        '''
        Returns a list of context interfaces that yield a complete context.
        '''
        interfaces = []
        for template in self.templates.itervalues():
            interfaces.extend(template.complete_contexts())
        return interfaces

View File

@ -2,6 +2,8 @@
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
import apt_pkg as apt
import subprocess
import os
@ -9,48 +11,51 @@ import sys
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
)
from charmhelpers.core.host import (
lsb_release,
apt_install,
)
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
ubuntu_openstack_release = {
'oneiric': 'diablo',
'precise': 'essex',
'quantal': 'folsom',
'raring': 'grizzly',
}
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
])
openstack_codenames = {
'2011.2': 'diablo',
'2012.1': 'essex',
'2012.2': 'folsom',
'2013.1': 'grizzly',
'2013.2': 'havana',
}
OPENSTACK_CODENAMES = OrderedDict([
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
])
# The ugly duckling
swift_codenames = {
SWIFT_CODENAMES = {
'1.4.3': 'diablo',
'1.4.8': 'essex',
'1.7.4': 'folsom',
'1.7.6': 'grizzly',
'1.7.7': 'grizzly',
'1.8.0': 'grizzly',
'1.9.0': 'havana',
'1.9.1': 'havana',
}
def juju_log(msg):
subprocess.check_call(['juju-log', msg])
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg)
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
@ -60,7 +65,7 @@ def get_os_codename_install_source(src):
rel = ''
if src == 'distro':
try:
rel = ubuntu_openstack_release[ubuntu_rel]
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
e = 'Could not derive openstack release for '\
'this Ubuntu release: %s' % ubuntu_rel
@ -74,7 +79,7 @@ def get_os_codename_install_source(src):
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in openstack_codenames.iteritems():
for k, v in OPENSTACK_CODENAMES.iteritems():
if v in src:
return v
@ -87,7 +92,7 @@ def get_os_version_install_source(src):
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return openstack_codenames[vers]
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
@ -95,7 +100,7 @@ def get_os_codename_version(vers):
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in openstack_codenames.iteritems():
for k, v in OPENSTACK_CODENAMES.iteritems():
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
@ -103,17 +108,26 @@ def get_os_version_codename(codename):
error_out(e)
def get_os_codename_package(pkg, fatal=True):
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
cache = apt.Cache()
try:
pkg = cache[pkg]
pkg = cache[package]
except:
if not fatal:
return None
e = 'Could not determine version of installed package: %s' % pkg
# the package is unknown to the current apt cache.
e = 'Could not determine version of package with no installation '\
'candidate: %s' % package
error_out(e)
if not pkg.current_ver:
if not fatal:
return None
# package is known, but no version is currently installed.
e = 'Could not determine version of uninstalled package: %s' % package
error_out(e)
vers = apt.UpstreamVersion(pkg.current_ver.ver_str)
@ -121,10 +135,10 @@ def get_os_codename_package(pkg, fatal=True):
try:
if 'swift' in pkg.name:
vers = vers[:5]
return swift_codenames[vers]
return SWIFT_CODENAMES[vers]
else:
vers = vers[:6]
return openstack_codenames[vers]
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
@ -138,9 +152,9 @@ def get_os_version_package(pkg, fatal=True):
return None
if 'swift' in pkg:
vers_map = swift_codenames
vers_map = SWIFT_CODENAMES
else:
vers_map = openstack_codenames
vers_map = OPENSTACK_CODENAMES
for version, cname in vers_map.iteritems():
if cname == codename:
@ -201,7 +215,10 @@ def configure_installation_source(rel):
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly'
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
}
try:
@ -211,8 +228,7 @@ def configure_installation_source(rel):
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
# TODO: Replace key import with cloud archive keyring pkg.
import_key(CLOUD_ARCHIVE_KEY_ID)
apt_install('ubuntu-cloud-keyring', fatal=True)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
@ -228,8 +244,9 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
updated config information necessary to perform health checks or
service changes.
"""
unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path)
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")

View File

@ -197,7 +197,7 @@ def relation_ids(reltype=None):
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
return json.loads(subprocess.check_output(relid_cmd_line))
return json.loads(subprocess.check_output(relid_cmd_line)) or []
return []
@ -208,7 +208,7 @@ def related_units(relid=None):
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
units_cmd_line.extend(('-r', relid))
return json.loads(subprocess.check_output(units_cmd_line))
return json.loads(subprocess.check_output(units_cmd_line)) or []
@cached
@ -335,5 +335,6 @@ class Hooks(object):
return decorated
return wrapper
def charm_dir():
return os.environ.get('CHARM_DIR')

View File

@ -14,7 +14,7 @@ import hashlib
from collections import OrderedDict
from hookenv import log, execution_environment
from hookenv import log
def service_start(service_name):
@ -39,6 +39,18 @@ def service(action, service_name):
return subprocess.call(cmd) == 0
def service_running(service):
try:
output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user"""
try:
@ -74,36 +86,33 @@ def add_user_to_group(username, group):
def rsync(from_path, to_path, flags='-r', options=None):
"""Replicate the contents of a path"""
context = execution_environment()
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
cmd.extend(options)
cmd.append(from_path.format(**context))
cmd.append(to_path.format(**context))
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd).strip()
def symlink(source, destination):
"""Create a symbolic link"""
context = execution_environment()
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source.format(**context),
destination.format(**context)
source,
destination,
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0555, force=False):
"""Create a directory"""
context = execution_environment()
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner.format(**context)).pw_uid
gid = grp.getgrnam(group.format(**context)).gr_gid
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
if os.path.exists(realpath):
if force and not os.path.isdir(realpath):
@ -114,28 +123,15 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False):
os.chown(realpath, uid, gid)
def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs):
def write_file(path, content, owner='root', group='root', perms=0444):
"""Create or overwrite a file with the contents of a string"""
context = execution_environment()
context.update(kwargs)
log("Writing file {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner.format(**context)).pw_uid
gid = grp.getgrnam(group.format(**context)).gr_gid
with open(path.format(**context), 'w') as target:
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
with open(path, 'w') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
target.write(fmtstr.format(**context))
def render_template_file(source, destination, **kwargs):
"""Create or overwrite a file using a template"""
log("Rendering template {} for {}".format(source,
destination))
context = execution_environment()
with open(source.format(**context), 'r') as template:
write_file(destination.format(**context), template.read(),
**kwargs)
target.write(content)
def filter_installed_packages(packages):

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1,61 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Paul Collins <paul.collins@canonical.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import socket
from charmhelpers.core.host import (
apt_install
)
from charmhelpers.core.hookenv import (
unit_get,
cached
)
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
apt_install('python-jinja2', fatal=True)
import jinja2
try:
import dns.resolver
except ImportError:
apt_install('python-dnspython', fatal=True)
import dns.resolver
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
@cached
def get_unit_hostname():
return socket.gethostname()
@cached
def get_host_ip(hostname=None):
hostname = hostname or unit_get('private-address')
try:
# Test to see if already an IPv4 address
socket.inet_aton(hostname)
return hostname
except socket.error:
answers = dns.resolver.query(hostname, 'A')
if answers:
return answers[0].address
return None

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

66
hooks/quantum_contexts.py Normal file
View File

@ -0,0 +1,66 @@
# vim: set ts=4:et
from charmhelpers.core.hookenv import (
config,
relation_ids,
related_units,
relation_get,
)
from charmhelpers.contrib.openstack.context import (
OSContextGenerator,
context_complete
)
import quantum_utils as qutils
class NetworkServiceContext(OSContextGenerator):
interfaces = ['quantum-network-service']
def __call__(self):
for rid in relation_ids('quantum-network-service'):
for unit in related_units(rid):
ctxt = {
'keystone_host': relation_get('keystone_host',
rid=rid, unit=unit),
'service_port': relation_get('service_port', rid=rid,
unit=unit),
'auth_port': relation_get('auth_port', rid=rid, unit=unit),
'service_tenant': relation_get('service_tenant',
rid=rid, unit=unit),
'service_username': relation_get('service_username',
rid=rid, unit=unit),
'service_password': relation_get('service_password',
rid=rid, unit=unit),
'quantum_host': relation_get('quantum_host',
rid=rid, unit=unit),
'quantum_port': relation_get('quantum_port',
rid=rid, unit=unit),
'quantum_url': relation_get('quantum_url',
rid=rid, unit=unit),
'region': relation_get('region',
rid=rid, unit=unit),
# XXX: Hard-coded http.
'service_protocol': 'http',
'auth_protocol': 'http',
}
if context_complete(ctxt):
return ctxt
return {}
class ExternalPortContext(OSContextGenerator):
def __call__(self):
if config('ext-port'):
return {"ext_port": config('ext-port')}
else:
return None
class QuantumGatewayContext(OSContextGenerator):
def __call__(self):
ctxt = {
'shared_secret': qutils.get_shared_secret(),
'local_ip': qutils.get_host_ip(),
'core_plugin': qutils.CORE_PLUGIN[config('plugin')],
'plugin': config('plugin')
}
return ctxt

126
hooks/quantum_hooks.py Executable file
View File

@ -0,0 +1,126 @@
#!/usr/bin/python
from charmhelpers.core.hookenv import (
log, ERROR, WARNING,
config,
relation_get,
relation_set,
unit_get,
Hooks, UnregisteredHookError
)
from charmhelpers.core.host import (
apt_update,
apt_install,
filter_installed_packages,
restart_on_change
)
from charmhelpers.contrib.hahelpers.cluster import(
eligible_leader
)
from charmhelpers.contrib.hahelpers.apache import(
install_ca_cert
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available
)
import sys
from quantum_utils import (
register_configs,
restart_map,
do_openstack_upgrade,
get_packages,
get_early_packages,
valid_plugin,
RABBIT_USER,
RABBIT_VHOST,
DB_USER, QUANTUM_DB,
NOVA_DB_USER, NOVA_DB,
configure_ovs,
reassign_agent_resources,
)
hooks = Hooks()
CONFIGS = register_configs()
@hooks.hook('install')
def install():
configure_installation_source(config('openstack-origin'))
apt_update(fatal=True)
if valid_plugin():
apt_install(filter_installed_packages(get_early_packages()),
fatal=True)
apt_install(filter_installed_packages(get_packages()),
fatal=True)
else:
log('Please provide a valid plugin config', level=ERROR)
sys.exit(1)
@hooks.hook('config-changed')
@restart_on_change(restart_map())
def config_changed():
if openstack_upgrade_available('quantum-common'):
do_openstack_upgrade(CONFIGS)
if valid_plugin():
CONFIGS.write_all()
configure_ovs()
else:
log('Please provide a valid plugin config', level=ERROR)
sys.exit(1)
@hooks.hook('upgrade-charm')
def upgrade_charm():
install()
config_changed()
@hooks.hook('shared-db-relation-joined')
def db_joined():
relation_set(quantum_username=DB_USER,
quantum_database=QUANTUM_DB,
quantum_hostname=unit_get('private-address'),
nova_username=NOVA_DB_USER,
nova_database=NOVA_DB,
nova_hostname=unit_get('private-address'))
@hooks.hook('amqp-relation-joined')
def amqp_joined():
relation_set(username=RABBIT_USER,
vhost=RABBIT_VHOST)
@hooks.hook('shared-db-relation-changed',
'amqp-relation-changed')
@restart_on_change(restart_map())
def db_amqp_changed():
CONFIGS.write_all()
@hooks.hook('quantum-network-service-relation-changed')
@restart_on_change(restart_map())
def nm_changed():
CONFIGS.write_all()
if relation_get('ca_cert'):
install_ca_cert(relation_get('ca_cert'))
@hooks.hook("cluster-relation-departed")
def cluster_departed():
if config('plugin') == 'nvp':
log('Unable to re-assign agent resources for failed nodes with nvp',
level=WARNING)
return
if eligible_leader(None):
reassign_agent_resources()
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))

View File

@ -1,341 +0,0 @@
#!/usr/bin/python
from charmhelpers.core.hookenv import (
log, ERROR, WARNING,
config,
relation_ids,
related_units,
relation_get,
relation_set,
unit_get,
Hooks, UnregisteredHookError
)
from charmhelpers.core.host import (
apt_update,
apt_install,
restart_on_change
)
from charmhelpers.contrib.hahelpers.cluster_utils import(
eligible_leader
)
from charmhelpers.contrib.openstack.openstack_utils import (
configure_installation_source,
get_os_codename_install_source,
get_os_codename_package,
get_os_version_codename
)
from charmhelpers.contrib.network.ovs import (
add_bridge,
add_bridge_port
)
from lib.utils import render_template, get_host_ip
import sys
import quantum_utils as qutils
import os
PLUGIN = config('plugin')
hooks = Hooks()
@hooks.hook()
def install():
configure_installation_source(config('openstack-origin'))
apt_update(fatal=True)
if PLUGIN in qutils.GATEWAY_PKGS.keys():
if PLUGIN in [qutils.OVS, qutils.NVP]:
# Install OVS DKMS first to ensure that the ovs module
# loaded supports GRE tunnels
apt_install('openvswitch-datapath-dkms', fatal=True)
apt_install(qutils.GATEWAY_PKGS[PLUGIN], fatal=True)
else:
log('Please provide a valid plugin config', level=ERROR)
sys.exit(1)
@hooks.hook()
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
def config_changed():
src = config('openstack-origin')
available = get_os_codename_install_source(src)
installed = get_os_codename_package('quantum-common')
if (available and
get_os_version_codename(available) >
get_os_version_codename(installed)):
qutils.do_openstack_upgrade()
if PLUGIN in qutils.GATEWAY_PKGS.keys():
render_quantum_conf()
render_dhcp_agent_conf()
render_l3_agent_conf()
render_metadata_agent_conf()
render_metadata_api_conf()
render_plugin_conf()
render_ext_port_upstart()
render_evacuate_unit()
if PLUGIN in [qutils.OVS, qutils.NVP]:
add_bridge(qutils.INT_BRIDGE)
add_bridge(qutils.EXT_BRIDGE)
ext_port = config('ext-port')
if ext_port:
add_bridge_port(qutils.EXT_BRIDGE, ext_port)
else:
log('Please provide a valid plugin config', level=ERROR)
sys.exit(1)
@hooks.hook()
def upgrade_charm():
install()
config_changed()
def render_ext_port_upstart():
if config('ext-port'):
with open(qutils.EXT_PORT_CONF, "w") as conf:
conf.write(
render_template(os.path.basename(qutils.EXT_PORT_CONF),
{"ext_port": config('ext-port')})
)
else:
if os.path.exists(qutils.EXT_PORT_CONF):
os.remove(qutils.EXT_PORT_CONF)
def render_l3_agent_conf():
context = get_keystone_conf()
if (context and
os.path.exists(qutils.L3_AGENT_CONF)):
with open(qutils.L3_AGENT_CONF, "w") as conf:
conf.write(
render_template(os.path.basename(qutils.L3_AGENT_CONF),
context)
)
def render_dhcp_agent_conf():
if (os.path.exists(qutils.DHCP_AGENT_CONF)):
with open(qutils.DHCP_AGENT_CONF, "w") as conf:
conf.write(
render_template(os.path.basename(qutils.DHCP_AGENT_CONF),
{"plugin": PLUGIN})
)
def render_metadata_agent_conf():
context = get_keystone_conf()
if (context and
os.path.exists(qutils.METADATA_AGENT_CONF)):
context['local_ip'] = get_host_ip()
context['shared_secret'] = qutils.get_shared_secret()
with open(qutils.METADATA_AGENT_CONF, "w") as conf:
conf.write(
render_template(os.path.basename(qutils.METADATA_AGENT_CONF),
context)
)
def render_quantum_conf():
context = get_rabbit_conf()
if (context and
os.path.exists(qutils.QUANTUM_CONF)):
context['core_plugin'] = \
qutils.CORE_PLUGIN[PLUGIN]
with open(qutils.QUANTUM_CONF, "w") as conf:
conf.write(
render_template(os.path.basename(qutils.QUANTUM_CONF),
context)
)
def render_plugin_conf():
context = get_quantum_db_conf()
if (context and
os.path.exists(qutils.PLUGIN_CONF[PLUGIN])):
context['local_ip'] = get_host_ip()
conf_file = qutils.PLUGIN_CONF[PLUGIN]
with open(conf_file, "w") as conf:
conf.write(
render_template(os.path.basename(conf_file),
context)
)
def render_metadata_api_conf():
context = get_nova_db_conf()
r_context = get_rabbit_conf()
q_context = get_keystone_conf()
if (context and r_context and q_context and
os.path.exists(qutils.NOVA_CONF)):
context.update(r_context)
context.update(q_context)
context['shared_secret'] = qutils.get_shared_secret()
with open(qutils.NOVA_CONF, "w") as conf:
conf.write(
render_template(os.path.basename(qutils.NOVA_CONF),
context)
)
def render_evacuate_unit():
context = get_keystone_conf()
if context:
with open('/usr/local/bin/quantum-evacuate-unit', "w") as conf:
conf.write(render_template('evacuate_unit.py', context))
os.chmod('/usr/local/bin/quantum-evacuate-unit', 0700)
def get_keystone_conf():
for relid in relation_ids('quantum-network-service'):
for unit in related_units(relid):
conf = {
"keystone_host": relation_get('keystone_host',
unit, relid),
"service_port": relation_get('service_port',
unit, relid),
"auth_port": relation_get('auth_port', unit, relid),
"service_username": relation_get('service_username',
unit, relid),
"service_password": relation_get('service_password',
unit, relid),
"service_tenant": relation_get('service_tenant',
unit, relid),
"quantum_host": relation_get('quantum_host',
unit, relid),
"quantum_port": relation_get('quantum_port',
unit, relid),
"quantum_url": relation_get('quantum_url',
unit, relid),
"region": relation_get('region',
unit, relid)
}
if None not in conf.itervalues():
return conf
return None
@hooks.hook('shared-db-relation-joined')
def db_joined():
relation_set(quantum_username=qutils.DB_USER,
quantum_database=qutils.QUANTUM_DB,
quantum_hostname=unit_get('private-address'),
nova_username=qutils.NOVA_DB_USER,
nova_database=qutils.NOVA_DB,
nova_hostname=unit_get('private-address'))
@hooks.hook('shared-db-relation-changed')
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
def db_changed():
render_plugin_conf()
render_metadata_api_conf()
def get_quantum_db_conf():
for relid in relation_ids('shared-db'):
for unit in related_units(relid):
conf = {
"host": relation_get('db_host',
unit, relid),
"user": qutils.DB_USER,
"password": relation_get('quantum_password',
unit, relid),
"db": qutils.QUANTUM_DB
}
if None not in conf.itervalues():
return conf
return None
def get_nova_db_conf():
for relid in relation_ids('shared-db'):
for unit in related_units(relid):
conf = {
"host": relation_get('db_host',
unit, relid),
"user": qutils.NOVA_DB_USER,
"password": relation_get('nova_password',
unit, relid),
"db": qutils.NOVA_DB
}
if None not in conf.itervalues():
return conf
return None
@hooks.hook('amqp-relation-joined')
def amqp_joined():
relation_set(username=qutils.RABBIT_USER,
vhost=qutils.RABBIT_VHOST)
@hooks.hook('amqp-relation-changed')
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
def amqp_changed():
render_dhcp_agent_conf()
render_quantum_conf()
render_metadata_api_conf()
def get_rabbit_conf():
for relid in relation_ids('amqp'):
for unit in related_units(relid):
conf = {
"rabbit_host": relation_get('private-address',
unit, relid),
"rabbit_virtual_host": qutils.RABBIT_VHOST,
"rabbit_userid": qutils.RABBIT_USER,
"rabbit_password": relation_get('password',
unit, relid)
}
clustered = relation_get('clustered', unit, relid)
if clustered:
conf['rabbit_host'] = relation_get('vip', unit, relid)
if None not in conf.itervalues():
return conf
return None
@hooks.hook('quantum-network-service-relation-changed')
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
def nm_changed():
render_dhcp_agent_conf()
render_l3_agent_conf()
render_metadata_agent_conf()
render_metadata_api_conf()
render_evacuate_unit()
store_ca_cert()
def store_ca_cert():
ca_cert = get_ca_cert()
if ca_cert:
qutils.install_ca(ca_cert)
def get_ca_cert():
for relid in relation_ids('quantum-network-service'):
for unit in related_units(relid):
ca_cert = relation_get('ca_cert', unit, relid)
if ca_cert:
return ca_cert
return None
@hooks.hook("cluster-relation-departed")
def cluster_departed():
if PLUGIN == 'nvp':
log('Unable to re-assign agent resources for failed nodes with nvp',
level=WARNING)
return
conf = get_keystone_conf()
if conf and eligible_leader(None):
qutils.reassign_agent_resources(conf)
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))

View File

@ -1,18 +1,29 @@
import subprocess
import os
import uuid
import base64
import apt_pkg as apt
import socket
from charmhelpers.core.hookenv import (
log,
config
config,
unit_get,
cached
)
from charmhelpers.core.host import (
apt_install
apt_install,
apt_update
)
from charmhelpers.contrib.openstack.openstack_utils import (
configure_installation_source
from charmhelpers.contrib.network.ovs import (
add_bridge,
add_bridge_port
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
get_os_codename_package,
get_os_codename_install_source
)
import charmhelpers.contrib.openstack.context as context
import charmhelpers.contrib.openstack.templating as templating
import quantum_contexts
from collections import OrderedDict
OVS = "ovs"
NVP = "nvp"
@ -26,6 +37,10 @@ CORE_PLUGIN = {
NVP: NVP_PLUGIN
}
def valid_plugin():
return config('plugin') in CORE_PLUGIN
OVS_PLUGIN_CONF = \
"/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini"
NVP_PLUGIN_CONF = \
@ -51,41 +66,25 @@ GATEWAY_PKGS = {
]
}
GATEWAY_AGENTS = {
OVS: [
"quantum-plugin-openvswitch-agent",
"quantum-l3-agent",
"quantum-dhcp-agent",
"nova-api-metadata"
],
NVP: [
"quantum-dhcp-agent",
"nova-api-metadata"
],
EARLY_PACKAGES = {
OVS: ['openvswitch-datapath-dkms']
}
EXT_PORT_CONF = '/etc/init/ext-port.conf'
def get_os_version(package=None):
apt.init()
cache = apt.Cache()
pkg = cache[package or 'quantum-common']
if pkg.current_ver:
return apt.upstream_version(pkg.current_ver.ver_str)
def get_early_packages():
'''Return a list of package for pre-install based on configured plugin'''
if config('plugin') in EARLY_PACKAGES:
return EARLY_PACKAGES[config('plugin')]
else:
return None
return []
if get_os_version('quantum-common') >= "2013.1":
for plugin in GATEWAY_AGENTS:
GATEWAY_AGENTS[plugin].append("quantum-metadata-agent")
def get_packages():
'''Return a list of packages for install based on the configured plugin'''
return GATEWAY_PKGS[config('plugin')]
DB_USER = "quantum"
QUANTUM_DB = "quantum"
KEYSTONE_SERVICE = "quantum"
NOVA_DB_USER = "nova"
NOVA_DB = "nova"
EXT_PORT_CONF = '/etc/init/ext-port.conf'
TEMPLATES = 'templates'
QUANTUM_CONF = "/etc/quantum/quantum.conf"
L3_AGENT_CONF = "/etc/quantum/l3_agent.ini"
@ -93,53 +92,99 @@ DHCP_AGENT_CONF = "/etc/quantum/dhcp_agent.ini"
METADATA_AGENT_CONF = "/etc/quantum/metadata_agent.ini"
NOVA_CONF = "/etc/nova/nova.conf"
OVS_RESTART_MAP = {
QUANTUM_CONF: [
'quantum-l3-agent',
'quantum-dhcp-agent',
'quantum-metadata-agent',
'quantum-plugin-openvswitch-agent'
],
DHCP_AGENT_CONF: [
'quantum-dhcp-agent'
],
L3_AGENT_CONF: [
'quantum-l3-agent'
],
METADATA_AGENT_CONF: [
'quantum-metadata-agent'
],
OVS_PLUGIN_CONF: [
'quantum-plugin-openvswitch-agent'
],
NOVA_CONF: [
'nova-api-metadata'
]
SHARED_CONFIG_FILES = {
DHCP_AGENT_CONF: {
'hook_contexts': [quantum_contexts.QuantumGatewayContext()],
'services': ['quantum-dhcp-agent']
},
METADATA_AGENT_CONF: {
'hook_contexts': [quantum_contexts.NetworkServiceContext()],
'services': ['quantum-metadata-agent']
},
NOVA_CONF: {
'hook_contexts': [context.AMQPContext(),
context.SharedDBContext(),
quantum_contexts.NetworkServiceContext(),
quantum_contexts.QuantumGatewayContext()],
'services': ['nova-api-metadata']
},
}
NVP_RESTART_MAP = {
QUANTUM_CONF: [
'quantum-dhcp-agent',
'quantum-metadata-agent'
],
DHCP_AGENT_CONF: [
'quantum-dhcp-agent'
],
METADATA_AGENT_CONF: [
'quantum-metadata-agent'
],
NOVA_CONF: [
'nova-api-metadata'
]
OVS_CONFIG_FILES = {
QUANTUM_CONF: {
'hook_contexts': [context.AMQPContext(),
quantum_contexts.QuantumGatewayContext()],
'services': ['quantum-l3-agent',
'quantum-dhcp-agent',
'quantum-metadata-agent',
'quantum-plugin-openvswitch-agent']
},
L3_AGENT_CONF: {
'hook_contexts': [quantum_contexts.NetworkServiceContext()],
'services': ['quantum-l3-agent']
},
# TODO: Check to see if this is actually required
OVS_PLUGIN_CONF: {
'hook_contexts': [context.SharedDBContext(),
quantum_contexts.QuantumGatewayContext()],
'services': ['quantum-plugin-openvswitch-agent']
},
EXT_PORT_CONF: {
'hook_contexts': [quantum_contexts.ExternalPortContext()],
'services': []
}
}
NVP_CONFIG_FILES = {
QUANTUM_CONF: {
'hook_contexts': [context.AMQPContext()],
'services': ['quantum-dhcp-agent', 'quantum-metadata-agent']
},
}
CONFIG_FILES = {
NVP: NVP_CONFIG_FILES.update(SHARED_CONFIG_FILES),
OVS: OVS_CONFIG_FILES.update(SHARED_CONFIG_FILES),
}
RESTART_MAP = {
OVS: OVS_RESTART_MAP,
NVP: NVP_RESTART_MAP
}
def register_configs():
    ''' Register config files with their respective contexts.

    :returns: an OSConfigRenderer with every config file for the
              configured plugin registered against its hook contexts.
    '''
    release = get_os_codename_package('quantum-common', fatal=False) or \
        'essex'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    plugin = config('plugin')
    # Fix: CONFIG_FILES is keyed by plugin first, then by config file;
    # the previous code indexed CONFIG_FILES[conf] directly (KeyError).
    for conf in CONFIG_FILES[plugin]:
        configs.register(conf, CONFIG_FILES[plugin][conf]['hook_contexts'])
    return configs
def restart_map():
    '''
    Determine the correct resource map to be passed to
    charmhelpers.core.restart_on_change() based on the services configured.

    :returns: dict: A dictionary mapping config file to lists of services
                    that should be restarted when file changes.
    '''
    plugin_files = CONFIG_FILES[config('plugin')]
    pairs = []
    for conf_file, ctxt in plugin_files.iteritems():
        services = [svc for svc in ctxt['services']]
        # Config files driving no services are omitted from the map.
        if services:
            pairs.append((conf_file, services))
    return OrderedDict(pairs)
# Database / keystone / rabbit account identifiers.  NOTE(review): their
# consumers (relation hooks) are not visible in this chunk - presumably
# used when negotiating shared-db, identity-service and amqp relations.
DB_USER = "quantum"
QUANTUM_DB = "quantum"
KEYSTONE_SERVICE = "quantum"
NOVA_DB_USER = "nova"
NOVA_DB = "nova"
RABBIT_USER = "nova"
RABBIT_VHOST = "nova"
@ -161,33 +206,22 @@ def get_shared_secret():
secret = secret_file.read().strip()
return secret
def flush_local_configuration():
    ''' Clean up stale quantum network namespaces using
    quantum-netns-cleanup, once per agent config (L3 and DHCP).

    No-op when the cleanup tool is not installed.
    '''
    cleanup_bin = '/usr/bin/quantum-netns-cleanup'
    if not os.path.exists(cleanup_bin):
        return
    base_cmd = [
        "quantum-netns-cleanup",
        "--config-file=/etc/quantum/quantum.conf"
    ]
    for agent_conf in ['l3_agent.ini', 'dhcp_agent.ini']:
        extra = '--config-file=/etc/quantum/{}'.format(agent_conf)
        subprocess.call(base_cmd + [extra])
def install_ca(ca_cert):
    ''' Install a base64-encoded CA certificate system-wide and refresh
    the system CA bundle.

    :param ca_cert: base64-encoded certificate payload.
    '''
    target = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
    with open(target, 'w') as crt:
        crt.write(base64.b64decode(ca_cert))
    # --fresh rebuilds the bundle so removed/replaced certs drop out.
    subprocess.check_call(['update-ca-certificates', '--fresh'])
# Agent type labels as understood by the quantum agent-scheduler API
# (passed to list_agents(agent_type=...)).
DHCP_AGENT = "DHCP Agent"
L3_AGENT = "L3 Agent"
def reassign_agent_resources(env):
def reassign_agent_resources():
''' Use agent scheduler API to detect down agents and re-schedule '''
from quantumclient.v2_0 import client
env = quantum_contexts.NetworkServiceContext()()
if not env:
log('Unable to re-assign resources at this time')
return
try:
from quantumclient.v2_0 import client
except ImportError:
''' Try to import neutronclient instead for havana+ '''
from neutronclient.v2_0 import client
# TODO: Fixup for https keystone
auth_url = 'http://%(keystone_host)s:%(auth_port)s/v2.0' % env
quantum = client.Client(username=env['service_username'],
@ -243,16 +277,56 @@ def reassign_agent_resources(env):
index += 1
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    Fix: this span contained two diff-fused copies of the function
    (duplicate defs, a dangling reference to ``pkgs`` and a malformed
    ``dpkg_opts`` list whose missing comma concatenated two options);
    reconstructed as the single configs-aware version.

    :param configs: The charms main OSConfigRenderer object.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    # Take the maintainer's conffiles without prompting; the charm
    # re-renders its own configuration from templates afterwards.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_install(packages=GATEWAY_PKGS[config('plugin')], options=dpkg_opts,
                fatal=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
@cached
def get_host_ip(hostname=None):
    ''' Resolve a hostname to an IPv4 address.

    Defaults to this unit's private-address; returns the input unchanged
    when it is already a dotted-quad, otherwise resolves the first A
    record (or None when resolution yields nothing).
    '''
    try:
        import dns.resolver
    except ImportError:
        # python-dnspython is only installed on demand.
        apt_install('python-dnspython', fatal=True)
        import dns.resolver
    target = hostname or unit_get('private-address')
    try:
        socket.inet_aton(target)
    except socket.error:
        # Not an IPv4 literal - fall back to DNS.
        answers = dns.resolver.query(target, 'A')
        if answers:
            return answers[0].address
    else:
        return target
def configure_ovs():
    ''' Create the Open vSwitch bridges required by the selected plugin:
    integration + external bridges for OVS (optionally attaching the
    configured external port), integration bridge only for NVP. '''
    plugin = config('plugin')
    if plugin == OVS:
        add_bridge(INT_BRIDGE)
        add_bridge(EXT_BRIDGE)
        portname = config('ext-port')
        if portname:
            add_bridge_port(EXT_BRIDGE, portname)
    if plugin == NVP:
        add_bridge(INT_BRIDGE)

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

View File

@ -1 +1 @@
quantum_relations.py
quantum_hooks.py

5
setup.cfg Normal file
View File

@ -0,0 +1,5 @@
[nosetests]
verbosity=2
with-coverage=1
cover-erase=1
cover-package=hooks

View File

@ -1,70 +0,0 @@
#!/usr/bin/python
import subprocess
def log(priority, message):
    # Write a '<priority>: <message>' line to stdout (Python 2 script).
    print "{}: {}".format(priority, message)
DHCP_AGENT = "DHCP Agent"
L3_AGENT = "L3 Agent"
def evacuate_unit(unit):
    ''' Use agent scheduler API to detect down agents and re-schedule.

    Networks/routers hosted by agents on *unit* are redistributed
    round-robin across the live agents running on other hosts.

    :param unit: hostname of the unit being evacuated.
    '''
    from quantumclient.v2_0 import client
    # TODO: Fixup for https keystone
    # The {{ ... }} markers are template placeholders substituted when
    # the charm renders this script.
    auth_url = 'http://{{ keystone_host }}:{{ auth_port }}/v2.0'
    quantum = client.Client(username='{{ service_username }}',
                            password='{{ service_password }}',
                            tenant_name='{{ service_tenant }}',
                            auth_url=auth_url,
                            region_name='{{ region }}')

    # Survey DHCP agents: live agents elsewhere become candidate targets;
    # networks still bound to this unit's agent are queued for moving.
    agents = quantum.list_agents(agent_type=DHCP_AGENT)
    dhcp_agents = []
    l3_agents = []
    networks = {}
    for agent in agents['agents']:
        if agent['alive'] and agent['host'] != unit:
            dhcp_agents.append(agent['id'])
        elif agent['host'] == unit:
            for network in \
                    quantum.list_networks_on_dhcp_agent(agent['id'])['networks']:
                networks[network['id']] = agent['id']

    # Same survey for L3 agents and their routers.
    agents = quantum.list_agents(agent_type=L3_AGENT)
    routers = {}
    for agent in agents['agents']:
        if agent['alive'] and agent['host'] != unit:
            l3_agents.append(agent['id'])
        elif agent['host'] == unit:
            for router in \
                    quantum.list_routers_on_l3_agent(agent['id'])['routers']:
                routers[router['id']] = agent['id']

    # Round-robin each stranded router onto the surviving L3 agents.
    # NOTE(review): '% len(l3_agents)' raises ZeroDivisionError if no
    # live L3 agent remains while routers are queued - confirm upstream.
    index = 0
    for router_id in routers:
        agent = index % len(l3_agents)
        log('INFO',
            'Moving router %s from %s to %s' % \
            (router_id, routers[router_id], l3_agents[agent]))
        quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
                                            router_id=router_id)
        quantum.add_router_to_l3_agent(l3_agent=l3_agents[agent],
                                       body={'router_id': router_id})
        index += 1

    # Round-robin each stranded network onto the surviving DHCP agents.
    index = 0
    for network_id in networks:
        agent = index % len(dhcp_agents)
        log('INFO',
            'Moving network %s from %s to %s' % \
            (network_id, networks[network_id], dhcp_agents[agent]))
        quantum.remove_network_from_dhcp_agent(dhcp_agent=networks[network_id],
                                               network_id=network_id)
        quantum.add_network_to_dhcp_agent(dhcp_agent=dhcp_agents[agent],
                                          body={'network_id': network_id})
        index += 1
evacuate_unit(subprocess.check_output(['hostname', '-f']).strip())

View File

@ -1,6 +1,6 @@
[DEFAULT]
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
auth_url = http://{{ keystone_host }}:{{ service_port }}/v2.0
auth_url = {{ service_protocol }}://{{ keystone_host }}:{{ service_port }}/v2.0
auth_region = {{ region }}
admin_tenant_name = {{ service_tenant }}
admin_user = {{ service_username }}

View File

@ -1,6 +1,6 @@
[DEFAULT]
debug = True
auth_url = http://{{ keystone_host }}:{{ service_port }}/v2.0
auth_url = {{ service_protocol }}://{{ keystone_host }}:{{ service_port }}/v2.0
auth_region = {{ region }}
admin_tenant_name = {{ service_tenant }}
admin_user = {{ service_username }}

View File

@ -22,4 +22,4 @@ quantum_url={{ quantum_url }}
quantum_admin_tenant_name={{ service_tenant }}
quantum_admin_username={{ service_username }}
quantum_admin_password={{ service_password }}
quantum_admin_auth_url=http://{{ keystone_host }}:{{ service_port }}/v2.0
quantum_admin_auth_url={{ service_protocol }}://{{ keystone_host }}:{{ service_port }}/v2.0