Update of all Havana / Saucy / python-redux work:

* Full Python rewrite using new OpenStack charm-helpers
* Test coverage
* Havana support
This commit is contained in:
commit 812fafd5b7
6 .coveragerc Normal file
@@ -0,0 +1,6 @@
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
    if __name__ == .__main__.:
include=
    hooks/nova_*
17 .project Normal file
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
	<name>nova-compute</name>
	<comment></comment>
	<projects>
	</projects>
	<buildSpec>
		<buildCommand>
			<name>org.python.pydev.PyDevBuilder</name>
			<arguments>
			</arguments>
		</buildCommand>
	</buildSpec>
	<natures>
		<nature>org.python.pydev.pythonNature</nature>
	</natures>
</projectDescription>
9 .pydevproject Normal file
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/nova-compute/hooks</path>
<path>/nova-compute/unit_tests</path>
</pydev_pathproperty>
</pydev_project>
14 Makefile Normal file
@@ -0,0 +1,14 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python

lint:
	@flake8 --exclude hooks/charmhelpers hooks
	@flake8 --exclude hooks/charmhelpers unit_tests
	@charm proof

test:
	@echo Starting tests...
	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests

sync:
	@charm-helper-sync -c charm-helpers.yaml
12 charm-helpers.yaml Normal file
@@ -0,0 +1,12 @@
branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
    - core
    - fetch
    - contrib.openstack|inc=*
    - contrib.storage
    - contrib.hahelpers:
        - apache
        - cluster
    - contrib.network.ovs
    - payload.execd
13 config.yaml
@@ -26,14 +26,22 @@ options:
     default: nova
     type: string
     description: Rabbitmq vhost
-  db-user:
+  database-user:
     default: nova
     type: string
     description: Username for database access
-  nova-db:
+  database:
     default: nova
     type: string
     description: Database name
+  neutron-database-user:
+    default: neutron
+    type: string
+    description: Username for Neutron database access (if enabled)
+  neutron-database:
+    default: neutron
+    type: string
+    description: Database name for Neutron (if enabled)
   virt-type:
     default: kvm
     type: string
@@ -71,7 +79,6 @@ options:
     type: string
     description: Network interface on which to build bridge
   config-flags:
-    default: None
     type: string
     description: Comma separated list of key=value config flags to be set in nova.conf.
   nagios_context:
0 hooks/__init__.py Normal file
1 hooks/amqp-relation-broken Symbolic link
@@ -0,0 +1 @@
nova_compute_hooks.py
@@ -1 +1 @@
-nova-compute-relations
+nova_compute_hooks.py
@@ -1 +1 @@
-nova-compute-relations
+nova_compute_hooks.py
1 hooks/ceph-relation-broken Symbolic link
@@ -0,0 +1 @@
nova_compute_hooks.py
@@ -1 +1 @@
-nova-compute-relations
+nova_compute_hooks.py
@@ -1 +1 @@
-nova-compute-relations
+nova_compute_hooks.py
0 hooks/charmhelpers/__init__.py Normal file
0 hooks/charmhelpers/contrib/__init__.py Normal file
0 hooks/charmhelpers/contrib/hahelpers/__init__.py Normal file
58 hooks/charmhelpers/contrib/hahelpers/apache.py Normal file
@@ -0,0 +1,58 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

import subprocess

from charmhelpers.core.hookenv import (
    config as config_get,
    relation_get,
    relation_ids,
    related_units as relation_list,
    log,
    INFO,
)


def get_cert():
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get('ssl_cert',
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get('ssl_key',
                                       rid=r_id, unit=unit)
    return (cert, key)


def get_ca_cert():
    ca_cert = None
    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if not ca_cert:
                ca_cert = relation_get('ca_cert',
                                       rid=r_id, unit=unit)
    return ca_cert


def install_ca_cert(ca_cert):
    if ca_cert:
        with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
                  'w') as crt:
            crt.write(ca_cert)
        subprocess.check_call(['update-ca-certificates', '--fresh'])
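
For orientation, a minimal sketch of how a charm hook might consume these helpers. The hook function name and the base64 handling are assumptions for illustration, not part of this commit; the relation-published cert material is assumed base64-encoded, matching what configure_cert() in context.py (below) expects.

# Hypothetical hook snippet; not part of this commit.
from base64 import b64decode

from charmhelpers.contrib.hahelpers.apache import (
    get_cert,
    get_ca_cert,
    install_ca_cert,
)


def configure_https():
    # Prefer ssl_cert/ssl_key from charm config; fall back to the
    # identity-service relation.
    cert, key = get_cert()
    if not (cert and key):
        return  # nothing to configure yet
    with open('/etc/apache2/ssl/cert', 'w') as out:
        out.write(b64decode(cert))
    with open('/etc/apache2/ssl/key', 'w') as out:
        out.write(b64decode(key))
    ca_cert = get_ca_cert()
    if ca_cert:
        # Installs into /usr/local/share/ca-certificates and refreshes.
        install_ca_cert(b64decode(ca_cert))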
294 hooks/charmhelpers/contrib/hahelpers/ceph.py Normal file
@@ -0,0 +1,294 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

import commands
import os
import shutil
import time

from subprocess import (
    check_call,
    check_output,
    CalledProcessError
)

from charmhelpers.core.hookenv import (
    relation_get,
    relation_ids,
    related_units,
    log,
    INFO,
    ERROR
)

from charmhelpers.fetch import (
    apt_install,
)

from charmhelpers.core.host import (
    mount,
    mounts,
    service_start,
    service_stop,
    umount,
)

KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'

CEPH_CONF = """[global]
 auth supported = %(auth)s
 keyring = %(keyring)s
 mon host = %(mon_hosts)s
"""


def running(service):
    # this local util can be dropped as soon as the following branch lands
    # in lp:charm-helpers
    # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
    try:
        output = check_output(['service', service, 'status'])
    except CalledProcessError:
        return False
    else:
        if ("start/running" in output or "is running" in output):
            return True
        else:
            return False


def install():
    ceph_dir = "/etc/ceph"
    if not os.path.isdir(ceph_dir):
        os.mkdir(ceph_dir)
    apt_install('ceph-common', fatal=True)


def rbd_exists(service, pool, rbd_img):
    (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
                                         (service, pool))
    return rbd_img in out


def create_rbd_image(service, pool, image, sizemb):
    cmd = [
        'rbd',
        'create',
        image,
        '--size',
        str(sizemb),
        '--id',
        service,
        '--pool',
        pool
    ]
    check_call(cmd)


def pool_exists(service, name):
    (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
    return name in out


def create_pool(service, name):
    cmd = [
        'rados',
        '--id',
        service,
        'mkpool',
        name
    ]
    check_call(cmd)


def keyfile_path(service):
    return KEYFILE % service


def keyring_path(service):
    return KEYRING % service


def create_keyring(service, key):
    keyring = keyring_path(service)
    if os.path.exists(keyring):
        log('ceph: Keyring exists at %s.' % keyring, level=INFO)
    cmd = [
        'ceph-authtool',
        keyring,
        '--create-keyring',
        '--name=client.%s' % service,
        '--add-key=%s' % key
    ]
    check_call(cmd)
    log('ceph: Created new ring at %s.' % keyring, level=INFO)


def create_key_file(service, key):
    # create a file containing the key
    keyfile = keyfile_path(service)
    if os.path.exists(keyfile):
        log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
    fd = open(keyfile, 'w')
    fd.write(key)
    fd.close()
    log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)


def get_ceph_nodes():
    hosts = []
    for r_id in relation_ids('ceph'):
        for unit in related_units(r_id):
            hosts.append(relation_get('private-address', unit=unit, rid=r_id))
    return hosts


def configure(service, key, auth):
    create_keyring(service, key)
    create_key_file(service, key)
    hosts = get_ceph_nodes()
    mon_hosts = ",".join(map(str, hosts))
    keyring = keyring_path(service)
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF % locals())
    modprobe_kernel_module('rbd')


def image_mapped(image_name):
    (rc, out) = commands.getstatusoutput('rbd showmapped')
    return image_name in out


def map_block_storage(service, pool, image):
    cmd = [
        'rbd',
        'map',
        '%s/%s' % (pool, image),
        '--user',
        service,
        '--secret',
        keyfile_path(service),
    ]
    check_call(cmd)


def filesystem_mounted(fs):
    return fs in [f for m, f in mounts()]


def make_filesystem(blk_device, fstype='ext4', timeout=10):
    count = 0
    e_noent = os.errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('ceph: gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        log('ceph: waiting for block device %s to appear' % blk_device,
            level=INFO)
        count += 1
        time.sleep(1)
    else:
        log('ceph: Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])


def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
    # mount block device into /mnt
    mount(blk_device, '/mnt')

    # copy data to /mnt
    try:
        copy_files(data_src_dst, '/mnt')
    except:
        pass

    # umount block device
    umount('/mnt')

    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid

    # re-mount where the data should originally be
    mount(blk_device, data_src_dst, persist=True)

    # ensure original ownership of new mount.
    cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
    check_call(cmd)


# TODO: re-use
def modprobe_kernel_module(module):
    log('ceph: Loading kernel module', level=INFO)
    cmd = ['modprobe', module]
    check_call(cmd)
    cmd = 'echo %s >> /etc/modules' % module
    check_call(cmd, shell=True)


def copy_files(src, dst, symlinks=False, ignore=None):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)


def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[]):
    """
    To be called from the current cluster leader.
    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being remounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('ceph: Creating new pool %s.' % pool, level=INFO)
        create_pool(service, pool)

    if not rbd_exists(service, pool, rbd_img):
        log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if running(svc):
                log('Stopping services %s prior to migrating data.' % svc,
                    level=INFO)
                service_stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            service_start(svc)
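
To make the intended call pattern concrete, a sketch of a ceph-relation-changed handler follows. The handler name, service name, pool/image names, size, and device path are illustrative assumptions, not part of this commit.

# Hypothetical ceph-relation-changed handler; values are placeholders.
from charmhelpers.core.hookenv import relation_get
from charmhelpers.contrib.hahelpers import ceph


def ceph_changed():
    key = relation_get('key')
    auth = relation_get('auth')
    if None in [key, auth]:
        return  # the monitor has not published credentials yet
    ceph.install()
    ceph.configure(service='nova-compute', key=key, auth=auth)
    # Leader-only: create/map/mount an RBD-backed mount point,
    # migrating any existing data onto it.
    ceph.ensure_ceph_storage(service='nova-compute', pool='nova',
                             rbd_img='nova', sizemb=1024,
                             mount_point='/var/lib/nova',
                             blk_device='/dev/rbd1', fstype='ext4',
                             system_services=['nova-compute'])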
183 hooks/charmhelpers/contrib/hahelpers/cluster.py Normal file
@@ -0,0 +1,183 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

import subprocess
import os

from socket import gethostname as get_unit_hostname

from charmhelpers.core.hookenv import (
    log,
    relation_ids,
    related_units as relation_list,
    relation_get,
    config as config_get,
    INFO,
    ERROR,
    unit_get,
)


class HAIncompleteConfig(Exception):
    pass


def is_clustered():
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
            clustered = relation_get('clustered',
                                     rid=r_id,
                                     unit=unit)
            if clustered:
                return True
    return False


def is_leader(resource):
    cmd = [
        "crm", "resource",
        "show", resource
    ]
    try:
        status = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        return False
    else:
        if get_unit_hostname() in status:
            return True
        else:
            return False


def peer_units():
    peers = []
    for r_id in (relation_ids('cluster') or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers


def oldest_peer(peers):
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    for peer in peers:
        remote_unit_no = int(peer.split('/')[1])
        if remote_unit_no < local_unit_no:
            return False
    return True


def eligible_leader(resource):
    if is_clustered():
        if not is_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True


def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ssl_cert', rid=r_id, unit=unit),
                relation_get('ssl_key', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if (None not in rel_state) and ('' not in rel_state):
                return True
    return False


def determine_api_port(public_port):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the API service
    '''
    i = 0
    if len(peer_units()) > 0 or is_clustered():
        i += 1
    if https():
        i += 1
    return public_port - (i * 10)


def determine_haproxy_port(public_port):
    '''
    Description: Determine correct proxy listening port based on public IP +
    existence of HTTPS reverse proxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the HAProxy service
    '''
    i = 0
    if https():
        i += 1
    return public_port - (i * 10)


def get_hacluster_config():
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
    conf = {}
    for setting in settings:
        conf[setting] = config_get(setting)
    missing = []
    [missing.append(s) for s, v in conf.iteritems() if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf


def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config templating object to inspect
                  for a complete https context.
    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
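
For orientation, a sketch of how a charm typically combines these primitives. The hook name, CRM resource name, and port are placeholder assumptions; `configs` stands in for whatever templating object the charm passes to canonical_url().

# Illustrative only: gate singleton work on eligible_leader() and
# derive the advertised URL from cluster/HTTPS state.
from charmhelpers.contrib.hahelpers.cluster import (
    eligible_leader,
    determine_api_port,
    canonical_url,
)


def identity_joined(configs):
    # Only one unit (CRM leader, or oldest peer before clustering)
    # should register endpoints with keystone.
    if not eligible_leader('res_nova_vip'):
        return None
    # 8774 as the nova-api public port is an assumption; haproxy and
    # the HTTPS frontend each shift the real listener down by 10.
    listen_port = determine_api_port(8774)
    return '%s:%s' % (canonical_url(configs), listen_port)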
0 hooks/charmhelpers/contrib/network/__init__.py Normal file
72 hooks/charmhelpers/contrib/network/ovs/__init__.py Normal file
@@ -0,0 +1,72 @@
''' Helpers for interacting with OpenvSwitch '''
import subprocess
import os
from charmhelpers.core.hookenv import (
    log, WARNING
)
from charmhelpers.core.host import (
    service
)


def add_bridge(name):
    ''' Add the named bridge to openvswitch '''
    log('Creating bridge {}'.format(name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])


def del_bridge(name):
    ''' Delete the named bridge from openvswitch '''
    log('Deleting bridge {}'.format(name))
    subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])


def add_bridge_port(name, port):
    ''' Add a port to the named openvswitch bridge '''
    log('Adding port {} to bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "up"])


def del_bridge_port(name, port):
    ''' Delete a port from the named openvswitch bridge '''
    log('Deleting port {} from bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "down"])


def set_manager(manager):
    ''' Set the controller for the local openvswitch '''
    log('Setting manager for local ovs to {}'.format(manager))
    subprocess.check_call(['ovs-vsctl', 'set-manager',
                           'ssl:{}'.format(manager)])


CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'


def get_certificate():
    ''' Read openvswitch certificate from disk '''
    if os.path.exists(CERT_PATH):
        log('Reading ovs certificate from {}'.format(CERT_PATH))
        with open(CERT_PATH, 'r') as cert:
            full_cert = cert.read()
            begin_marker = "-----BEGIN CERTIFICATE-----"
            end_marker = "-----END CERTIFICATE-----"
            begin_index = full_cert.find(begin_marker)
            end_index = full_cert.rfind(end_marker)
            if end_index == -1 or begin_index == -1:
                raise RuntimeError("Certificate does not contain valid begin"
                                   " and end markers.")
            full_cert = full_cert[begin_index:(end_index + len(end_marker))]
            return full_cert
    else:
        log('Certificate not found', level=WARNING)
        return None


def full_restart():
    ''' Full restart and reload of openvswitch '''
    service('force-reload-kmod', 'openvswitch-switch')
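
A sketch of the call sequence a charm might use to wire up a data-path bridge; the bridge and interface names are placeholders, not part of this commit.

# Illustrative only: create an integration bridge and attach a NIC.
from charmhelpers.contrib.network import ovs


def setup_data_bridge(iface='eth1'):
    ovs.add_bridge('br-data')              # idempotent via --may-exist
    ovs.add_bridge_port('br-data', iface)  # also brings the link up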
0 hooks/charmhelpers/contrib/openstack/__init__.py Normal file
522 hooks/charmhelpers/contrib/openstack/context.py Normal file
@@ -0,0 +1,522 @@
import json
import os

from base64 import b64decode

from subprocess import (
    check_call
)


from charmhelpers.fetch import (
    apt_install,
    filter_installed_packages,
)

from charmhelpers.core.hookenv import (
    config,
    local_unit,
    log,
    relation_get,
    relation_ids,
    related_units,
    unit_get,
    unit_private_ip,
    ERROR,
    WARNING,
)

from charmhelpers.contrib.hahelpers.cluster import (
    determine_api_port,
    determine_haproxy_port,
    https,
    is_clustered,
    peer_units,
)

from charmhelpers.contrib.hahelpers.apache import (
    get_cert,
    get_ca_cert,
)

from charmhelpers.contrib.openstack.neutron import (
    neutron_plugin_attribute,
)

CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'


class OSContextError(Exception):
    pass


def ensure_packages(packages):
    '''Install but do not upgrade required plugin packages'''
    required = filter_installed_packages(packages)
    if required:
        apt_install(required, fatal=True)


def context_complete(ctxt):
    _missing = []
    for k, v in ctxt.iteritems():
        if v is None or v == '':
            _missing.append(k)
    if _missing:
        log('Missing required data: %s' % ' '.join(_missing), level='INFO')
        return False
    return True


class OSContextGenerator(object):
    interfaces = []

    def __call__(self):
        raise NotImplementedError


class SharedDBContext(OSContextGenerator):
    interfaces = ['shared-db']

    def __init__(self, database=None, user=None, relation_prefix=None):
        '''
        Allows inspecting relation for settings prefixed with relation_prefix.
        This is useful for parsing access for multiple databases returned via
        the shared-db interface (eg, nova_password, quantum_password)
        '''
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user

    def __call__(self):
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log('Could not generate shared_db context. '
                'Missing required charm config options. '
                '(database name and user)')
            raise OSContextError
        ctxt = {}

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                passwd = relation_get(password_setting, rid=rid, unit=unit)
                ctxt = {
                    'database_host': relation_get('db_host', rid=rid,
                                                  unit=unit),
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': passwd,
                }
                if context_complete(ctxt):
                    return ctxt
        return {}


class IdentityServiceContext(OSContextGenerator):
    interfaces = ['identity-service']

    def __call__(self):
        log('Generating template context for identity-service')
        ctxt = {}

        for rid in relation_ids('identity-service'):
            for unit in related_units(rid):
                ctxt = {
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'service_host': relation_get('service_host', rid=rid,
                                                 unit=unit),
                    'auth_host': relation_get('auth_host', rid=rid, unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'admin_tenant_name': relation_get('service_tenant',
                                                      rid=rid, unit=unit),
                    'admin_user': relation_get('service_username', rid=rid,
                                               unit=unit),
                    'admin_password': relation_get('service_password', rid=rid,
                                                   unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}


class AMQPContext(OSContextGenerator):
    interfaces = ['amqp']

    def __call__(self):
        log('Generating template context for amqp')
        conf = config()
        try:
            username = conf['rabbit-user']
            vhost = conf['rabbit-vhost']
        except KeyError as e:
            log('Could not generate amqp context. '
                'Missing required charm config options: %s.' % e)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids('amqp'):
            for unit in related_units(rid):
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
                                                         unit=unit)
                else:
                    ctxt['rabbitmq_host'] = relation_get('private-address',
                                                         rid=rid, unit=unit)
                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })
                if context_complete(ctxt):
                    # Sufficient information found = break out!
                    break
            # Used for active/active rabbitmq >= grizzly
            ctxt['rabbitmq_hosts'] = []
            for unit in related_units(rid):
                ctxt['rabbitmq_hosts'].append(relation_get('private-address',
                                                           rid=rid, unit=unit))
        if not context_complete(ctxt):
            return {}
        else:
            return ctxt


class CephContext(OSContextGenerator):
    interfaces = ['ceph']

    def __call__(self):
        '''This generates context for /etc/ceph/ceph.conf templates'''
        if not relation_ids('ceph'):
            return {}
        log('Generating template context for ceph')
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                mon_hosts.append(relation_get('private-address', rid=rid,
                                              unit=unit))
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)

        ctxt = {
            'mon_hosts': ' '.join(mon_hosts),
            'auth': auth,
            'key': key,
        }

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])

        return ctxt


class HAProxyContext(OSContextGenerator):
    interfaces = ['cluster']

    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.
        '''
        if not relation_ids('cluster'):
            return {}

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {
            'units': cluster_hosts,
        }
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}


class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        '''
        Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        '''
        log('Generating template context for image-service.')
        rids = relation_ids('image-service')
        if not rids:
            return {}
        for rid in rids:
            for unit in related_units(rid):
                api_server = relation_get('glance-api-server',
                                          rid=rid, unit=unit)
                if api_server:
                    return {'glance_api_servers': api_server}
        log('ImageService context is incomplete. '
            'Missing required relation data.')
        return {}


class ApacheSSLContext(OSContextGenerator):
    """
    Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints.  Generated context
    looks something like:
    {
        'namespace': 'cinder',
        'private_address': 'iscsi.mycinderhost.com',
        'endpoints': [(8776, 8766), (8777, 8767)]
    }

    The endpoints list consists of tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self):
        if not os.path.isdir('/etc/apache2/ssl'):
            os.mkdir('/etc/apache2/ssl')
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        if not os.path.isdir(ssl_dir):
            os.mkdir(ssl_dir)
        cert, key = get_cert()
        with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
            cert_out.write(b64decode(cert))
        with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
            key_out.write(b64decode(key))
        ca_cert = get_ca_cert()
        if ca_cert:
            with open(CA_CERT_PATH, 'w') as ca_out:
                ca_out.write(b64decode(ca_cert))
            check_call(['update-ca-certificates'])

    def __call__(self):
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if (not self.external_ports or not https()):
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': []
        }
        for ext_port in self.external_ports:
            if peer_units() or is_clustered():
                int_port = determine_haproxy_port(ext_port)
            else:
                int_port = determine_api_port(ext_port)
            portmap = (int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
        return ctxt


class NeutronContext(object):
    interfaces = []

    @property
    def plugin(self):
        return None

    @property
    def network_manager(self):
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(
            self.plugin, 'packages', self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        [ensure_packages(pkgs) for pkgs in self.packages]

    def _save_flag_file(self):
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'
        with open(_file, 'wb') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)

        ovs_ctxt = {
            'core_plugin': driver,
            'neutron_plugin': 'ovs',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
        }

        return ovs_ctxt

    def __call__(self):
        self._ensure_packages()

        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = {'network_manager': self.network_manager}

        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())

        self._save_flag_file()
        return ctxt


class OSConfigFlagContext(OSContextGenerator):
    '''
    Responsible for adding user-defined config-flags in charm config
    to a template context.
    '''
    def __call__(self):
        config_flags = config('config-flags')
        if not config_flags or config_flags in ['None', '']:
            return {}
        config_flags = config_flags.split(',')
        flags = {}
        for flag in config_flags:
            if '=' not in flag:
                log('Improperly formatted config-flag, expected k=v '
                    'got %s' % flag, level=WARNING)
                continue
            k, v = flag.split('=')
            flags[k.strip()] = v
        ctxt = {'user_config_flags': flags}
        return ctxt


class SubordinateConfigContext(OSContextGenerator):
    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principal for multiple config
    files and multiple services.  Ie, a subordinate that has interfaces
    to both glance and nova may export the following yaml blob as json:

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                MYSECTION:
                    - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]

    It is then up to the principal charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as:

        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }

    """
    def __init__(self, service, config_file, interface):
        """
        :param service     : Service name key to query in any subordinate
                             data found
        :param config_file : Service's config file to query sections
        :param interface   : Subordinate interface to inspect
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except:
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s service' % (rid, self.service))
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s' % (rid, self.config_file))
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in sub_config.iteritems():
                        ctxt[k] = v

        if not ctxt:
            ctxt['sections'] = {}

        return ctxt
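
To show how these generators are meant to compose: each one returns {} until its relation data is complete, so a charm can rebuild the full template context on every hook. A minimal sketch follows; the helper name is an assumption, and the real charm code wires contexts through its own template renderer rather than a bare dict merge.

# Illustrative only: merge the output of several context generators.
from charmhelpers.contrib.openstack import context

CONTEXTS = [
    context.SharedDBContext(),
    context.AMQPContext(),
    context.IdentityServiceContext(),
    context.OSConfigFlagContext(),
]


def build_template_context():
    ctxt = {}
    for generator in CONTEXTS:
        ctxt.update(generator())  # incomplete relations contribute {}
    return ctxt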
117 hooks/charmhelpers/contrib/openstack/neutron.py Normal file
@@ -0,0 +1,117 @@
# Various utilities for dealing with Neutron and the renaming from Quantum.

from subprocess import check_output

from charmhelpers.core.hookenv import (
    config,
    log,
    ERROR,
)

from charmhelpers.contrib.openstack.utils import os_release


def headers_package():
    """Ensures correct linux-headers for running kernel are installed,
    for building DKMS package"""
    kver = check_output(['uname', '-r']).strip()
    return 'linux-headers-%s' % kver


# legacy
def quantum_plugins():
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['quantum-plugin-openvswitch-agent']],
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'services': [],
            'packages': [],
        }
    }


def neutron_plugins():
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['quantum-plugin-openvswitch-agent']],
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'services': [],
            'packages': [],
        }
    }


def neutron_plugin_attribute(plugin, attr, net_manager=None):
    manager = net_manager or network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        log('Error: Network manager does not support plugins.')
        raise Exception

    try:
        _plugin = plugins[plugin]
    except KeyError:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise Exception

    try:
        return _plugin[attr]
    except KeyError:
        return None


def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (eg, deploying H with network-manager=quantum,
    upgrading from G).
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ['quantum', 'neutron']:
        return manager

    if release in ['essex']:
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        raise Exception
    elif release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    else:
        # ensure accurate naming for all releases post-H
        return 'neutron'
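
A sketch of how the plugin map is consulted; it assumes network-manager is set in charm config and the ovs plugin is in use.

# Illustrative only: look up per-plugin facts for the active network
# manager (quantum pre-Havana, neutron from Havana onwards).
from charmhelpers.contrib.openstack.neutron import (
    network_manager,
    neutron_plugin_attribute,
)


def neutron_plugin_packages(plugin='ovs'):
    manager = network_manager()
    # e.g. [['linux-headers-...', 'openvswitch-datapath-dkms'],
    #       ['quantum-plugin-openvswitch-agent']]
    return neutron_plugin_attribute(plugin, 'packages', manager)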
2 hooks/charmhelpers/contrib/openstack/templates/__init__.py Normal file
@@ -0,0 +1,2 @@
# dummy __init__.py to fool syncer into thinking this is a syncable python
# module
11 hooks/charmhelpers/contrib/openstack/templates/ceph.conf Normal file
@@ -0,0 +1,11 @@
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
{% if auth -%}
[global]
  auth_supported = {{ auth }}
  keyring = /etc/ceph/$cluster.$name.keyring
  mon host = {{ mon_hosts }}
{% endif -%}
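
The template's variables line up with the keys produced by CephContext above ('auth', 'mon_hosts', 'key'). A minimal sketch of rendering it with plain Jinja2 follows, only to show the variable mapping; the charm's own templating machinery, not this snippet, does the real rendering.

# Illustrative only (Python 2, jinja2 assumed installed).
from jinja2 import Template

with open('hooks/charmhelpers/contrib/openstack/templates/ceph.conf') as f:
    template = Template(f.read())

print template.render(auth='cephx', mon_hosts='10.0.0.1 10.0.0.2')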
37 hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg Normal file
@@ -0,0 +1,37 @@
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 20000
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
    timeout client 30000
    timeout server 30000

listen stats :8888
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics