Merge of python-redux work for havana cycle
This commit is contained in:
commit
ceb01bf99a
6
.coveragerc
Normal file
6
.coveragerc
Normal file
@ -0,0 +1,6 @@
|
||||
[report]
|
||||
# Regexes for lines to exclude from consideration
|
||||
exclude_lines =
|
||||
if __name__ == .__main__.:
|
||||
include=
|
||||
hooks/nova_*
|
17
.project
Normal file
17
.project
Normal file
@ -0,0 +1,17 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<projectDescription>
|
||||
<name>nova-cloud-controller</name>
|
||||
<comment></comment>
|
||||
<projects>
|
||||
</projects>
|
||||
<buildSpec>
|
||||
<buildCommand>
|
||||
<name>org.python.pydev.PyDevBuilder</name>
|
||||
<arguments>
|
||||
</arguments>
|
||||
</buildCommand>
|
||||
</buildSpec>
|
||||
<natures>
|
||||
<nature>org.python.pydev.pythonNature</nature>
|
||||
</natures>
|
||||
</projectDescription>
|
9
.pydevproject
Normal file
9
.pydevproject
Normal file
@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<?eclipse-pydev version="1.0"?><pydev_project>
|
||||
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
|
||||
<path>/nova-cloud-controller/hooks</path>
|
||||
<path>/nova-cloud-controller/unit_tests</path>
|
||||
</pydev_pathproperty>
|
||||
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
|
||||
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
|
||||
</pydev_project>
|
14
Makefile
Normal file
14
Makefile
Normal file
@ -0,0 +1,14 @@
|
||||
#!/usr/bin/make
|
||||
PYTHON := /usr/bin/env python
|
||||
|
||||
lint:
|
||||
@flake8 --exclude hooks/charmhelpers hooks
|
||||
@flake8 --exclude hooks/charmhelpers unit_tests
|
||||
@charm proof
|
||||
|
||||
test:
|
||||
@echo Starting tests...
|
||||
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
|
||||
|
||||
sync:
|
||||
@charm-helper-sync -c charm-helpers.yaml
|
11
charm-helpers.yaml
Normal file
11
charm-helpers.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
branch: lp:charm-helpers
|
||||
destination: hooks/charmhelpers
|
||||
include:
|
||||
- core
|
||||
- fetch
|
||||
- contrib.openstack|inc=*
|
||||
- contrib.storage
|
||||
- contrib.hahelpers:
|
||||
- apache
|
||||
- ceph
|
||||
- payload.execd
|
50
config.yaml
50
config.yaml
@ -22,14 +22,22 @@ options:
|
||||
default: nova
|
||||
type: string
|
||||
decsription: Rabbitmq vhost
|
||||
db-user:
|
||||
database-user:
|
||||
default: nova
|
||||
type: string
|
||||
description: Username for database access
|
||||
nova-db:
|
||||
database:
|
||||
default: nova
|
||||
type: string
|
||||
description: Database name
|
||||
neutron-database-user:
|
||||
default: neutron
|
||||
type: string
|
||||
description: Username for Neutron database access (if enabled)
|
||||
neutron-database:
|
||||
default: neutron
|
||||
type: string
|
||||
description: Database name for Neutron (if enabled)
|
||||
network-manager:
|
||||
default: FlatDHCPManager
|
||||
type: string
|
||||
@ -38,10 +46,10 @@ options:
|
||||
.
|
||||
FlatDHCPManager (nova-network) (default)
|
||||
FlatManager (nova-network)
|
||||
Quantum (Full SDN solution)
|
||||
Neutron|Quantum (Full SDN solution)
|
||||
.
|
||||
When using the Quantum option you will most likely want to use
|
||||
the quantum charm to provide L3 routing and DHCP Services.
|
||||
When using the Neutron option you will most likely want to use
|
||||
the neutron-gateway charm to provide L3 routing and DHCP Services.
|
||||
bridge-interface:
|
||||
default: br100
|
||||
type: string
|
||||
@ -71,38 +79,10 @@ options:
|
||||
Use quantum for security group management.
|
||||
.
|
||||
Only supported for >= grizzly.
|
||||
conf-ext-net:
|
||||
type: string
|
||||
default: "no"
|
||||
description: Configure external network for quantum using
|
||||
network configuration below. Only used when OpenvSwitch
|
||||
plugin is configured.
|
||||
ext-net-name:
|
||||
neutron-external-network:
|
||||
type: string
|
||||
default: ext_net
|
||||
description: |
|
||||
Name of external network configuration to create for
|
||||
public access to instances/floating IP's.
|
||||
ext-net-cidr:
|
||||
type: string
|
||||
default: 192.168.21.0/24
|
||||
description: |
|
||||
External network addressing
|
||||
ext-net-gateway:
|
||||
type: string
|
||||
default: 192.168.21.1
|
||||
description: |
|
||||
IP of the public network gateway (i.e. external router)
|
||||
pool-floating-start:
|
||||
type: string
|
||||
default: 192.168.21.130
|
||||
description: |
|
||||
Start of default floating IP range.
|
||||
pool-floating-end:
|
||||
type: string
|
||||
default: 192.168.21.200
|
||||
description: |
|
||||
End of default floating IP range.
|
||||
description: Name of the external network for floating IP addresses provided by Neutron.
|
||||
config-flags:
|
||||
default: None
|
||||
type: string
|
||||
|
@ -1,6 +1,11 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
from quantumclient.v2_0 import client
|
||||
try:
|
||||
from quantumclient.v2_0 import client
|
||||
except ImportError:
|
||||
from neutronclient.v2_0 import client
|
||||
|
||||
from keystoneclient.v2_0 import client as ks_client
|
||||
import optparse
|
||||
import os
|
||||
import sys
|
||||
@ -17,6 +22,10 @@ For example:
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = optparse.OptionParser(usage)
|
||||
parser.add_option('-t', '--tenant',
|
||||
help='Tenant name to create network for',
|
||||
dest='tenant', action='store',
|
||||
default=None)
|
||||
parser.add_option("-d", "--debug",
|
||||
help="Enable debug logging",
|
||||
dest="debug", action="store_true", default=False)
|
||||
@ -49,18 +58,36 @@ if __name__ == '__main__':
|
||||
start_floating_ip = None
|
||||
end_floating_ip = None
|
||||
|
||||
keystone = ks_client.Client(username=os.environ['OS_USERNAME'],
|
||||
password=os.environ['OS_PASSWORD'],
|
||||
tenant_name=os.environ['OS_TENANT_NAME'],
|
||||
auth_url=os.environ['OS_AUTH_URL'],
|
||||
region_name=os.environ['OS_REGION_NAME'])
|
||||
quantum = client.Client(username=os.environ['OS_USERNAME'],
|
||||
password=os.environ['OS_PASSWORD'],
|
||||
tenant_name=os.environ['OS_TENANT_NAME'],
|
||||
auth_url=os.environ['OS_AUTH_URL'],
|
||||
region_name=os.environ['OS_REGION_NAME'])
|
||||
|
||||
|
||||
# Resolve tenant id
|
||||
tenant_id = None
|
||||
for tenant in [t._info for t in keystone.tenants.list()]:
|
||||
if (tenant['name'] ==
|
||||
(opts.tenant or os.environ['OS_TENANT_NAME'])):
|
||||
tenant_id = tenant['id']
|
||||
break # Tenant ID found - stop looking
|
||||
if not tenant_id:
|
||||
logging.error("Unable to locate tenant id for %s.", opts.tenant)
|
||||
sys.exit(1)
|
||||
|
||||
networks = quantum.list_networks(name=net_name)
|
||||
if len(networks['networks']) == 0:
|
||||
logging.info('Configuring external bridge')
|
||||
network_msg = {
|
||||
'name': net_name,
|
||||
'router:external': True
|
||||
'router:external': True,
|
||||
'tenant_id': tenant_id
|
||||
}
|
||||
logging.info('Creating new external network definition: %s',
|
||||
net_name)
|
||||
@ -76,7 +103,8 @@ if __name__ == '__main__':
|
||||
'name': subnet_name,
|
||||
'network_id': network['id'],
|
||||
'enable_dhcp': False,
|
||||
'ip_version': 4
|
||||
'ip_version': 4,
|
||||
'tenant_id': tenant_id
|
||||
}
|
||||
|
||||
if opts.default_gateway:
|
||||
@ -102,7 +130,7 @@ if __name__ == '__main__':
|
||||
if len(routers['routers']) == 0:
|
||||
logging.info('Creating provider router for external network access')
|
||||
router = quantum.create_router(
|
||||
{'router': {'name': 'provider-router'}}
|
||||
{'router': {'name': 'provider-router', 'tenant_id': tenant_id}}
|
||||
)['router']
|
||||
logging.info('New router created: %s', (router['id']))
|
||||
else:
|
@ -1,6 +1,10 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
from quantumclient.v2_0 import client
|
||||
try:
|
||||
from quantumclient.v2_0 import client
|
||||
except ImportError:
|
||||
from neutronclient.v2_0 import client
|
||||
|
||||
from keystoneclient.v2_0 import client as ks_client
|
||||
import optparse
|
||||
import os
|
0
hooks/__init__.py
Normal file
0
hooks/__init__.py
Normal file
1
hooks/amqp-relation-broken
Symbolic link
1
hooks/amqp-relation-broken
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
0
hooks/charmhelpers/__init__.py
Normal file
0
hooks/charmhelpers/__init__.py
Normal file
0
hooks/charmhelpers/contrib/__init__.py
Normal file
0
hooks/charmhelpers/contrib/__init__.py
Normal file
0
hooks/charmhelpers/contrib/hahelpers/__init__.py
Normal file
0
hooks/charmhelpers/contrib/hahelpers/__init__.py
Normal file
58
hooks/charmhelpers/contrib/hahelpers/apache.py
Normal file
58
hooks/charmhelpers/contrib/hahelpers/apache.py
Normal file
@ -0,0 +1,58 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import subprocess
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config as config_get,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
related_units as relation_list,
|
||||
log,
|
||||
INFO,
|
||||
)
|
||||
|
||||
|
||||
def get_cert():
|
||||
cert = config_get('ssl_cert')
|
||||
key = config_get('ssl_key')
|
||||
if not (cert and key):
|
||||
log("Inspecting identity-service relations for SSL certificate.",
|
||||
level=INFO)
|
||||
cert = key = None
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for unit in relation_list(r_id):
|
||||
if not cert:
|
||||
cert = relation_get('ssl_cert',
|
||||
rid=r_id, unit=unit)
|
||||
if not key:
|
||||
key = relation_get('ssl_key',
|
||||
rid=r_id, unit=unit)
|
||||
return (cert, key)
|
||||
|
||||
|
||||
def get_ca_cert():
|
||||
ca_cert = None
|
||||
log("Inspecting identity-service relations for CA SSL certificate.",
|
||||
level=INFO)
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for unit in relation_list(r_id):
|
||||
if not ca_cert:
|
||||
ca_cert = relation_get('ca_cert',
|
||||
rid=r_id, unit=unit)
|
||||
return ca_cert
|
||||
|
||||
|
||||
def install_ca_cert(ca_cert):
|
||||
if ca_cert:
|
||||
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
|
||||
'w') as crt:
|
||||
crt.write(ca_cert)
|
||||
subprocess.check_call(['update-ca-certificates', '--fresh'])
|
294
hooks/charmhelpers/contrib/hahelpers/ceph.py
Normal file
294
hooks/charmhelpers/contrib/hahelpers/ceph.py
Normal file
@ -0,0 +1,294 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import commands
|
||||
import os
|
||||
import shutil
|
||||
import time
|
||||
|
||||
from subprocess import (
|
||||
check_call,
|
||||
check_output,
|
||||
CalledProcessError
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
relation_get,
|
||||
relation_ids,
|
||||
related_units,
|
||||
log,
|
||||
INFO,
|
||||
ERROR
|
||||
)
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
mount,
|
||||
mounts,
|
||||
service_start,
|
||||
service_stop,
|
||||
umount,
|
||||
)
|
||||
|
||||
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
|
||||
KEYFILE = '/etc/ceph/ceph.client.%s.key'
|
||||
|
||||
CEPH_CONF = """[global]
|
||||
auth supported = %(auth)s
|
||||
keyring = %(keyring)s
|
||||
mon host = %(mon_hosts)s
|
||||
"""
|
||||
|
||||
|
||||
def running(service):
|
||||
# this local util can be dropped as soon the following branch lands
|
||||
# in lp:charm-helpers
|
||||
# https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
|
||||
try:
|
||||
output = check_output(['service', service, 'status'])
|
||||
except CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
if ("start/running" in output or "is running" in output):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def install():
|
||||
ceph_dir = "/etc/ceph"
|
||||
if not os.path.isdir(ceph_dir):
|
||||
os.mkdir(ceph_dir)
|
||||
apt_install('ceph-common', fatal=True)
|
||||
|
||||
|
||||
def rbd_exists(service, pool, rbd_img):
|
||||
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
|
||||
(service, pool))
|
||||
return rbd_img in out
|
||||
|
||||
|
||||
def create_rbd_image(service, pool, image, sizemb):
|
||||
cmd = [
|
||||
'rbd',
|
||||
'create',
|
||||
image,
|
||||
'--size',
|
||||
str(sizemb),
|
||||
'--id',
|
||||
service,
|
||||
'--pool',
|
||||
pool
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def pool_exists(service, name):
|
||||
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
|
||||
return name in out
|
||||
|
||||
|
||||
def create_pool(service, name):
|
||||
cmd = [
|
||||
'rados',
|
||||
'--id',
|
||||
service,
|
||||
'mkpool',
|
||||
name
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def keyfile_path(service):
|
||||
return KEYFILE % service
|
||||
|
||||
|
||||
def keyring_path(service):
|
||||
return KEYRING % service
|
||||
|
||||
|
||||
def create_keyring(service, key):
|
||||
keyring = keyring_path(service)
|
||||
if os.path.exists(keyring):
|
||||
log('ceph: Keyring exists at %s.' % keyring, level=INFO)
|
||||
cmd = [
|
||||
'ceph-authtool',
|
||||
keyring,
|
||||
'--create-keyring',
|
||||
'--name=client.%s' % service,
|
||||
'--add-key=%s' % key
|
||||
]
|
||||
check_call(cmd)
|
||||
log('ceph: Created new ring at %s.' % keyring, level=INFO)
|
||||
|
||||
|
||||
def create_key_file(service, key):
|
||||
# create a file containing the key
|
||||
keyfile = keyfile_path(service)
|
||||
if os.path.exists(keyfile):
|
||||
log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
|
||||
fd = open(keyfile, 'w')
|
||||
fd.write(key)
|
||||
fd.close()
|
||||
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
|
||||
|
||||
|
||||
def get_ceph_nodes():
|
||||
hosts = []
|
||||
for r_id in relation_ids('ceph'):
|
||||
for unit in related_units(r_id):
|
||||
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
|
||||
return hosts
|
||||
|
||||
|
||||
def configure(service, key, auth):
|
||||
create_keyring(service, key)
|
||||
create_key_file(service, key)
|
||||
hosts = get_ceph_nodes()
|
||||
mon_hosts = ",".join(map(str, hosts))
|
||||
keyring = keyring_path(service)
|
||||
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
|
||||
ceph_conf.write(CEPH_CONF % locals())
|
||||
modprobe_kernel_module('rbd')
|
||||
|
||||
|
||||
def image_mapped(image_name):
|
||||
(rc, out) = commands.getstatusoutput('rbd showmapped')
|
||||
return image_name in out
|
||||
|
||||
|
||||
def map_block_storage(service, pool, image):
|
||||
cmd = [
|
||||
'rbd',
|
||||
'map',
|
||||
'%s/%s' % (pool, image),
|
||||
'--user',
|
||||
service,
|
||||
'--secret',
|
||||
keyfile_path(service),
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def filesystem_mounted(fs):
|
||||
return fs in [f for m, f in mounts()]
|
||||
|
||||
|
||||
def make_filesystem(blk_device, fstype='ext4', timeout=10):
|
||||
count = 0
|
||||
e_noent = os.errno.ENOENT
|
||||
while not os.path.exists(blk_device):
|
||||
if count >= timeout:
|
||||
log('ceph: gave up waiting on block device %s' % blk_device,
|
||||
level=ERROR)
|
||||
raise IOError(e_noent, os.strerror(e_noent), blk_device)
|
||||
log('ceph: waiting for block device %s to appear' % blk_device,
|
||||
level=INFO)
|
||||
count += 1
|
||||
time.sleep(1)
|
||||
else:
|
||||
log('ceph: Formatting block device %s as filesystem %s.' %
|
||||
(blk_device, fstype), level=INFO)
|
||||
check_call(['mkfs', '-t', fstype, blk_device])
|
||||
|
||||
|
||||
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
|
||||
# mount block device into /mnt
|
||||
mount(blk_device, '/mnt')
|
||||
|
||||
# copy data to /mnt
|
||||
try:
|
||||
copy_files(data_src_dst, '/mnt')
|
||||
except:
|
||||
pass
|
||||
|
||||
# umount block device
|
||||
umount('/mnt')
|
||||
|
||||
_dir = os.stat(data_src_dst)
|
||||
uid = _dir.st_uid
|
||||
gid = _dir.st_gid
|
||||
|
||||
# re-mount where the data should originally be
|
||||
mount(blk_device, data_src_dst, persist=True)
|
||||
|
||||
# ensure original ownership of new mount.
|
||||
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
# TODO: re-use
|
||||
def modprobe_kernel_module(module):
|
||||
log('ceph: Loading kernel module', level=INFO)
|
||||
cmd = ['modprobe', module]
|
||||
check_call(cmd)
|
||||
cmd = 'echo %s >> /etc/modules' % module
|
||||
check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def copy_files(src, dst, symlinks=False, ignore=None):
|
||||
for item in os.listdir(src):
|
||||
s = os.path.join(src, item)
|
||||
d = os.path.join(dst, item)
|
||||
if os.path.isdir(s):
|
||||
shutil.copytree(s, d, symlinks, ignore)
|
||||
else:
|
||||
shutil.copy2(s, d)
|
||||
|
||||
|
||||
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
|
||||
blk_device, fstype, system_services=[]):
|
||||
"""
|
||||
To be called from the current cluster leader.
|
||||
Ensures given pool and RBD image exists, is mapped to a block device,
|
||||
and the device is formatted and mounted at the given mount_point.
|
||||
|
||||
If formatting a device for the first time, data existing at mount_point
|
||||
will be migrated to the RBD device before being remounted.
|
||||
|
||||
All services listed in system_services will be stopped prior to data
|
||||
migration and restarted when complete.
|
||||
"""
|
||||
# Ensure pool, RBD image, RBD mappings are in place.
|
||||
if not pool_exists(service, pool):
|
||||
log('ceph: Creating new pool %s.' % pool, level=INFO)
|
||||
create_pool(service, pool)
|
||||
|
||||
if not rbd_exists(service, pool, rbd_img):
|
||||
log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
|
||||
create_rbd_image(service, pool, rbd_img, sizemb)
|
||||
|
||||
if not image_mapped(rbd_img):
|
||||
log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
|
||||
map_block_storage(service, pool, rbd_img)
|
||||
|
||||
# make file system
|
||||
# TODO: What happens if for whatever reason this is run again and
|
||||
# the data is already in the rbd device and/or is mounted??
|
||||
# When it is mounted already, it will fail to make the fs
|
||||
# XXX: This is really sketchy! Need to at least add an fstab entry
|
||||
# otherwise this hook will blow away existing data if its executed
|
||||
# after a reboot.
|
||||
if not filesystem_mounted(mount_point):
|
||||
make_filesystem(blk_device, fstype)
|
||||
|
||||
for svc in system_services:
|
||||
if running(svc):
|
||||
log('Stopping services %s prior to migrating data.' % svc,
|
||||
level=INFO)
|
||||
service_stop(svc)
|
||||
|
||||
place_data_on_ceph(service, blk_device, mount_point, fstype)
|
||||
|
||||
for svc in system_services:
|
||||
service_start(svc)
|
183
hooks/charmhelpers/contrib/hahelpers/cluster.py
Normal file
183
hooks/charmhelpers/contrib/hahelpers/cluster.py
Normal file
@ -0,0 +1,183 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
from socket import gethostname as get_unit_hostname
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
relation_ids,
|
||||
related_units as relation_list,
|
||||
relation_get,
|
||||
config as config_get,
|
||||
INFO,
|
||||
ERROR,
|
||||
unit_get,
|
||||
)
|
||||
|
||||
|
||||
class HAIncompleteConfig(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def is_clustered():
|
||||
for r_id in (relation_ids('ha') or []):
|
||||
for unit in (relation_list(r_id) or []):
|
||||
clustered = relation_get('clustered',
|
||||
rid=r_id,
|
||||
unit=unit)
|
||||
if clustered:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def is_leader(resource):
|
||||
cmd = [
|
||||
"crm", "resource",
|
||||
"show", resource
|
||||
]
|
||||
try:
|
||||
status = subprocess.check_output(cmd)
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
if get_unit_hostname() in status:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def peer_units():
|
||||
peers = []
|
||||
for r_id in (relation_ids('cluster') or []):
|
||||
for unit in (relation_list(r_id) or []):
|
||||
peers.append(unit)
|
||||
return peers
|
||||
|
||||
|
||||
def oldest_peer(peers):
|
||||
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
|
||||
for peer in peers:
|
||||
remote_unit_no = int(peer.split('/')[1])
|
||||
if remote_unit_no < local_unit_no:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def eligible_leader(resource):
|
||||
if is_clustered():
|
||||
if not is_leader(resource):
|
||||
log('Deferring action to CRM leader.', level=INFO)
|
||||
return False
|
||||
else:
|
||||
peers = peer_units()
|
||||
if peers and not oldest_peer(peers):
|
||||
log('Deferring action to oldest service unit.', level=INFO)
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def https():
|
||||
'''
|
||||
Determines whether enough data has been provided in configuration
|
||||
or relation data to configure HTTPS
|
||||
.
|
||||
returns: boolean
|
||||
'''
|
||||
if config_get('use-https') == "yes":
|
||||
return True
|
||||
if config_get('ssl_cert') and config_get('ssl_key'):
|
||||
return True
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for unit in relation_list(r_id):
|
||||
rel_state = [
|
||||
relation_get('https_keystone', rid=r_id, unit=unit),
|
||||
relation_get('ssl_cert', rid=r_id, unit=unit),
|
||||
relation_get('ssl_key', rid=r_id, unit=unit),
|
||||
relation_get('ca_cert', rid=r_id, unit=unit),
|
||||
]
|
||||
# NOTE: works around (LP: #1203241)
|
||||
if (None not in rel_state) and ('' not in rel_state):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def determine_api_port(public_port):
|
||||
'''
|
||||
Determine correct API server listening port based on
|
||||
existence of HTTPS reverse proxy and/or haproxy.
|
||||
|
||||
public_port: int: standard public port for given service
|
||||
|
||||
returns: int: the correct listening port for the API service
|
||||
'''
|
||||
i = 0
|
||||
if len(peer_units()) > 0 or is_clustered():
|
||||
i += 1
|
||||
if https():
|
||||
i += 1
|
||||
return public_port - (i * 10)
|
||||
|
||||
|
||||
def determine_haproxy_port(public_port):
|
||||
'''
|
||||
Description: Determine correct proxy listening port based on public IP +
|
||||
existence of HTTPS reverse proxy.
|
||||
|
||||
public_port: int: standard public port for given service
|
||||
|
||||
returns: int: the correct listening port for the HAProxy service
|
||||
'''
|
||||
i = 0
|
||||
if https():
|
||||
i += 1
|
||||
return public_port - (i * 10)
|
||||
|
||||
|
||||
def get_hacluster_config():
|
||||
'''
|
||||
Obtains all relevant configuration from charm configuration required
|
||||
for initiating a relation to hacluster:
|
||||
|
||||
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
|
||||
|
||||
returns: dict: A dict containing settings keyed by setting name.
|
||||
raises: HAIncompleteConfig if settings are missing.
|
||||
'''
|
||||
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
|
||||
conf = {}
|
||||
for setting in settings:
|
||||
conf[setting] = config_get(setting)
|
||||
missing = []
|
||||
[missing.append(s) for s, v in conf.iteritems() if v is None]
|
||||
if missing:
|
||||
log('Insufficient config data to configure hacluster.', level=ERROR)
|
||||
raise HAIncompleteConfig
|
||||
return conf
|
||||
|
||||
|
||||
def canonical_url(configs, vip_setting='vip'):
|
||||
'''
|
||||
Returns the correct HTTP URL to this host given the state of HTTPS
|
||||
configuration and hacluster.
|
||||
|
||||
:configs : OSTemplateRenderer: A config tempating object to inspect for
|
||||
a complete https context.
|
||||
:vip_setting: str: Setting in charm config that specifies
|
||||
VIP address.
|
||||
'''
|
||||
scheme = 'http'
|
||||
if 'https' in configs.complete_contexts():
|
||||
scheme = 'https'
|
||||
if is_clustered():
|
||||
addr = config_get(vip_setting)
|
||||
else:
|
||||
addr = unit_get('private-address')
|
||||
return '%s://%s' % (scheme, addr)
|
0
hooks/charmhelpers/contrib/openstack/__init__.py
Normal file
0
hooks/charmhelpers/contrib/openstack/__init__.py
Normal file
522
hooks/charmhelpers/contrib/openstack/context.py
Normal file
522
hooks/charmhelpers/contrib/openstack/context.py
Normal file
@ -0,0 +1,522 @@
|
||||
import json
|
||||
import os
|
||||
|
||||
from base64 import b64decode
|
||||
|
||||
from subprocess import (
|
||||
check_call
|
||||
)
|
||||
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
filter_installed_packages,
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
local_unit,
|
||||
log,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
related_units,
|
||||
unit_get,
|
||||
unit_private_ip,
|
||||
ERROR,
|
||||
WARNING,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.hahelpers.cluster import (
|
||||
determine_api_port,
|
||||
determine_haproxy_port,
|
||||
https,
|
||||
is_clustered,
|
||||
peer_units,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.hahelpers.apache import (
|
||||
get_cert,
|
||||
get_ca_cert,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.neutron import (
|
||||
neutron_plugin_attribute,
|
||||
)
|
||||
|
||||
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
||||
|
||||
|
||||
class OSContextError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def ensure_packages(packages):
|
||||
'''Install but do not upgrade required plugin packages'''
|
||||
required = filter_installed_packages(packages)
|
||||
if required:
|
||||
apt_install(required, fatal=True)
|
||||
|
||||
|
||||
def context_complete(ctxt):
|
||||
_missing = []
|
||||
for k, v in ctxt.iteritems():
|
||||
if v is None or v == '':
|
||||
_missing.append(k)
|
||||
if _missing:
|
||||
log('Missing required data: %s' % ' '.join(_missing), level='INFO')
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class OSContextGenerator(object):
|
||||
interfaces = []
|
||||
|
||||
def __call__(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class SharedDBContext(OSContextGenerator):
|
||||
interfaces = ['shared-db']
|
||||
|
||||
def __init__(self, database=None, user=None, relation_prefix=None):
|
||||
'''
|
||||
Allows inspecting relation for settings prefixed with relation_prefix.
|
||||
This is useful for parsing access for multiple databases returned via
|
||||
the shared-db interface (eg, nova_password, quantum_password)
|
||||
'''
|
||||
self.relation_prefix = relation_prefix
|
||||
self.database = database
|
||||
self.user = user
|
||||
|
||||
def __call__(self):
|
||||
self.database = self.database or config('database')
|
||||
self.user = self.user or config('database-user')
|
||||
if None in [self.database, self.user]:
|
||||
log('Could not generate shared_db context. '
|
||||
'Missing required charm config options. '
|
||||
'(database name and user)')
|
||||
raise OSContextError
|
||||
ctxt = {}
|
||||
|
||||
password_setting = 'password'
|
||||
if self.relation_prefix:
|
||||
password_setting = self.relation_prefix + '_password'
|
||||
|
||||
for rid in relation_ids('shared-db'):
|
||||
for unit in related_units(rid):
|
||||
passwd = relation_get(password_setting, rid=rid, unit=unit)
|
||||
ctxt = {
|
||||
'database_host': relation_get('db_host', rid=rid,
|
||||
unit=unit),
|
||||
'database': self.database,
|
||||
'database_user': self.user,
|
||||
'database_password': passwd,
|
||||
}
|
||||
if context_complete(ctxt):
|
||||
return ctxt
|
||||
return {}
|
||||
|
||||
|
||||
class IdentityServiceContext(OSContextGenerator):
|
||||
interfaces = ['identity-service']
|
||||
|
||||
def __call__(self):
|
||||
log('Generating template context for identity-service')
|
||||
ctxt = {}
|
||||
|
||||
for rid in relation_ids('identity-service'):
|
||||
for unit in related_units(rid):
|
||||
ctxt = {
|
||||
'service_port': relation_get('service_port', rid=rid,
|
||||
unit=unit),
|
||||
'service_host': relation_get('service_host', rid=rid,
|
||||
unit=unit),
|
||||
'auth_host': relation_get('auth_host', rid=rid, unit=unit),
|
||||
'auth_port': relation_get('auth_port', rid=rid, unit=unit),
|
||||
'admin_tenant_name': relation_get('service_tenant',
|
||||
rid=rid, unit=unit),
|
||||
'admin_user': relation_get('service_username', rid=rid,
|
||||
unit=unit),
|
||||
'admin_password': relation_get('service_password', rid=rid,
|
||||
unit=unit),
|
||||
# XXX: Hard-coded http.
|
||||
'service_protocol': 'http',
|
||||
'auth_protocol': 'http',
|
||||
}
|
||||
if context_complete(ctxt):
|
||||
return ctxt
|
||||
return {}
|
||||
|
||||
|
||||
class AMQPContext(OSContextGenerator):
|
||||
interfaces = ['amqp']
|
||||
|
||||
def __call__(self):
|
||||
log('Generating template context for amqp')
|
||||
conf = config()
|
||||
try:
|
||||
username = conf['rabbit-user']
|
||||
vhost = conf['rabbit-vhost']
|
||||
except KeyError as e:
|
||||
log('Could not generate shared_db context. '
|
||||
'Missing required charm config options: %s.' % e)
|
||||
raise OSContextError
|
||||
|
||||
ctxt = {}
|
||||
for rid in relation_ids('amqp'):
|
||||
for unit in related_units(rid):
|
||||
if relation_get('clustered', rid=rid, unit=unit):
|
||||
ctxt['clustered'] = True
|
||||
ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
|
||||
unit=unit)
|
||||
else:
|
||||
ctxt['rabbitmq_host'] = relation_get('private-address',
|
||||
rid=rid, unit=unit)
|
||||
ctxt.update({
|
||||
'rabbitmq_user': username,
|
||||
'rabbitmq_password': relation_get('password', rid=rid,
|
||||
unit=unit),
|
||||
'rabbitmq_virtual_host': vhost,
|
||||
})
|
||||
if context_complete(ctxt):
|
||||
# Sufficient information found = break out!
|
||||
break
|
||||
# Used for active/active rabbitmq >= grizzly
|
||||
ctxt['rabbitmq_hosts'] = []
|
||||
for unit in related_units(rid):
|
||||
ctxt['rabbitmq_hosts'].append(relation_get('private-address',
|
||||
rid=rid, unit=unit))
|
||||
if not context_complete(ctxt):
|
||||
return {}
|
||||
else:
|
||||
return ctxt
|
||||
|
||||
|
||||
class CephContext(OSContextGenerator):
|
||||
interfaces = ['ceph']
|
||||
|
||||
def __call__(self):
|
||||
'''This generates context for /etc/ceph/ceph.conf templates'''
|
||||
if not relation_ids('ceph'):
|
||||
return {}
|
||||
log('Generating template context for ceph')
|
||||
mon_hosts = []
|
||||
auth = None
|
||||
key = None
|
||||
for rid in relation_ids('ceph'):
|
||||
for unit in related_units(rid):
|
||||
mon_hosts.append(relation_get('private-address', rid=rid,
|
||||
unit=unit))
|
||||
auth = relation_get('auth', rid=rid, unit=unit)
|
||||
key = relation_get('key', rid=rid, unit=unit)
|
||||
|
||||
ctxt = {
|
||||
'mon_hosts': ' '.join(mon_hosts),
|
||||
'auth': auth,
|
||||
'key': key,
|
||||
}
|
||||
|
||||
if not os.path.isdir('/etc/ceph'):
|
||||
os.mkdir('/etc/ceph')
|
||||
|
||||
if not context_complete(ctxt):
|
||||
return {}
|
||||
|
||||
ensure_packages(['ceph-common'])
|
||||
|
||||
return ctxt
|
||||
|
||||
|
||||
class HAProxyContext(OSContextGenerator):
    """Describes all cluster peers for the shared haproxy template.

    This builds only half a context for the haproxy template: the peer
    units (including the local one). Each charm supplies its own context
    generator describing the port mapping.
    """
    interfaces = ['cluster']

    def __call__(self):
        if not relation_ids('cluster'):
            return {}

        # Start with ourselves, then add every peer on the cluster relation.
        cluster_hosts = {
            local_unit().replace('/', '-'): unit_get('private-address'),
        }
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                peer = unit.replace('/', '-')
                cluster_hosts[peer] = relation_get('private-address',
                                                   rid=rid, unit=unit)

        ctxt = {'units': cluster_hosts}

        if len(cluster_hosts) > 1:
            # Enable haproxy only once we actually have peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt

        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
|
||||
|
||||
|
||||
class ImageServiceContext(OSContextGenerator):
    """Resolves the glance API server from the image-service relation.

    Useful in nova and cinder (currently).
    """
    interfaces = ['image-service']

    def __call__(self):
        log('Generating template context for image-service.')
        rids = relation_ids('image-service')
        if not rids:
            return {}

        # Return as soon as any related unit advertises its API server.
        for rid in rids:
            for unit in related_units(rid):
                api_server = relation_get('glance-api-server', rid=rid,
                                          unit=unit)
                if api_server:
                    return {'glance_api_servers': api_server}

        log('ImageService context is incomplete. '
            'Missing required relation data.')
        return {}
|
||||
|
||||
|
||||
class ApacheSSLContext(OSContextGenerator):
    """
    Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints.  Generated context
    looks something like:
    {
        'namespace': 'cinder',
        'private_address': 'iscsi.mycinderhost.com',
        'endpoints': [(8776, 8766), (8777, 8767)]
    }

    The endpoints list consists of tuples mapping external ports to
    internal ports.
    """
    interfaces = ['https']

    # Charms should inherit this context and set external ports and
    # service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        # Modules needed for SSL termination and reverse proxying.
        check_call(['a2enmod', 'ssl', 'proxy', 'proxy_http'])

    def configure_cert(self):
        # Certificates live under a per-service namespace directory.
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        for directory in ('/etc/apache2/ssl', ssl_dir):
            if not os.path.isdir(directory):
                os.mkdir(directory)

        cert, key = get_cert()
        with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
            cert_out.write(b64decode(cert))
        with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
            key_out.write(b64decode(key))

        ca_cert = get_ca_cert()
        if ca_cert:
            with open(CA_CERT_PATH, 'w') as ca_out:
                ca_out.write(b64decode(ca_cert))
            check_call(['update-ca-certificates'])

    def __call__(self):
        # A single port may be given as a bare string; normalise to a list.
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if not self.external_ports or not https():
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': [],
        }
        for ext_port in self.external_ports:
            # Behind haproxy, the backend port differs from the plain
            # API port used when running standalone.
            if peer_units() or is_clustered():
                int_port = determine_haproxy_port(ext_port)
            else:
                int_port = determine_api_port(ext_port)
            ctxt['endpoints'].append((int(ext_port), int(int_port)))
        return ctxt
|
||||
|
||||
|
||||
class NeutronContext(object):
    """Base context generator for quantum/neutron networking.

    Subclasses provide ``plugin``, ``network_manager`` and (optionally)
    ``neutron_security_groups``; this base resolves plugin metadata via
    neutron_plugin_attribute and ensures required packages are installed.
    """
    interfaces = []

    @property
    def plugin(self):
        # Subclasses return the configured plugin name (eg. 'ovs').
        return None

    @property
    def network_manager(self):
        # Subclasses return 'quantum' or 'neutron'.
        return None

    @property
    def packages(self):
        # A list of package lists, one per component of the plugin.
        return neutron_plugin_attribute(
            self.plugin, 'packages', self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        # Record the active plugin so other hooks/scripts can find it.
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'
        with open(_file, 'wb') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)

        ovs_ctxt = {
            'core_plugin': driver,
            'neutron_plugin': 'ovs',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
        }

        return ovs_ctxt

    def __call__(self):
        # FIX: bail out *before* touching packages. Previously
        # _ensure_packages() ran first, so with an unsupported network
        # manager or no plugin configured, resolving self.packages via
        # neutron_plugin_attribute raised instead of returning {}, and
        # packages could be installed even when the context is unused.
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        self._ensure_packages()

        ctxt = {'network_manager': self.network_manager}

        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())

        self._save_flag_file()
        return ctxt
|
||||
|
||||
|
||||
class OSConfigFlagContext(OSContextGenerator):
    '''
    Responsible for adding user-defined config-flags from charm config
    to a template context.
    '''
    def __call__(self):
        config_flags = config('config-flags')
        if not config_flags or config_flags in ['None', '']:
            return {}

        flags = {}
        for flag in config_flags.split(','):
            if '=' not in flag:
                log('Improperly formatted config-flag, expected k=v '
                    'got %s' % flag, level=WARNING)
                continue
            # FIX: split only on the first '=' so values may themselves
            # contain '=' (eg. connection strings); a plain split('=')
            # raised ValueError on such input.
            k, v = flag.split('=', 1)
            flags[k.strip()] = v

        return {'user_config_flags': flags}
|
||||
|
||||
|
||||
class SubordinateConfigContext(OSContextGenerator):
    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principal for multiple config
    files and multiple services.  Ie, a subordinate that has interfaces
    to both glance and nova may export the following yaml blob as json:

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principal charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as:
        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }

    """
    def __init__(self, service, config_file, interface):
        """
        :param service     : Service name key to query in any subordinate
                             data found
        :param config_file : Service's config file to query sections
        :param interface   : Subordinate interface to inspect
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        # FIX: narrowed from a bare except; json.loads
                        # raises ValueError on malformed input.
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        # FIX: added the missing space between the two
                        # halves of this log message.
                        log('Found subordinate_config on %s but it '
                            'contained nothing for %s service'
                            % (rid, self.service))
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it '
                            'contained nothing for %s'
                            % (rid, self.config_file))
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in sub_config.iteritems():
                        ctxt[k] = v

        if not ctxt:
            # Guarantee the key templates iterate over always exists.
            ctxt['sections'] = {}

        return ctxt
|
117
hooks/charmhelpers/contrib/openstack/neutron.py
Normal file
117
hooks/charmhelpers/contrib/openstack/neutron.py
Normal file
@ -0,0 +1,117 @@
|
||||
# Various utilies for dealing with Neutron and the renaming from Quantum.
|
||||
|
||||
from subprocess import check_output
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log,
|
||||
ERROR,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import os_release
|
||||
|
||||
|
||||
def headers_package():
    """Return the linux-headers package name for the running kernel.

    Needed so DKMS packages can be built against the current kernel.
    """
    running_kernel = check_output(['uname', '-r']).strip()
    return 'linux-headers-%s' % running_kernel
|
||||
|
||||
|
||||
# legacy
|
||||
def quantum_plugins():
    """Plugin metadata (config file, driver, contexts, services, packages)
    keyed by plugin name, using the legacy 'quantum' naming (F and G)."""
    from charmhelpers.contrib.openstack import context
    neutron_db_ctxt = context.SharedDBContext(
        user=config('neutron-database-user'),
        database=config('neutron-database'),
        relation_prefix='neutron')
    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [neutron_db_ctxt],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['quantum-plugin-openvswitch-agent']],
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'services': [],
            'packages': [],
        },
    }
|
||||
|
||||
|
||||
def neutron_plugins():
    """Plugin metadata (config file, driver, contexts, services, packages)
    keyed by plugin name, using the 'neutron' naming (Havana onward)."""
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['neutron-plugin-openvswitch-agent'],
            # FIX: install the neutron-named agent package so it matches
            # the service above (was a copy/paste of the quantum name).
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['neutron-plugin-openvswitch-agent']],
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'services': [],
            'packages': [],
        }
    }
|
||||
|
||||
|
||||
def neutron_plugin_attribute(plugin, attr, net_manager=None):
    """Look up a single attribute for a network plugin.

    :param plugin: plugin name, eg. 'ovs' or 'nvp'.
    :param attr: attribute key, eg. 'packages', 'driver', 'config'.
    :param net_manager: 'quantum' or 'neutron'; resolved from config
                        via network_manager() when not supplied.
    :returns: the attribute value, or None if the plugin does not
              define that attribute.
    :raises Exception: for an unsupported manager or unknown plugin.
    """
    manager = net_manager or network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        log('Error: Network manager does not support plugins.')
        # FIX: carry a message so the traceback is actionable
        # (previously a bare `raise Exception`).
        raise Exception('Network manager does not support plugins.')

    try:
        _plugin = plugins[plugin]
    except KeyError:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise Exception('Unrecognised plugin for %s: %s' % (manager, plugin))

    try:
        return _plugin[attr]
    except KeyError:
        # Attribute simply not defined for this plugin.
        return None
|
||||
|
||||
|
||||
def network_manager():
    """Return the effective network manager name for this deployment.

    Handles the Quantum -> Neutron rename in Havana and any situations
    requiring compatibility (eg. deploying H with network-manager=quantum,
    or upgrading from G).
    """
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ['quantum', 'neutron']:
        return manager

    if release in ['essex']:
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        raise Exception
    elif release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    # H and later always use the neutron name.
    return 'neutron'
|
@ -0,0 +1,2 @@
|
||||
# dummy __init__.py to fool syncer into thinking this is a syncable python
|
||||
# module
|
11
hooks/charmhelpers/contrib/openstack/templates/ceph.conf
Normal file
11
hooks/charmhelpers/contrib/openstack/templates/ceph.conf
Normal file
@ -0,0 +1,11 @@
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# ceph configuration file maintained by Juju
|
||||
# local changes may be overwritten.
|
||||
###############################################################################
|
||||
{% if auth -%}
|
||||
[global]
|
||||
auth_supported = {{ auth }}
|
||||
keyring = /etc/ceph/$cluster.$name.keyring
|
||||
mon host = {{ mon_hosts }}
|
||||
{% endif -%}
|
37
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
Normal file
37
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
Normal file
@ -0,0 +1,37 @@
|
||||
global
|
||||
log 127.0.0.1 local0
|
||||
log 127.0.0.1 local1 notice
|
||||
maxconn 20000
|
||||
user haproxy
|
||||
group haproxy
|
||||
spread-checks 0
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode http
|
||||
option httplog
|
||||
option dontlognull
|
||||
retries 3
|
||||
timeout queue 1000
|
||||
timeout connect 1000
|
||||
timeout client 30000
|
||||
timeout server 30000
|
||||
|
||||
listen stats :8888
|
||||
mode http
|
||||
stats enable
|
||||
stats hide-version
|
||||
stats realm Haproxy\ Statistics
|
||||
stats uri /
|
||||
stats auth admin:password
|
||||
|
||||
{% if units -%}
|
||||
{% for service, ports in service_ports.iteritems() -%}
|
||||
listen {{ service }} 0.0.0.0:{{ ports[0] }}
|
||||
balance roundrobin
|
||||
option tcplog
|
||||
{% for unit, address in units.iteritems() -%}
|
||||
server {{ unit }} {{ address }}:{{ ports[1] }} check
|
||||
{% endfor %}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
@ -0,0 +1,23 @@
|
||||
{% if endpoints -%}
|
||||
{% for ext, int in endpoints -%}
|
||||
Listen {{ ext }}
|
||||
NameVirtualHost *:{{ ext }}
|
||||
<VirtualHost *:{{ ext }}>
|
||||
ServerName {{ private_address }}
|
||||
SSLEngine on
|
||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
|
||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
|
||||
ProxyPass / http://localhost:{{ int }}/
|
||||
ProxyPassReverse / http://localhost:{{ int }}/
|
||||
ProxyPreserveHost on
|
||||
</VirtualHost>
|
||||
<Proxy *>
|
||||
Order deny,allow
|
||||
Allow from all
|
||||
</Proxy>
|
||||
<Location />
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</Location>
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
@ -0,0 +1,23 @@
|
||||
{% if endpoints -%}
|
||||
{% for ext, int in endpoints -%}
|
||||
Listen {{ ext }}
|
||||
NameVirtualHost *:{{ ext }}
|
||||
<VirtualHost *:{{ ext }}>
|
||||
ServerName {{ private_address }}
|
||||
SSLEngine on
|
||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
|
||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
|
||||
ProxyPass / http://localhost:{{ int }}/
|
||||
ProxyPassReverse / http://localhost:{{ int }}/
|
||||
ProxyPreserveHost on
|
||||
</VirtualHost>
|
||||
<Proxy *>
|
||||
Order deny,allow
|
||||
Allow from all
|
||||
</Proxy>
|
||||
<Location />
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</Location>
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
280
hooks/charmhelpers/contrib/openstack/templating.py
Normal file
280
hooks/charmhelpers/contrib/openstack/templating.py
Normal file
@ -0,0 +1,280 @@
|
||||
import os
|
||||
|
||||
from charmhelpers.fetch import apt_install
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
ERROR,
|
||||
INFO
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
|
||||
|
||||
try:
|
||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||
except ImportError:
|
||||
# python-jinja2 may not be installed yet, or we're running unittests.
|
||||
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
|
||||
|
||||
|
||||
class OSConfigException(Exception):
    """Raised when a templates dir or registered config file cannot be
    located/handled by the OpenStack templating machinery."""
    pass
|
||||
|
||||
|
||||
def get_loader(templates_dir, os_release):
    """
    Create a jinja2.ChoiceLoader spanning release template directories up
    to and including *os_release*.  Missing release directories are simply
    omitted.  *templates_dir* itself sits at the bottom of the search
    list as a base loading dir, and a templates dir shipped alongside
    this module (hooks/charmhelpers/contrib/openstack/templates) is
    appended below that when present.

    :param templates_dir: str: Base template directory containing release
                          sub-directories.
    :param os_release   : str: OpenStack release codename to construct
                          template loader.
    :returns            : jinja2.ChoiceLoader constructed with a list of
                          jinja2.FilesystemLoaders, ordered in descending
                          order by OpenStack release.
    """
    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException

    # Bottom of the search path: the charm's templates dir, then any
    # common templates shipped with this helper module.
    loaders = [FileSystemLoader(templates_dir)]
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(helper_templates):
        loaders.append(FileSystemLoader(helper_templates))

    # Release-specific dirs are pushed onto the front oldest-first, so
    # the most recent release <= os_release wins the lookup.
    for rel in OPENSTACK_CODENAMES.itervalues():
        tmpl_dir = os.path.join(templates_dir, rel)
        if os.path.isdir(tmpl_dir):
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break
    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)
|
||||
|
||||
|
||||
class OSConfigTemplate(object):
    """
    Associates a config file template with the context generators needed
    to render it, and tracks which interfaces produced complete contexts.
    """
    def __init__(self, config_file, contexts):
        self.config_file = config_file

        # Accept a single callable as shorthand for a one-element list.
        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        self._complete_contexts = []

    def context(self):
        """Merge the output of every context generator into one dict."""
        ctxt = {}
        for context in self.contexts:
            _ctxt = context()
            if not _ctxt:
                continue
            ctxt.update(_ctxt)
            # Track interfaces for every complete context.
            for interface in context.interfaces:
                if interface not in self._complete_contexts:
                    self._complete_contexts.append(interface)
        return ctxt

    def complete_contexts(self):
        '''
        Return a list of interfaces that have satisfied contexts.
        '''
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts
|
||||
|
||||
|
||||
class OSConfigRenderer(object):
    """
    This class provides a common templating system to be used by OpenStack
    charms.  It is intended to help charms share common code and templates,
    and ease the burden of managing config templates across multiple OpenStack
    releases.

    Basic usage:
        # import some common context generators from charmhelpers
        from charmhelpers.contrib.openstack import context

        # Create a renderer object for a specific OS release.
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        # register some config files with context generators.
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.register(config_file='/etc/nova/api-paste.ini',
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs
        configs.write_all()

    Details:

    OpenStack Releases and template loading
    ---------------------------------------
    When the object is instantiated, it is associated with a specific OS
    release.  This dictates how the template loader will be constructed.

    The constructed loader attempts to load the template from several places
    in the following order:
    - from the most recent OS release-specific template dir (if one exists)
    - the base templates_dir
    - a template directory shipped in the charm with this helper file.


    For the example above, '/tmp/templates' contains the following structure:
        /tmp/templates/nova.conf
        /tmp/templates/api-paste.ini
        /tmp/templates/grizzly/api-paste.ini
        /tmp/templates/havana/api-paste.ini

    Since it was registered with the grizzly release, it first searches
    the grizzly directory for nova.conf, then the templates dir.

    When writing api-paste.ini, it will find the template in the grizzly
    directory.

    If the object were created with folsom, it would fall back to the
    base templates dir for its api-paste.ini template.

    This system should help manage changes in config files through
    openstack releases, allowing charms to fall back to the most recently
    updated config template for a given release

    The haproxy.conf, since it is not shipped in the templates dir, will
    be loaded from the module directory's template directory, eg
    $CHARM/hooks/charmhelpers/contrib/openstack/templates.  This allows
    us to ship common templates (haproxy, apache) with the helpers.

    Context generators
    ---------------------------------------
    Context generators are used to generate template contexts during hook
    execution.  Doing so may require inspecting service relations, charm
    config, etc.  When registered, a config file is associated with a list
    of generators.  When a template is rendered and written, all context
    generators are called in a chain to generate the context dictionary
    passed to the jinja2 template.  See context.py for more info.
    """
    def __init__(self, templates_dir, openstack_release):
        # The templates dir must already exist; a bad path is fatal.
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException

        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        self.templates = {}
        # Lazily-built jinja2 Environment; see _get_tmpl_env().
        self._tmpl_env = None

        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # if this code is running, the object is created pre-install hook.
            # jinja2 shouldn't get touched until the module is reloaded on next
            # hook execution, with proper jinja2 bits successfully imported.
            apt_install('python-jinja2')

    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)

    def _get_tmpl_env(self):
        # Build the jinja2 Environment once and cache it on the instance.
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)

    def _get_template(self, template):
        # Resolve a template name through the release-aware loader chain.
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template

    def render(self, config_file):
        """Render a registered config file and return the text.

        Raises OSConfigException when the config file is not registered
        or no matching template can be located.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()

        _tmpl = os.path.basename(config_file)
        try:
            template = self._get_template(_tmpl)
        except exceptions.TemplateNotFound:
            # if no template is found with basename, try looking for it
            # using a munged full path, eg:
            # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
            _tmpl = '_'.join(config_file.split('/')[1:])
            try:
                template = self._get_template(_tmpl)
            except exceptions.TemplateNotFound as e:
                log('Could not load template from %s by %s or %s.' %
                    (self.templates_dir, os.path.basename(config_file), _tmpl),
                    level=ERROR)
                raise e

        log('Rendering from template: %s' % _tmpl, level=INFO)
        return template.render(ctxt)

    def write(self, config_file):
        """
        Write a single config file, raises if config file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException

        _out = self.render(config_file)

        with open(config_file, 'wb') as out:
            out.write(_out)

        log('Wrote template %s.' % config_file, level=INFO)

    def write_all(self):
        """
        Write out all registered config files.
        """
        [self.write(k) for k in self.templates.iterkeys()]

    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template loader
        based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()

    def complete_contexts(self):
        '''
        Returns a list of context interfaces that yield a complete context.
        '''
        interfaces = []
        [interfaces.extend(i.complete_contexts())
         for i in self.templates.itervalues()]
        return interfaces
|
365
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
365
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
@ -0,0 +1,365 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Common python helper functions used for OpenStack charms.
|
||||
from collections import OrderedDict
|
||||
|
||||
import apt_pkg as apt
|
||||
import subprocess
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log as juju_log,
|
||||
charm_dir,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
lsb_release,
|
||||
)
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
)
|
||||
|
||||
# Ubuntu Cloud Archive apt repository and its signing key id.
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

# Ubuntu series -> OpenStack release shipped in that series' main archive.
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
    ('oneiric', 'diablo'),
    ('precise', 'essex'),
    ('quantal', 'folsom'),
    ('raring', 'grizzly'),
    ('saucy', 'havana'),
])


# OpenStack coordinated version string -> release codename.
OPENSTACK_CODENAMES = OrderedDict([
    ('2011.2', 'diablo'),
    ('2012.1', 'essex'),
    ('2012.2', 'folsom'),
    ('2013.1', 'grizzly'),
    ('2013.2', 'havana'),
    ('2014.1', 'icehouse'),
])

# The ugly duckling: swift versions independently of the coordinated
# OpenStack release, so it needs its own version -> codename map.
SWIFT_CODENAMES = OrderedDict([
    ('1.4.3', 'diablo'),
    ('1.4.8', 'essex'),
    ('1.7.4', 'folsom'),
    ('1.8.0', 'grizzly'),
    ('1.7.7', 'grizzly'),
    ('1.7.6', 'grizzly'),
    ('1.10.0', 'havana'),
    ('1.9.1', 'havana'),
    ('1.9.0', 'havana'),
])
|
||||
|
||||
|
||||
def error_out(msg):
    """Log a fatal error and terminate the hook with a non-zero exit."""
    juju_log("FATAL ERROR: %s" % msg, level='ERROR')
    sys.exit(1)
|
||||
|
||||
|
||||
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.'''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']

    if src == 'distro':
        # 'distro' means whatever OpenStack this Ubuntu series ships.
        try:
            return UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:
            error_out('Could not derive openstack release for '
                      'this Ubuntu release: %s' % ubuntu_rel)

    if src.startswith('cloud:'):
        # eg. cloud:precise-havana/updates -> havana
        pocket = src.split(':')[1]
        return pocket.split('%s-' % ubuntu_rel)[1].split('/')[0]

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        for codename in OPENSTACK_CODENAMES.itervalues():
            if codename in src:
                return codename
|
||||
|
||||
|
||||
def get_os_version_install_source(src):
    """Derive OpenStack version number from an installation source."""
    return get_os_version_codename(get_os_codename_install_source(src))
|
||||
|
||||
|
||||
def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    if vers in OPENSTACK_CODENAMES:
        return OPENSTACK_CODENAMES[vers]
    # Unknown version is fatal for the hook.
    error_out('Could not determine OpenStack codename for version %s' % vers)
|
||||
|
||||
|
||||
def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    for version, cname in OPENSTACK_CODENAMES.iteritems():
        if cname == codename:
            return version
    # No mapping found: fatal for the hook.
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
|
||||
|
||||
|
||||
def get_os_codename_package(package, fatal=True):
    '''Derive OpenStack release codename from an installed package.

    :param package: deb package name to inspect.
    :param fatal: when True, exit the hook on failure; otherwise return None.
    '''
    apt.init()
    cache = apt.Cache()

    try:
        pkg = cache[package]
    except KeyError:
        # FIX: narrowed from a bare except. An unknown package raises
        # KeyError; anything else (eg. KeyboardInterrupt) should propagate.
        if not fatal:
            return None
        # the package is unknown to the current apt cache.
        e = 'Could not determine version of package with no installation '\
            'candidate: %s' % package
        error_out(e)

    if not pkg.current_ver:
        if not fatal:
            return None
        # package is known, but no version is currently installed.
        e = 'Could not determine version of uninstalled package: %s' % package
        error_out(e)

    vers = apt.upstream_version(pkg.current_ver.ver_str)

    try:
        if 'swift' in pkg.name:
            # swift has its own versioning scheme; see SWIFT_CODENAMES.
            swift_vers = vers[:5]
            if swift_vers not in SWIFT_CODENAMES:
                # Deal with 1.10.0 upward
                swift_vers = vers[:6]
            return SWIFT_CODENAMES[swift_vers]
        else:
            vers = vers[:6]
            return OPENSTACK_CODENAMES[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
|
||||
|
||||
|
||||
def get_os_version_package(pkg, fatal=True):
    '''Derive OpenStack version number from an installed package.

    :param pkg: str: name of the installed package.
    :param fatal: bool: passed through to get_os_codename_package.
    :returns: str or None: version number (e.g. '2013.2'), or None when the
              codename could not be determined or has no known version.
    '''
    codename = get_os_codename_package(pkg, fatal=fatal)

    if not codename:
        return None

    if 'swift' in pkg:
        vers_map = SWIFT_CODENAMES
    else:
        vers_map = OPENSTACK_CODENAMES

    # Reverse lookup: the maps are keyed by version number.
    for version, cname in vers_map.iteritems():
        if cname == codename:
            return version
    # Unknown codename: return None explicitly so callers decide whether
    # this is fatal (replaces previously commented-out dead error handling).
    return None
|
||||
|
||||
|
||||
os_rel = None
|
||||
|
||||
|
||||
def os_release(package, base='essex'):
|
||||
'''
|
||||
Returns OpenStack release codename from a cached global.
|
||||
If the codename can not be determined from either an installed package or
|
||||
the installation source, the earliest release supported by the charm should
|
||||
be returned.
|
||||
'''
|
||||
global os_rel
|
||||
if os_rel:
|
||||
return os_rel
|
||||
os_rel = (get_os_codename_package(package, fatal=False) or
|
||||
get_os_codename_install_source(config('openstack-origin')) or
|
||||
base)
|
||||
return os_rel
|
||||
|
||||
|
||||
def import_key(keyid):
    '''Import a repository signing key from keyserver.ubuntu.com.'''
    cmd = ['apt-key', 'adv', '--keyserver', 'keyserver.ubuntu.com',
           '--recv-keys', keyid]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        error_out("Error importing repo key %s" % keyid)
|
||||
|
||||
|
||||
def configure_installation_source(rel):
    '''Configure the apt installation source for OpenStack packages.

    :param rel: str: one of 'distro', a 'ppa:' source, a raw 'deb' line
                (optionally suffixed '|KEYID' to also import a signing
                key), or a 'cloud:<ubuntu>-<openstack>' Cloud Archive
                pocket.
    '''
    if rel == 'distro':
        # Nothing to do; install from the stock Ubuntu archive.
        return
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        # Renamed from 'l' (ambiguous, flake8 E741).
        num_fields = len(rel.split('|'))
        if num_fields == 2:
            # 'deb http://... |KEYID' - import the signing key as well.
            src, key = rel.split('|')
            juju_log("Importing PPA key from keyserver for %s" % src)
            import_key(key)
        elif num_fields == 1:
            src = rel
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            # NOTE: renamed from 'os_rel', which shadowed the module-level
            # 'os_rel' cache read by os_release().
            staging_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % staging_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        apt_install('ubuntu-cloud-keyring', fatal=True)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
|
||||
|
||||
|
||||
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts run
    outside the juju hook environment can source this scriptrc to obtain
    updated config information necessary to perform health checks or
    service changes.

    :param script_path: str: rc file path, relative to the charm directory.
    :param env_vars: environment variables to export; the key 'script_path'
                     is reserved and skipped.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    with open(juju_rc_path, 'wb') as rc_script:
        rc_script.write(
            "#!/bin/bash\n")
        # Plain loop: the previous list comprehension was used purely for
        # its side effects and built a useless list.
        for u, p in env_vars.iteritems():
            if u != "script_path":
                rc_script.write('export %s=%s\n' % (u, p))
|
||||
|
||||
|
||||
def openstack_upgrade_available(package):
|
||||
"""
|
||||
Determines if an OpenStack upgrade is available from installation
|
||||
source, based on version of installed package.
|
||||
|
||||
:param package: str: Name of installed package.
|
||||
|
||||
:returns: bool: : Returns True if configured installation source offers
|
||||
a newer version of package.
|
||||
|
||||
"""
|
||||
|
||||
src = config('openstack-origin')
|
||||
cur_vers = get_os_version_package(package)
|
||||
available_vers = get_os_version_install_source(src)
|
||||
apt.init()
|
||||
return apt.version_compare(available_vers, cur_vers) == 1
|
||||
|
||||
|
||||
def is_ip(address):
    """
    Return True when *address* parses as a dotted-quad IPv4 address.
    """
    try:
        # inet_aton raises socket.error for anything that is not IPv4.
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
|
||||
|
||||
|
||||
def ns_query(address):
    '''Resolve *address* via DNS.

    :param address: a dns.name.Name (PTR lookup) or a string (A lookup).
    :returns: str or None: first answer, or None when nothing resolves.
    '''
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, basestring):
        rtype = 'A'
    else:
        # Previously an unexpected type fell through with 'rtype' unbound,
        # raising UnboundLocalError; fail gracefully instead.
        return None

    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None
|
||||
|
||||
|
||||
def get_host_ip(hostname):
|
||||
"""
|
||||
Resolves the IP for a given hostname, or returns
|
||||
the input if it is already an IP.
|
||||
"""
|
||||
if is_ip(hostname):
|
||||
return hostname
|
||||
|
||||
return ns_query(hostname)
|
||||
|
||||
|
||||
def get_hostname(address):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.
    """
    if not is_ip(address):
        return address

    try:
        import dns.reversename
    except ImportError:
        apt_install('python-dnspython')
        import dns.reversename

    reverse_name = dns.reversename.from_address(address)
    result = ns_query(reverse_name)
    if not result:
        return None

    # DNS answers come back fully-qualified; strip a single trailing dot.
    if result.endswith('.'):
        result = result[:-1]
    return result
|
0
hooks/charmhelpers/contrib/storage/__init__.py
Normal file
0
hooks/charmhelpers/contrib/storage/__init__.py
Normal file
359
hooks/charmhelpers/contrib/storage/linux/ceph.py
Normal file
359
hooks/charmhelpers/contrib/storage/linux/ceph.py
Normal file
@ -0,0 +1,359 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import json
|
||||
import time
|
||||
|
||||
from subprocess import (
|
||||
check_call,
|
||||
check_output,
|
||||
CalledProcessError
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
relation_get,
|
||||
relation_ids,
|
||||
related_units,
|
||||
log,
|
||||
INFO,
|
||||
WARNING,
|
||||
ERROR
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
mount,
|
||||
mounts,
|
||||
service_start,
|
||||
service_stop,
|
||||
service_running,
|
||||
umount,
|
||||
)
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
)
|
||||
|
||||
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
|
||||
KEYFILE = '/etc/ceph/ceph.client.{}.key'
|
||||
|
||||
CEPH_CONF = """[global]
|
||||
auth supported = {auth}
|
||||
keyring = {keyring}
|
||||
mon host = {mon_hosts}
|
||||
"""
|
||||
|
||||
|
||||
def install():
|
||||
''' Basic Ceph client installation '''
|
||||
ceph_dir = "/etc/ceph"
|
||||
if not os.path.exists(ceph_dir):
|
||||
os.mkdir(ceph_dir)
|
||||
apt_install('ceph-common', fatal=True)
|
||||
|
||||
|
||||
def rbd_exists(service, pool, rbd_img):
|
||||
''' Check to see if a RADOS block device exists '''
|
||||
try:
|
||||
out = check_output(['rbd', 'list', '--id', service,
|
||||
'--pool', pool])
|
||||
except CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
return rbd_img in out
|
||||
|
||||
|
||||
def create_rbd_image(service, pool, image, sizemb):
|
||||
''' Create a new RADOS block device '''
|
||||
cmd = [
|
||||
'rbd',
|
||||
'create',
|
||||
image,
|
||||
'--size',
|
||||
str(sizemb),
|
||||
'--id',
|
||||
service,
|
||||
'--pool',
|
||||
pool
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def pool_exists(service, name):
|
||||
''' Check to see if a RADOS pool already exists '''
|
||||
try:
|
||||
out = check_output(['rados', '--id', service, 'lspools'])
|
||||
except CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
return name in out
|
||||
|
||||
|
||||
def get_osds(service):
|
||||
'''
|
||||
Return a list of all Ceph Object Storage Daemons
|
||||
currently in the cluster
|
||||
'''
|
||||
return json.loads(check_output(['ceph', '--id', service,
|
||||
'osd', 'ls', '--format=json']))
|
||||
|
||||
|
||||
def create_pool(service, name, replicas=2):
|
||||
''' Create a new RADOS pool '''
|
||||
if pool_exists(service, name):
|
||||
log("Ceph pool {} already exists, skipping creation".format(name),
|
||||
level=WARNING)
|
||||
return
|
||||
# Calculate the number of placement groups based
|
||||
# on upstream recommended best practices.
|
||||
pgnum = (len(get_osds(service)) * 100 / replicas)
|
||||
cmd = [
|
||||
'ceph', '--id', service,
|
||||
'osd', 'pool', 'create',
|
||||
name, str(pgnum)
|
||||
]
|
||||
check_call(cmd)
|
||||
cmd = [
|
||||
'ceph', '--id', service,
|
||||
'osd', 'pool', 'set', name,
|
||||
'size', str(replicas)
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def delete_pool(service, name):
|
||||
''' Delete a RADOS pool from ceph '''
|
||||
cmd = [
|
||||
'ceph', '--id', service,
|
||||
'osd', 'pool', 'delete',
|
||||
name, '--yes-i-really-really-mean-it'
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def _keyfile_path(service):
|
||||
return KEYFILE.format(service)
|
||||
|
||||
|
||||
def _keyring_path(service):
|
||||
return KEYRING.format(service)
|
||||
|
||||
|
||||
def create_keyring(service, key):
|
||||
''' Create a new Ceph keyring containing key'''
|
||||
keyring = _keyring_path(service)
|
||||
if os.path.exists(keyring):
|
||||
log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
|
||||
return
|
||||
cmd = [
|
||||
'ceph-authtool',
|
||||
keyring,
|
||||
'--create-keyring',
|
||||
'--name=client.{}'.format(service),
|
||||
'--add-key={}'.format(key)
|
||||
]
|
||||
check_call(cmd)
|
||||
log('ceph: Created new ring at %s.' % keyring, level=INFO)
|
||||
|
||||
|
||||
def create_key_file(service, key):
|
||||
''' Create a file containing key '''
|
||||
keyfile = _keyfile_path(service)
|
||||
if os.path.exists(keyfile):
|
||||
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
|
||||
return
|
||||
with open(keyfile, 'w') as fd:
|
||||
fd.write(key)
|
||||
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
|
||||
|
||||
|
||||
def get_ceph_nodes():
|
||||
''' Query named relation 'ceph' to detemine current nodes '''
|
||||
hosts = []
|
||||
for r_id in relation_ids('ceph'):
|
||||
for unit in related_units(r_id):
|
||||
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
|
||||
return hosts
|
||||
|
||||
|
||||
def configure(service, key, auth):
    ''' Perform basic configuration of Ceph.

    Writes the client keyring and key file, renders /etc/ceph/ceph.conf
    from the CEPH_CONF template using the mon hosts found on the 'ceph'
    relation, and loads the rbd kernel module.
    '''
    create_keyring(service, key)
    create_key_file(service, key)
    # mon hosts are the private-addresses of all related ceph units.
    hosts = get_ceph_nodes()
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF.format(auth=auth,
                                         keyring=_keyring_path(service),
                                         mon_hosts=",".join(map(str, hosts))))
    # rbd module must be loaded before any images can be mapped.
    modprobe('rbd')
|
||||
|
||||
|
||||
def image_mapped(name):
|
||||
''' Determine whether a RADOS block device is mapped locally '''
|
||||
try:
|
||||
out = check_output(['rbd', 'showmapped'])
|
||||
except CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
return name in out
|
||||
|
||||
|
||||
def map_block_storage(service, pool, image):
|
||||
''' Map a RADOS block device for local use '''
|
||||
cmd = [
|
||||
'rbd',
|
||||
'map',
|
||||
'{}/{}'.format(pool, image),
|
||||
'--user',
|
||||
service,
|
||||
'--secret',
|
||||
_keyfile_path(service),
|
||||
]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def filesystem_mounted(fs):
|
||||
''' Determine whether a filesytems is already mounted '''
|
||||
return fs in [f for f, m in mounts()]
|
||||
|
||||
|
||||
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    ''' Make a new filesystem on the specified block device.

    Waits up to *timeout* seconds for the device node to appear before
    formatting.

    :param blk_device: str: full path of the block device to format.
    :param fstype: str: filesystem type passed to mkfs -t.
    :param timeout: int: seconds to wait for the device node.
    :raises IOError: with ENOENT when the device never appears.
    '''
    # 'os.errno' is an undocumented accident of the os module (and absent
    # in newer Pythons); use the errno module directly.
    import errno
    count = 0
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('ceph: gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(errno.ENOENT, os.strerror(errno.ENOENT),
                          blk_device)
        log('ceph: waiting for block device %s to appear' % blk_device,
            level=INFO)
        count += 1
        time.sleep(1)
    # Device exists. (The original used while/else; with no break the
    # else always ran on normal exit, so plain fall-through is equivalent.)
    log('ceph: Formatting block device %s as filesystem %s.' %
        (blk_device, fstype), level=INFO)
    check_call(['mkfs', '-t', fstype, blk_device])
|
||||
|
||||
|
||||
def place_data_on_block_device(blk_device, data_src_dst):
    ''' Migrate data in data_src_dst to blk_device and then remount.

    The device is temporarily mounted at /mnt while the data is copied
    across, then re-mounted over the original location with the mount
    point's ownership restored to the original uid/gid.
    '''
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, uid, gid)
|
||||
|
||||
|
||||
# TODO: re-use
|
||||
def modprobe(module):
    ''' Load a kernel module and configure for auto-load on reboot.

    :param module: str: name of the kernel module to load.
    '''
    log('ceph: Loading kernel module', level=INFO)
    cmd = ['modprobe', module]
    check_call(cmd)
    with open('/etc/modules', 'r+') as modules:
        if module not in modules.read():
            # Terminate the entry with a newline; the previous code wrote
            # the bare module name, corrupting /etc/modules when the file
            # lacked a trailing newline or when more entries were appended.
            modules.write(module + "\n")
|
||||
|
||||
|
||||
def copy_files(src, dst, symlinks=False, ignore=None):
    ''' Recursively copy the contents of *src* into *dst*.

    Directories are copied with shutil.copytree, regular files with
    shutil.copy2 (which preserves metadata).
    '''
    for entry in os.listdir(src):
        source = os.path.join(src, entry)
        target = os.path.join(dst, entry)
        if os.path.isdir(source):
            shutil.copytree(source, target, symlinks, ignore)
        else:
            shutil.copy2(source, target)
|
||||
|
||||
|
||||
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=None):
    """
    NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.

    :param system_services: list or None: services to stop before and
        restart after migration. Defaults to none. (Previously a mutable
        [] default, which is shared between calls.)
    """
    if system_services is None:
        system_services = []

    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('ceph: Creating new pool {}.'.format(pool))
        create_pool(service, pool)

    if not rbd_exists(service, pool, rbd_img):
        log('ceph: Creating RBD image ({}).'.format(rbd_img))
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if service_running(svc):
                log('ceph: Stopping services {} prior to migrating data.'
                    .format(svc))
                service_stop(svc)

        place_data_on_block_device(blk_device, mount_point)

        for svc in system_services:
            log('ceph: Starting service {} after migrating data.'
                .format(svc))
            service_start(svc)
|
||||
|
||||
|
||||
def ensure_ceph_keyring(service, user=None, group=None):
    '''
    Ensures a ceph keyring is created for a named service
    and optionally ensures user and group ownership.

    Returns False if no ceph key is available in relation state.
    '''
    key = None
    # NOTE: the previous version only broke out of the inner loop, so a
    # later relation/unit *without* a key could overwrite one already
    # found with None; stop searching entirely once a key is found.
    for rid in relation_ids('ceph'):
        if key:
            break
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break
    if not key:
        return False
    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])
    return True
|
62
hooks/charmhelpers/contrib/storage/linux/loopback.py
Normal file
62
hooks/charmhelpers/contrib/storage/linux/loopback.py
Normal file
@ -0,0 +1,62 @@
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from subprocess import (
|
||||
check_call,
|
||||
check_output,
|
||||
)
|
||||
|
||||
|
||||
##################################################
|
||||
# loopback device helpers.
|
||||
##################################################
|
||||
def loopback_devices():
|
||||
'''
|
||||
Parse through 'losetup -a' output to determine currently mapped
|
||||
loopback devices. Output is expected to look like:
|
||||
|
||||
/dev/loop0: [0807]:961814 (/tmp/my.img)
|
||||
|
||||
:returns: dict: a dict mapping {loopback_dev: backing_file}
|
||||
'''
|
||||
loopbacks = {}
|
||||
cmd = ['losetup', '-a']
|
||||
devs = [d.strip().split(' ') for d in
|
||||
check_output(cmd).splitlines() if d != '']
|
||||
for dev, _, f in devs:
|
||||
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
|
||||
return loopbacks
|
||||
|
||||
|
||||
def create_loopback(file_path):
|
||||
'''
|
||||
Create a loopback device for a given backing file.
|
||||
|
||||
:returns: str: Full path to new loopback device (eg, /dev/loop0)
|
||||
'''
|
||||
file_path = os.path.abspath(file_path)
|
||||
check_call(['losetup', '--find', file_path])
|
||||
for d, f in loopback_devices().iteritems():
|
||||
if f == file_path:
|
||||
return d
|
||||
|
||||
|
||||
def ensure_loopback_device(path, size):
|
||||
'''
|
||||
Ensure a loopback device exists for a given backing file path and size.
|
||||
If it a loopback device is not mapped to file, a new one will be created.
|
||||
|
||||
TODO: Confirm size of found loopback device.
|
||||
|
||||
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
|
||||
'''
|
||||
for d, f in loopback_devices().iteritems():
|
||||
if f == path:
|
||||
return d
|
||||
|
||||
if not os.path.exists(path):
|
||||
cmd = ['truncate', '--size', size, path]
|
||||
check_call(cmd)
|
||||
|
||||
return create_loopback(path)
|
88
hooks/charmhelpers/contrib/storage/linux/lvm.py
Normal file
88
hooks/charmhelpers/contrib/storage/linux/lvm.py
Normal file
@ -0,0 +1,88 @@
|
||||
from subprocess import (
|
||||
CalledProcessError,
|
||||
check_call,
|
||||
check_output,
|
||||
Popen,
|
||||
PIPE,
|
||||
)
|
||||
|
||||
|
||||
##################################################
|
||||
# LVM helpers.
|
||||
##################################################
|
||||
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    volume_group = list_lvm_volume_group(block_device)
    if volume_group:
        check_call(['vgchange', '-an', volume_group])
|
||||
|
||||
|
||||
def is_lvm_physical_volume(block_device):
|
||||
'''
|
||||
Determine whether a block device is initialized as an LVM PV.
|
||||
|
||||
:param block_device: str: Full path of block device to inspect.
|
||||
|
||||
:returns: boolean: True if block device is a PV, False if not.
|
||||
'''
|
||||
try:
|
||||
check_output(['pvdisplay', block_device])
|
||||
return True
|
||||
except CalledProcessError:
|
||||
return False
|
||||
|
||||
|
||||
def remove_lvm_physical_volume(block_device):
|
||||
'''
|
||||
Remove LVM PV signatures from a given block device.
|
||||
|
||||
:param block_device: str: Full path of block device to scrub.
|
||||
'''
|
||||
p = Popen(['pvremove', '-ff', block_device],
|
||||
stdin=PIPE)
|
||||
p.communicate(input='y\n')
|
||||
|
||||
|
||||
def list_lvm_volume_group(block_device):
|
||||
'''
|
||||
List LVM volume group associated with a given block device.
|
||||
|
||||
Assumes block device is a valid LVM PV.
|
||||
|
||||
:param block_device: str: Full path of block device to inspect.
|
||||
|
||||
:returns: str: Name of volume group associated with block device or None
|
||||
'''
|
||||
vg = None
|
||||
pvd = check_output(['pvdisplay', block_device]).splitlines()
|
||||
for l in pvd:
|
||||
if l.strip().startswith('VG Name'):
|
||||
vg = ' '.join(l.split()).split(' ').pop()
|
||||
return vg
|
||||
|
||||
|
||||
def create_lvm_physical_volume(block_device):
|
||||
'''
|
||||
Initialize a block device as an LVM physical volume.
|
||||
|
||||
:param block_device: str: Full path of block device to initialize.
|
||||
|
||||
'''
|
||||
check_call(['pvcreate', block_device])
|
||||
|
||||
|
||||
def create_lvm_volume_group(volume_group, block_device):
|
||||
'''
|
||||
Create an LVM volume group backed by a given block device.
|
||||
|
||||
Assumes block device has already been initialized as an LVM PV.
|
||||
|
||||
:param volume_group: str: Name of volume group to create.
|
||||
:block_device: str: Full path of PV-initialized block device.
|
||||
'''
|
||||
check_call(['vgcreate', volume_group, block_device])
|
25
hooks/charmhelpers/contrib/storage/linux/utils.py
Normal file
25
hooks/charmhelpers/contrib/storage/linux/utils.py
Normal file
@ -0,0 +1,25 @@
|
||||
from os import stat
|
||||
from stat import S_ISBLK
|
||||
|
||||
from subprocess import (
|
||||
check_call
|
||||
)
|
||||
|
||||
|
||||
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    mode = stat(path).st_mode
    return S_ISBLK(mode)
|
||||
|
||||
|
||||
def zap_disk(block_device):
    '''
    Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.

    :param block_device: str: Full path of block device to clean.
    '''
    cmd = ['sgdisk', '--zap-all', block_device]
    check_call(cmd)
|
0
hooks/charmhelpers/core/__init__.py
Normal file
0
hooks/charmhelpers/core/__init__.py
Normal file
340
hooks/charmhelpers/core/hookenv.py
Normal file
340
hooks/charmhelpers/core/hookenv.py
Normal file
@ -0,0 +1,340 @@
|
||||
"Interactions with the Juju environment"
|
||||
# Copyright 2013 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
import subprocess
|
||||
import UserDict
|
||||
|
||||
CRITICAL = "CRITICAL"
|
||||
ERROR = "ERROR"
|
||||
WARNING = "WARNING"
|
||||
INFO = "INFO"
|
||||
DEBUG = "DEBUG"
|
||||
MARKER = object()
|
||||
|
||||
cache = {}
|
||||
|
||||
|
||||
def cached(func):
    ''' Cache return values for multiple executions of func + args

    For example:

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    '''
    from functools import wraps

    # functools.wraps preserves the decorated function's __name__ and
    # docstring, so name-based dispatch (e.g. Hooks registration by
    # __name__) keeps working on cached functions.
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        key = str((func, args, kwargs))
        try:
            return cache[key]
        except KeyError:
            res = func(*args, **kwargs)
            cache[key] = res
            return res
    return wrapper
|
||||
|
||||
|
||||
def flush(key):
    ''' Flushes any entries from function cache where the
    key is found in the function+args '''
    # Collect first, then delete: mutating the dict while iterating it
    # would raise.
    stale = [entry for entry in cache if key in entry]
    for entry in stale:
        del cache[entry]
|
||||
|
||||
|
||||
def log(message, level=None):
|
||||
"Write a message to the juju log"
|
||||
command = ['juju-log']
|
||||
if level:
|
||||
command += ['-l', level]
|
||||
command += [message]
|
||||
subprocess.call(command)
|
||||
|
||||
|
||||
class Serializable(UserDict.IterableUserDict):
    """Wrapper that exposes an arbitrary object through the dict
    interface and can serialize it to yaml or json."""

    def __init__(self, obj):
        # wrap the object
        UserDict.IterableUserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails.
        # See if this object has attribute.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Check for attribute in wrapped object.
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Proxy to the wrapped object via dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def __getstate__(self):
        # Pickle as a standard dictionary.
        return self.data

    def __setstate__(self, state):
        # Unpickle into our wrapper.
        # NOTE(review): __init__ is not called during unpickling, so only
        # 'data' is restored -- presumably intentional; confirm.
        self.data = state

    def json(self):
        "Serialize the object to json"
        return json.dumps(self.data)

    def yaml(self):
        "Serialize the object to yaml"
        return yaml.dump(self.data)
|
||||
|
||||
|
||||
def execution_environment():
|
||||
"""A convenient bundling of the current execution context"""
|
||||
context = {}
|
||||
context['conf'] = config()
|
||||
if relation_id():
|
||||
context['reltype'] = relation_type()
|
||||
context['relid'] = relation_id()
|
||||
context['rel'] = relation_get()
|
||||
context['unit'] = local_unit()
|
||||
context['rels'] = relations()
|
||||
context['env'] = os.environ
|
||||
return context
|
||||
|
||||
|
||||
def in_relation_hook():
    "Determine whether we're running in a relation hook"
    # juju sets JUJU_RELATION only for relation hooks.
    return os.environ.get('JUJU_RELATION') is not None
|
||||
|
||||
|
||||
def relation_type():
|
||||
"The scope for the current relation hook"
|
||||
return os.environ.get('JUJU_RELATION', None)
|
||||
|
||||
|
||||
def relation_id():
|
||||
"The relation ID for the current relation hook"
|
||||
return os.environ.get('JUJU_RELATION_ID', None)
|
||||
|
||||
|
||||
def local_unit():
|
||||
"Local unit ID"
|
||||
return os.environ['JUJU_UNIT_NAME']
|
||||
|
||||
|
||||
def remote_unit():
|
||||
"The remote unit for the current relation hook"
|
||||
return os.environ['JUJU_REMOTE_UNIT']
|
||||
|
||||
|
||||
def service_name():
|
||||
"The name service group this unit belongs to"
|
||||
return local_unit().split('/')[0]
|
||||
|
||||
|
||||
@cached
|
||||
def config(scope=None):
|
||||
"Juju charm configuration"
|
||||
config_cmd_line = ['config-get']
|
||||
if scope is not None:
|
||||
config_cmd_line.append(scope)
|
||||
config_cmd_line.append('--format=json')
|
||||
try:
|
||||
return json.loads(subprocess.check_output(config_cmd_line))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
@cached
|
||||
def relation_get(attribute=None, unit=None, rid=None):
|
||||
_args = ['relation-get', '--format=json']
|
||||
if rid:
|
||||
_args.append('-r')
|
||||
_args.append(rid)
|
||||
_args.append(attribute or '-')
|
||||
if unit:
|
||||
_args.append(unit)
|
||||
try:
|
||||
return json.loads(subprocess.check_output(_args))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation data on the current (or a named) relation.

    :param relation_id: str or None: target relation id (passed as '-r').
    :param relation_settings: dict or None: settings to write; a value of
        None clears the key. (Previously a mutable {} default argument.)
    :param kwargs: additional settings, appended after relation_settings.
    """
    if relation_settings is None:
        relation_settings = {}
    relation_cmd_line = ['relation-set']
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    for k, v in (relation_settings.items() + kwargs.items()):
        if v is None:
            relation_cmd_line.append('{}='.format(k))
        else:
            relation_cmd_line.append('{}={}'.format(k, v))
    subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
|
||||
|
||||
|
||||
@cached
def relation_ids(reltype=None):
    """A list of relation_ids for *reltype* (or the current relation).

    :returns: list: relation ids; empty when there are no relations.
    """
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
    # 'relation-ids' emits JSON null when there are no relations; coerce
    # to an empty list. (An unreachable trailing 'return []' was removed.)
    return json.loads(subprocess.check_output(relid_cmd_line)) or []
|
||||
|
||||
|
||||
@cached
|
||||
def related_units(relid=None):
|
||||
"A list of related units"
|
||||
relid = relid or relation_id()
|
||||
units_cmd_line = ['relation-list', '--format=json']
|
||||
if relid is not None:
|
||||
units_cmd_line.extend(('-r', relid))
|
||||
return json.loads(subprocess.check_output(units_cmd_line)) or []
|
||||
|
||||
|
||||
@cached
|
||||
def relation_for_unit(unit=None, rid=None):
|
||||
"Get the json represenation of a unit's relation"
|
||||
unit = unit or remote_unit()
|
||||
relation = relation_get(unit=unit, rid=rid)
|
||||
for key in relation:
|
||||
if key.endswith('-list'):
|
||||
relation[key] = relation[key].split()
|
||||
relation['__unit__'] = unit
|
||||
return relation
|
||||
|
||||
|
||||
@cached
def relations_for_id(relid=None):
    "Get relations of a specific relation ID"
    relation_data = []
    # NOTE(review): when relid is not supplied this falls back to
    # relation_ids(), which returns a *list* of ids rather than a single
    # id -- relation_id() may have been intended; confirm callers always
    # pass relid explicitly before relying on the default.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        # Tag each unit's data with the relation id it came from.
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
|
||||
|
||||
|
||||
@cached
|
||||
def relations_of_type(reltype=None):
|
||||
"Get relations of a specific type"
|
||||
relation_data = []
|
||||
reltype = reltype or relation_type()
|
||||
for relid in relation_ids(reltype):
|
||||
for relation in relations_for_id(relid):
|
||||
relation['__relid__'] = relid
|
||||
relation_data.append(relation)
|
||||
return relation_data
|
||||
|
||||
|
||||
@cached
def relation_types():
    """Get a list of relation types supported by this charm.

    Reads metadata.yaml from $CHARM_DIR and collects the relation names
    declared under 'provides', 'requires' and 'peers'.
    """
    charmdir = os.environ.get('CHARM_DIR', '')
    rel_types = []
    # Context manager guarantees metadata.yaml is closed even if parsing
    # raises (the previous explicit close() was skipped on error).
    with open(os.path.join(charmdir, 'metadata.yaml')) as mdf:
        md = yaml.safe_load(mdf)
    for key in ('provides', 'requires', 'peers'):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    return rel_types
|
||||
|
||||
|
||||
@cached
|
||||
def relations():
|
||||
rels = {}
|
||||
for reltype in relation_types():
|
||||
relids = {}
|
||||
for relid in relation_ids(reltype):
|
||||
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
|
||||
for unit in related_units(relid):
|
||||
reldata = relation_get(unit=unit, rid=relid)
|
||||
units[unit] = reldata
|
||||
relids[relid] = units
|
||||
rels[reltype] = relids
|
||||
return rels
|
||||
|
||||
|
||||
def open_port(port, protocol="TCP"):
    """Open a service network port via the open-port hook tool."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
|
||||
|
||||
|
||||
def close_port(port, protocol="TCP"):
    """Close a service network port via the close-port hook tool."""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])
|
||||
|
||||
|
||||
@cached
def unit_get(attribute):
    """Fetch *attribute* of the local unit via unit-get (JSON decoded).

    Returns None when the output cannot be parsed as JSON.
    """
    try:
        raw = subprocess.check_output(
            ['unit-get', '--format=json', attribute])
        return json.loads(raw)
    except ValueError:
        return None
|
||||
|
||||
|
||||
def unit_private_ip():
    """Shortcut for the local unit's private-address."""
    return unit_get('private-address')
|
||||
|
||||
|
||||
class UnregisteredHookError(Exception):
    """Raised by Hooks.execute() when no handler matches the hook name."""
    pass
|
||||
|
||||
|
||||
class Hooks(object):
    """Dispatcher mapping hook names to handler functions.

    Handlers are registered explicitly via register() or with the hook()
    decorator; execute() dispatches on the basename of argv[0].
    """

    def __init__(self):
        super(Hooks, self).__init__()
        self._hooks = {}

    def register(self, name, function):
        """Associate *name* with *function* for later dispatch."""
        self._hooks[name] = function

    def execute(self, args):
        """Run the handler for basename(args[0]).

        Raises UnregisteredHookError when no handler is registered.
        """
        hook_name = os.path.basename(args[0])
        try:
            handler = self._hooks[hook_name]
        except KeyError:
            raise UnregisteredHookError(hook_name)
        handler()

    def hook(self, *hook_names):
        """Decorator registering a handler under each of *hook_names*,
        plus its own function name and, when the name contains
        underscores, a dash-separated variant.

        NOTE: the original used a for/else here; since the loop never
        breaks, the else branch always ran — the unconditional
        registrations below preserve that behavior.
        """
        def wrapper(decorated):
            for name in hook_names:
                self.register(name, decorated)
            self.register(decorated.__name__, decorated)
            if '_' in decorated.__name__:
                self.register(
                    decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
|
||||
|
||||
|
||||
def charm_dir():
    """Return the charm's root directory from $CHARM_DIR (None if unset)."""
    return os.environ.get('CHARM_DIR')
|
241
hooks/charmhelpers/core/host.py
Normal file
241
hooks/charmhelpers/core/host.py
Normal file
@ -0,0 +1,241 @@
|
||||
"""Tools for working with the host system"""
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Nick Moffitt <nick.moffitt@canonical.com>
|
||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||
|
||||
import os
|
||||
import pwd
|
||||
import grp
|
||||
import random
|
||||
import string
|
||||
import subprocess
|
||||
import hashlib
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
from hookenv import log
|
||||
|
||||
|
||||
def service_start(service_name):
    """Start *service_name* via service(8); True when it exits zero."""
    return service('start', service_name)
|
||||
|
||||
|
||||
def service_stop(service_name):
    """Stop *service_name* via service(8); True when it exits zero."""
    return service('stop', service_name)
|
||||
|
||||
|
||||
def service_restart(service_name):
    """Restart *service_name* via service(8); True when it exits zero."""
    return service('restart', service_name)
|
||||
|
||||
|
||||
def service_reload(service_name, restart_on_failure=False):
    """Reload *service_name*; optionally fall back to a restart.

    Returns the success of the reload, or of the restart when the
    reload failed and restart_on_failure is True.
    """
    reloaded = service('reload', service_name)
    if reloaded or not restart_on_failure:
        return reloaded
    return service('restart', service_name)
|
||||
|
||||
|
||||
def service(action, service_name):
    """Run ``service <service_name> <action>``; True when it exits zero."""
    return subprocess.call(['service', service_name, action]) == 0
|
||||
|
||||
|
||||
def service_running(service):
    """Return True when ``service <name> status`` reports running."""
    try:
        output = subprocess.check_output(['service', service, 'status'])
    except subprocess.CalledProcessError:
        return False
    return "start/running" in output or "is running" in output
|
||||
|
||||
|
||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Ensure *username* exists, creating it if needed.

    Without a password (or with system_user=True) a --system account is
    created.  Returns the account's pwd entry.
    """
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if system_user or password is None:
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info
|
||||
|
||||
|
||||
def add_user_to_group(username, group):
    """Add *username* to *group* via gpasswd."""
    log("Adding user {} to group {}".format(username, group))
    subprocess.check_call(['gpasswd', '-a', username, group])
|
||||
|
||||
|
||||
def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate from_path to to_path with rsync; return its output."""
    # falsy options (None or []) fall back to the defaults, as before
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags] + options + [from_path, to_path]
    log(" ".join(cmd))
    return subprocess.check_output(cmd).strip()
|
||||
|
||||
|
||||
def symlink(source, destination):
    """Force-create a symbolic link at *destination* pointing to *source*."""
    log("Symlinking {} as {}".format(source, destination))
    subprocess.check_call(['ln', '-sf', source, destination])
|
||||
|
||||
|
||||
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
    """Create a directory at *path* owned by owner:group with mode perms.

    With force=True an existing non-directory at *path* is removed and
    replaced with a directory.  Fix: the original unlinked the file but
    skipped makedirs (it was only in the not-exists branch), so the
    final os.chown failed on a missing path.
    """
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    realpath = os.path.abspath(path)
    if os.path.exists(realpath):
        if force and not os.path.isdir(realpath):
            log("Removing non-directory file {} prior to mkdir()".format(path))
            os.unlink(realpath)
            os.makedirs(realpath, perms)
    else:
        os.makedirs(realpath, perms)
    os.chown(realpath, uid, gid)
|
||||
|
||||
|
||||
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite *path* with *content*, owned owner:group."""
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    with open(path, 'w') as target:
        # apply ownership/mode before writing so content never lands
        # in a world-readable window
        os.fchown(target.fileno(), uid, gid)
        os.fchmod(target.fileno(), perms)
        target.write(content)
|
||||
|
||||
|
||||
def mount(device, mountpoint, options=None, persist=False):
    '''Mount a filesystem; return True on success, False on failure.'''
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False
    if persist:
        # TODO: update fstab
        pass
    return True
|
||||
|
||||
|
||||
def umount(mountpoint, persist=False):
    '''Unmount a filesystem; return True on success, False on failure.'''
    try:
        subprocess.check_output(['umount', mountpoint])
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False
    if persist:
        # TODO: update fstab
        pass
    return True
|
||||
|
||||
|
||||
def mounts():
    '''List of all mounted volumes as [[mountpoint, device], [...]].'''
    with open('/proc/mounts') as f:
        # each /proc/mounts line: "<device> <mountpoint> ..." —
        # [1::-1] keeps the first two fields in reversed order
        return [line.strip().split()[1::-1] for line in f.readlines()]
|
||||
|
||||
|
||||
def file_hash(path):
    ''' Generate a md5 hash of the contents of 'path' or None if not found

    Fix: open in binary mode — hashlib digests bytes, so text mode is
    both platform-dependent (newline translation) and broken on
    Python 3; the file is also now closed via a context manager.
    '''
    if not os.path.exists(path):
        return None
    h = hashlib.md5()
    with open(path, 'rb') as source:
        h.update(source.read())
    return h.hexdigest()
|
||||
|
||||
|
||||
def restart_on_change(restart_map):
    ''' Restart services based on configuration files changing

    This function is used a decorator, for example

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
            })
        def ceph_client_changed():
            ...

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function.
    '''
    def wrap(f):
        def wrapped_f(*args):
            # snapshot hashes, run the wrapped hook, then compare
            before = dict((path, file_hash(path)) for path in restart_map)
            f(*args)
            restarts = []
            for path in restart_map:
                if before[path] != file_hash(path):
                    restarts += restart_map[path]
            # de-duplicate while preserving order
            for service_name in list(OrderedDict.fromkeys(restarts)):
                service('restart', service_name)
        return wrapped_f
    return wrap
|
||||
|
||||
|
||||
def lsb_release():
    '''Return /etc/lsb-release in a dict.

    Fix: split on the first '=' only, so values that themselves contain
    '=' (possible in DISTRIB_DESCRIPTION) are not truncated.
    '''
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            k, v = line.split('=', 1)
            d[k.strip()] = v.strip()
    return d
|
||||
|
||||
|
||||
def pwgen(length=None):
    '''Generate a random password of *length* (default: random 35-44).

    Fixes: draw characters from random.SystemRandom (OS entropy) since
    the output is a credential, and use string.ascii_letters (the
    Python-3-compatible name for the same default character set).
    Visually ambiguous characters are excluded.
    '''
    if length is None:
        length = random.choice(range(35, 45))
    rng = random.SystemRandom()
    alphanumeric_chars = [
        c for c in (string.ascii_letters + string.digits)
        if c not in 'l0QD1vAEIOUaeiou']
    return ''.join(rng.choice(alphanumeric_chars) for _ in range(length))
|
209
hooks/charmhelpers/fetch/__init__.py
Normal file
209
hooks/charmhelpers/fetch/__init__.py
Normal file
@ -0,0 +1,209 @@
|
||||
import importlib
|
||||
from yaml import safe_load
|
||||
from charmhelpers.core.host import (
|
||||
lsb_release
|
||||
)
|
||||
from urlparse import (
|
||||
urlparse,
|
||||
urlunparse,
|
||||
)
|
||||
import subprocess
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log,
|
||||
)
|
||||
import apt_pkg
|
||||
|
||||
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
|
||||
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
||||
"""
|
||||
PROPOSED_POCKET = """# Proposed
|
||||
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
|
||||
"""
|
||||
|
||||
|
||||
def filter_installed_packages(packages):
    """Returns a list of packages that require installation"""
    apt_pkg.init()
    cache = apt_pkg.Cache()
    missing = []
    for package in packages:
        try:
            entry = cache[package]
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            missing.append(package)
        else:
            if not entry.current_ver:
                missing.append(package)
    return missing
|
||||
|
||||
|
||||
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages with apt-get -y install."""
    options = options or []
    cmd = ['apt-get', '-y'] + options + ['install']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    runner = subprocess.check_call if fatal else subprocess.call
    runner(cmd)
|
||||
|
||||
|
||||
def apt_update(fatal=False):
    """Update local apt cache"""
    runner = subprocess.check_call if fatal else subprocess.call
    runner(['apt-get', 'update'])
|
||||
|
||||
|
||||
def apt_purge(packages, fatal=False):
    """Purge one or more packages"""
    cmd = ['apt-get', '-y', 'purge']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    runner = subprocess.check_call if fatal else subprocess.call
    runner(cmd)
|
||||
|
||||
|
||||
def add_source(source, key=None):
    """Configure an apt source.

    Supports ppa:/http: repositories, cloud:<pocket> (Ubuntu Cloud
    Archive) and the 'proposed' pocket; optionally imports an apt key.
    """
    if source.startswith(('ppa:', 'http:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    if key:
        subprocess.check_call(['apt-key', 'import', key])
|
||||
|
||||
|
||||
class SourceConfigError(Exception):
    """Raised when install sources/keys configuration is inconsistent."""
    pass
|
||||
|
||||
|
||||
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration

    Example config:
        install_sources:
            - "ppa:foo"
            - "http://example.com/repo precise main"
        install_keys:
            - null
            - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load(config(sources_var))
    keys = safe_load(config(keys_var))
    if isinstance(sources, basestring) and isinstance(keys, basestring):
        add_source(sources, keys)
    else:
        if len(sources) != len(keys):
            msg = 'Install sources and keys lists are different lengths'
            raise SourceConfigError(msg)
        for src, k in zip(sources, keys):
            add_source(src, k)
    if update:
        apt_update(fatal=True)
|
||||
|
||||
# The order of this list is very important. Handlers should be listed in from
|
||||
# least- to most-specific URL matching.
|
||||
FETCH_HANDLERS = (
|
||||
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
|
||||
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
|
||||
)
|
||||
|
||||
|
||||
class UnhandledSource(Exception):
    """Raised when no fetch handler can process a given source URL."""
    pass
|
||||
|
||||
|
||||
def install_remote(source):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules
    Options supported are submodule-specific"""
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    installed_to = None
    for handler in (h for h in plugins() if h.can_handle(source) is True):
        try:
            installed_to = handler.install(source)
        except UnhandledSource:
            pass
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
|
||||
|
||||
|
||||
def install_from_config(config_var_name):
    """Fetch the source named by *config_var_name* in the charm config."""
    return install_remote(config()[config_var_name])
|
||||
|
||||
|
||||
class BaseFetchHandler(object):
    """Base class for FetchHandler implementations in fetch plugins"""

    def can_handle(self, source):
        """Returns True if the source can be handled. Otherwise returns
        a string explaining why it cannot"""
        return "Wrong source type"

    def install(self, source):
        """Try to download and unpack the source. Return the path to the
        unpacked files or raise UnhandledSource."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        """Split *url* into its urlparse components."""
        return urlparse(url)

    def base_url(self, url):
        """Return url without querystring or fragment"""
        parts = list(self.parse_url(url))
        for idx in range(4, len(parts)):
            parts[idx] = ''
        return urlunparse(parts)
|
||||
|
||||
|
||||
def plugins(fetch_handlers=None):
    """Instantiate each importable handler named in *fetch_handlers*
    (defaults to FETCH_HANDLERS), skipping any that cannot be loaded."""
    if not fetch_handlers:
        fetch_handlers = FETCH_HANDLERS
    plugin_list = []
    for handler_name in fetch_handlers:
        package, classname = handler_name.rsplit('.', 1)
        try:
            module = importlib.import_module(package)
            plugin_list.append(getattr(module, classname)())
        except (ImportError, AttributeError):
            # Skip missing plugins so that they can be ommitted from
            # installation if desired
            log("FetchHandler {} not found, skipping plugin".format(handler_name))
    return plugin_list
|
48
hooks/charmhelpers/fetch/archiveurl.py
Normal file
48
hooks/charmhelpers/fetch/archiveurl.py
Normal file
@ -0,0 +1,48 @@
|
||||
import os
|
||||
import urllib2
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource
|
||||
)
|
||||
from charmhelpers.payload.archive import (
|
||||
get_archive_handler,
|
||||
extract,
|
||||
)
|
||||
from charmhelpers.core.host import mkdir
|
||||
|
||||
|
||||
class ArchiveUrlFetchHandler(BaseFetchHandler):
    """Handler for archives via generic URLs"""

    def can_handle(self, source):
        """Handle http/https/ftp/file URLs that point at a known archive
        format; otherwise explain why not."""
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
            return "Wrong source type"
        return bool(get_archive_handler(self.base_url(source)))

    def download(self, source, dest):
        """Stream *source* to *dest*; a partial file is removed on error.

        Propagates all exceptions (URLError, OSError, etc.) to the caller.
        """
        response = urllib2.urlopen(source)
        try:
            with open(dest, 'w') as dest_file:
                dest_file.write(response.read())
        except Exception as e:
            if os.path.isfile(dest):
                os.unlink(dest)
            raise e

    def install(self, source):
        """Download *source* into $CHARM_DIR/fetched and extract it."""
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
        try:
            self.download(source, dld_file)
        except urllib2.URLError as e:
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return extract(dld_file)
|
49
hooks/charmhelpers/fetch/bzrurl.py
Normal file
49
hooks/charmhelpers/fetch/bzrurl.py
Normal file
@ -0,0 +1,49 @@
|
||||
import os
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource
|
||||
)
|
||||
from charmhelpers.core.host import mkdir
|
||||
|
||||
try:
|
||||
from bzrlib.branch import Branch
|
||||
except ImportError:
|
||||
from charmhelpers.fetch import apt_install
|
||||
apt_install("python-bzrlib")
|
||||
from bzrlib.branch import Branch
|
||||
|
||||
class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs"""

    def can_handle(self, source):
        """Only bzr+ssh:// and lp: URLs are supported."""
        url_parts = self.parse_url(source)
        return url_parts.scheme in ('bzr+ssh', 'lp')

    def branch(self, source, dest):
        """Sprout the remote branch at *source* into *dest*.

        Raises UnhandledSource for URLs can_handle rejects.  Fix: the
        original wrapped Branch.open in ``except Exception as e:
        raise e``, which added nothing and discarded the original
        traceback under Python 2 — errors now propagate untouched.
        """
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        url_parts = self.parse_url(source)
        # If we use lp:branchname scheme we need to load plugins
        if url_parts.scheme == "lp":
            from bzrlib.plugin import load_plugins
            load_plugins()
        remote_branch = Branch.open(source)
        remote_branch.bzrdir.sprout(dest).open_branch()

    def install(self, source):
        """Fetch *source* into $CHARM_DIR/fetched/<branch> and return
        the destination directory."""
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.branch(source, dest_dir)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
|
||||
|
1
hooks/charmhelpers/payload/__init__.py
Normal file
1
hooks/charmhelpers/payload/__init__.py
Normal file
@ -0,0 +1 @@
|
||||
"Tools for working with files injected into a charm just before deployment."
|
50
hooks/charmhelpers/payload/execd.py
Normal file
50
hooks/charmhelpers/payload/execd.py
Normal file
@ -0,0 +1,50 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
def default_execd_dir():
    """Return the default exec.d directory: $CHARM_DIR/exec.d."""
    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
|
||||
|
||||
|
||||
def execd_module_paths(execd_dir=None):
    """Generate a list of full paths to modules within execd_dir."""
    if not execd_dir:
        execd_dir = default_execd_dir()

    if not os.path.exists(execd_dir):
        return

    for entry in os.listdir(execd_dir):
        candidate = os.path.join(execd_dir, entry)
        if os.path.isdir(candidate):
            yield candidate
|
||||
|
||||
|
||||
def execd_submodule_paths(command, execd_dir=None):
    """Generate a list of full paths to the specified command within exec_dir.
    """
    for module_path in execd_module_paths(execd_dir):
        candidate = os.path.join(module_path, command)
        # only executable regular files count
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
|
||||
|
||||
|
||||
def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
    """Run command for each module within execd_dir which defines it."""
    for submodule_path in execd_submodule_paths(command, execd_dir):
        try:
            subprocess.check_call(submodule_path, shell=True, stderr=stderr)
        except subprocess.CalledProcessError as e:
            hookenv.log("Error ({}) running {}. Output: {}".format(
                e.returncode, e.cmd, e.output))
            if die_on_error:
                sys.exit(e.returncode)
|
||||
|
||||
|
||||
def execd_preinstall(execd_dir=None):
    """Run charm-pre-install for each module within execd_dir."""
    execd_run('charm-pre-install', execd_dir=execd_dir)
|
1
hooks/cinder-volume-service-relation-broken
Symbolic link
1
hooks/cinder-volume-service-relation-broken
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
1
hooks/cloud-compute-relation-departed
Symbolic link
1
hooks/cloud-compute-relation-departed
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
1
hooks/identity-service-relation-broken
Symbolic link
1
hooks/identity-service-relation-broken
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
1
hooks/image-service-relation-broken
Symbolic link
1
hooks/image-service-relation-broken
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1,43 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
# Essex-specific functions
|
||||
|
||||
nova_set_or_update() {
|
||||
# Set a config option in nova.conf or api-paste.ini, depending
|
||||
# Defaults to updating nova.conf
|
||||
local key=$1
|
||||
local value=$2
|
||||
local conf_file=$3
|
||||
local pattern=""
|
||||
|
||||
local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
|
||||
local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
|
||||
local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
|
||||
[[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1
|
||||
[[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1
|
||||
[[ -z "$conf_file" ]] && conf_file=$nova_conf
|
||||
|
||||
case "$conf_file" in
|
||||
"$nova_conf") match="\-\-$key="
|
||||
pattern="--$key="
|
||||
out=$pattern
|
||||
;;
|
||||
"$api_conf"|"$libvirtd_conf") match="^$key = "
|
||||
pattern="$match"
|
||||
out="$key = "
|
||||
;;
|
||||
*) error_out "ERROR: set_or_update: Invalid conf_file ($conf_file)"
|
||||
esac
|
||||
|
||||
cat $conf_file | grep "$match$value" >/dev/null &&
|
||||
juju-log "$CHARM: $key=$value already in set in $conf_file" \
|
||||
&& return 0
|
||||
if cat $conf_file | grep "$match" >/dev/null ; then
|
||||
juju-log "$CHARM: Updating $conf_file, $key=$value"
|
||||
sed -i "s|\($pattern\).*|\1$value|" $conf_file
|
||||
else
|
||||
juju-log "$CHARM: Setting new option $key=$value in $conf_file"
|
||||
echo "$out$value" >>$conf_file
|
||||
fi
|
||||
CONFIG_CHANGED=True
|
||||
}
|
@ -1,135 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
# Folsom-specific functions
|
||||
|
||||
nova_set_or_update() {
|
||||
# Set a config option in nova.conf or api-paste.ini, depending
|
||||
# Defaults to updating nova.conf
|
||||
local key="$1"
|
||||
local value="$2"
|
||||
local conf_file="$3"
|
||||
local section="${4:-DEFAULT}"
|
||||
|
||||
local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
|
||||
local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
|
||||
local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
|
||||
local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
|
||||
local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
|
||||
local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
|
||||
|
||||
[[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
|
||||
[[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
|
||||
|
||||
[[ -z "$conf_file" ]] && conf_file=$nova_conf
|
||||
|
||||
local pattern=""
|
||||
case "$conf_file" in
|
||||
"$nova_conf") match="^$key="
|
||||
pattern="$key="
|
||||
out=$pattern
|
||||
;;
|
||||
"$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
|
||||
"$libvirtd_conf")
|
||||
match="^$key = "
|
||||
pattern="$match"
|
||||
out="$key = "
|
||||
;;
|
||||
*) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
|
||||
esac
|
||||
|
||||
cat $conf_file | grep "$match$value" >/dev/null &&
|
||||
juju-log "$CHARM: $key=$value already in set in $conf_file" \
|
||||
&& return 0
|
||||
|
||||
case $conf_file in
|
||||
"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
|
||||
python -c "
|
||||
import ConfigParser
|
||||
config = ConfigParser.RawConfigParser()
|
||||
config.read('$conf_file')
|
||||
config.set('$section','$key','$value')
|
||||
with open('$conf_file', 'wb') as configfile:
|
||||
config.write(configfile)
|
||||
"
|
||||
;;
|
||||
*)
|
||||
if cat $conf_file | grep "$match" >/dev/null ; then
|
||||
juju-log "$CHARM: Updating $conf_file, $key=$value"
|
||||
sed -i "s|\($pattern\).*|\1$value|" $conf_file
|
||||
else
|
||||
juju-log "$CHARM: Setting new option $key=$value in $conf_file"
|
||||
echo "$out$value" >>$conf_file
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
CONFIG_CHANGED="True"
|
||||
}
|
||||
|
||||
# Upgrade Helpers
|
||||
nova_pre_upgrade() {
|
||||
# Pre-upgrade helper. Caller should pass the version of OpenStack we are
|
||||
# upgrading from.
|
||||
return 0 # Nothing to do here, yet.
|
||||
}
|
||||
|
||||
nova_post_upgrade() {
|
||||
# Post-upgrade helper. Caller should pass the version of OpenStack we are
|
||||
# upgrading from.
|
||||
local upgrade_from="$1"
|
||||
juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom."
|
||||
# We only support essex -> folsom, currently.
|
||||
[[ "$upgrade_from" != "essex" ]] &&
|
||||
error_out "Unsupported upgrade: $upgrade_from -> folsom"
|
||||
|
||||
# This may be dangerous, if we are upgrading a number of units at once
|
||||
# and they all begin the same migration concurrently. Migrate only from
|
||||
# the cloud controller(s).
|
||||
if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
|
||||
juju-log "$CHARM: Migrating nova database."
|
||||
/usr/bin/nova-manage db sync
|
||||
|
||||
# Trigger a service restart on all other nova nodes.
|
||||
trigger_remote_service_restarts
|
||||
fi
|
||||
|
||||
# Packaging currently takes care of converting the Essex gflags format
|
||||
# to .ini, but we need to update the api-paste.ini manually. It can be
|
||||
# updated directly from keystone, via the identity-service relation,
|
||||
# if it exists. Only services that require keystone credentials will
|
||||
# have modified api-paste.ini, and only those services will have a .dpkg-dist
|
||||
# version present.
|
||||
local r_id=$(relation-ids identity-service)
|
||||
if [[ -n "$r_id" ]] && [[ -e "$CONF_DIR/api-paste.ini.dpkg-dist" ]] ; then
|
||||
# Backup the last api config, update the stock packaged version
|
||||
# with our current Keystone info.
|
||||
mv $API_CONF $CONF_DIR/api-paste.ini.juju-last
|
||||
mv $CONF_DIR/api-paste.ini.dpkg-dist $CONF_DIR/api-paste.ini
|
||||
|
||||
unit=$(relation-list -r $r_id | head -n1)
|
||||
# Note, this should never be called from an relation hook, only config-changed.
|
||||
export JUJU_REMOTE_UNIT=$unit
|
||||
service_port=$(relation-get -r $r_id service_port)
|
||||
auth_port=$(relation-get -r $r_id auth_port)
|
||||
service_username=$(relation-get -r $r_id service_username)
|
||||
service_password=$(relation-get -r $r_id service_password)
|
||||
service_tenant=$(relation-get -r $r_id service_tenant)
|
||||
keystone_host=$(relation-get -r $r_id private-address)
|
||||
unset JUJU_REMOTE_UNIT
|
||||
|
||||
juju-log "$CHARM: Updating new api-paste.ini with keystone data from $unit:$r_id"
|
||||
set_or_update "service_host" "$keystone_host" "$API_CONF"
|
||||
set_or_update "service_port" "$service_port" "$API_CONF"
|
||||
set_or_update "auth_host" "$keystone_host" "$API_CONF"
|
||||
set_or_update "auth_port" "$auth_port" "$API_CONF"
|
||||
set_or_update "auth_uri" "http://$keystone_host:$service_port/" "$API_CONF"
|
||||
set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
|
||||
set_or_update "admin_user" "$service_username" "$API_CONF"
|
||||
set_or_update "admin_password" "$service_password" "$API_CONF"
|
||||
fi
|
||||
|
||||
# TEMPORARY
|
||||
# RC3 packaging in cloud archive doesn't have this in postinst. Do it here
|
||||
sed -e "s,^root_helper=.\+,rootwrap_config=/etc/nova/rootwrap.conf," -i /etc/nova/nova.conf
|
||||
|
||||
juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> folsom."
|
||||
}
|
@ -1,97 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
# Folsom-specific functions
|
||||
|
||||
nova_set_or_update() {
  # TODO: This needs to be shared among folsom, grizzly and beyond.
  # Set a config option in nova.conf or api-paste.ini, depending on the
  # conf_file argument. Defaults to updating nova.conf.
  # Parameters: $1 = key, $2 = value, $3 = target config file (optional),
  #             $4 = ini section (optional, default DEFAULT; only honored
  #             for the quantum config files written via ConfigParser).
  # Side effects: rewrites the target file and sets CONFIG_CHANGED="True".
  local key="$1"
  local value="$2"
  local conf_file="$3"
  local section="${4:-DEFAULT}"

  local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
  local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
  local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
  local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
  local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
  local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}

  [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
  [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1

  [[ -z "$conf_file" ]] && conf_file=$nova_conf

  # Pick the match/replace patterns for the file's key=value vs key = value
  # style; nova.conf uses 'key=', the ini-style files use 'key = '.
  local pattern=""
  case "$conf_file" in
    "$nova_conf") match="^$key="
      pattern="$key="
      out=$pattern
      ;;
    "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
    "$libvirtd_conf")
      match="^$key = "
      pattern="$match"
      out="$key = "
      ;;
    # Bug fix: previously this arm only logged and fell through with
    # $match/$pattern unset, corrupting the grep/sed below. Abort instead,
    # consistent with the missing key/value guards above.
    *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
       exit 1 ;;
  esac

  # Nothing to do when the exact key=value line already exists.
  cat $conf_file | grep "$match$value" >/dev/null &&
    juju-log "$CHARM: $key=$value already set in $conf_file" \
    && return 0

  case $conf_file in
    "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
      # Section-aware files are rewritten via Python 2 ConfigParser so the
      # option lands in the requested section.
      python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$conf_file')
config.set('$section','$key','$value')
with open('$conf_file', 'wb') as configfile:
    config.write(configfile)
"
      ;;
    *)
      # Flat files: rewrite an existing line in place, else append.
      if cat $conf_file | grep "$match" >/dev/null ; then
        juju-log "$CHARM: Updating $conf_file, $key=$value"
        sed -i "s|\($pattern\).*|\1$value|" $conf_file
      else
        juju-log "$CHARM: Setting new option $key=$value in $conf_file"
        echo "$out$value" >>$conf_file
      fi
      ;;
  esac
  CONFIG_CHANGED="True"
}
|
||||
|
||||
# Upgrade Helpers
|
||||
nova_pre_upgrade() {
  # Pre-upgrade hook, invoked with the OpenStack release being upgraded
  # from. Placeholder for now: no pre-upgrade steps are required yet.
  return 0
}
|
||||
|
||||
nova_post_upgrade() {
  # Post-upgrade helper. Caller should pass the version of OpenStack we are
  # upgrading from. Runs the nova DB migration from the cloud controller
  # and asks all related nova units to restart their services.
  local upgrade_from="$1"
  juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> grizzly."
  # We only support folsom -> grizzly, currently. error_out exits the hook.
  [[ "$upgrade_from" != "folsom" ]] &&
    error_out "Unsupported upgrade: $upgrade_from -> grizzly"

  # This may be dangerous, if we are upgrading a number of units at once
  # and they all begin the same migration concurrently. Migrate only from
  # the cloud controller(s).
  if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
    juju-log "$CHARM: Migrating nova database."
    /usr/bin/nova-manage db sync

    # Trigger a service restart on all other nova nodes.
    trigger_remote_service_restarts
  fi

  juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> grizzly."
}
|
@ -1,169 +0,0 @@
|
||||
#!/bin/bash -e

# Common utility functions used across all nova charms.

# Global flag flipped by set_or_update() whenever a config file is modified;
# service_ctl() consults it to decide whether restarts are actually needed.
CONFIG_CHANGED=False
HOOKS_DIR="$CHARM_DIR/hooks"

# Load the common OpenStack helper library.
if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then
  . $HOOKS_DIR/lib/openstack-common
else
  # Bug fix: the logged path was misspelled "opentack-common", pointing
  # operators at a file that does not exist.
  juju-log "Couldn't load $HOOKS_DIR/lib/openstack-common." && exit 1
fi
|
||||
|
||||
set_or_update() {
  # Update config flags in nova.conf or api-paste.ini.
  # Config layout changed in Folsom, so this is now OpenStack release
  # specific: sources the per-release helper and delegates to it.
  local rel=$(get_os_codename_package "nova-common")
  . $HOOKS_DIR/lib/nova/$rel
  # Bug fix: quote "$@" so keys/values containing whitespace are passed
  # through intact instead of being re-split into extra arguments.
  nova_set_or_update "$@"
}
|
||||
|
||||
function set_config_flags() {
  # Set user-defined nova.conf flags from the deployment's 'config-flags'
  # option, a comma-separated list of key=value pairs. Values containing
  # commas or whitespace are not supported by this format.
  juju-log "$CHARM: Processing config-flags."
  flags=$(config-get config-flags)
  if [[ "$flags" != "None" && -n "$flags" ]] ; then
    for f in $(echo $flags | sed -e 's/,/ /g') ; do
      k=$(echo $f | cut -d= -f1)
      # Bug fix: -f2- keeps everything after the FIRST '=', so values that
      # themselves contain '=' (e.g. a=b=c) are no longer truncated.
      v=$(echo $f | cut -d= -f2-)
      set_or_update "$k" "$v"
    done
  fi
}
|
||||
|
||||
configure_volume_service() {
  # Point nova at the requested volume backend.
  # Parameters: $1 = "cinder" or "nova-volume".
  # Returns 1 (after logging) for any other service name.
  local svc="$1"
  local cur_vers="$(get_os_codename_package "nova-common")"
  case "$svc" in
    "cinder")
      set_or_update "volume_api_class" "nova.volume.cinder.API" ;;
    "nova-volume")
      # nova-volume only supported before grizzly; silently skipped on
      # newer releases.
      [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] &&
        set_or_update "volume_api_class" "nova.volume.api.API"
      ;;
    *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc"
       return 1 ;;
  esac
}
|
||||
|
||||
function configure_network_manager {
  # Configure nova (and quantum, where applicable) for the requested
  # network manager: FlatManager, FlatDHCPManager or Quantum.
  # Exits the hook on an unknown manager name.
  local manager="$1"
  echo "$CHARM: configuring $manager network manager"
  case $1 in
    "FlatManager")
      set_or_update "network_manager" "nova.network.manager.FlatManager"
      ;;
    "FlatDHCPManager")
      set_or_update "network_manager" "nova.network.manager.FlatDHCPManager"

      if [[ "$CHARM" == "nova-compute" ]] ; then
        local flat_interface=$(config-get flat-interface)
        local ec2_host=$(relation-get ec2_host)
        # Bug fix: the key was misspelled "flat_inteface", which nova
        # silently ignores, so the configured interface never took effect.
        set_or_update flat_interface "$flat_interface"
        set_or_update ec2_dmz_host "$ec2_host"

        # Ensure flat_interface has link.
        if ip link show $flat_interface >/dev/null 2>&1 ; then
          ip link set $flat_interface up
        fi

        # work around (LP: #1035172)
        if [[ -e /dev/vhost-net ]] ; then
          iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \
            --checksum-fill
        fi
      fi

      ;;
    "Quantum")
      local local_ip=$(get_ip `unit-get private-address`)
      [[ -n $local_ip ]] || {
        juju-log "Unable to resolve local IP address"
        exit 1
      }
      set_or_update "network_api_class" "nova.network.quantumv2.api.API"
      set_or_update "quantum_auth_strategy" "keystone"
      set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF"
      set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF"
      local cur="$(get_os_codename_package "nova-common")"
      local vers=$(get_os_version_codename $cur)
      if dpkg --compare-versions $vers ge '2013.1'; then
        # Configure per-tenant managed quotas - >= grizzly only
        set_or_update "quota_driver" "quantum.db.quota_db.DbQuotaDriver" \
          "$QUANTUM_CONF" "QUOTAS"
      fi
      if [ "$QUANTUM_PLUGIN" == "ovs" ]; then
        set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS"
        set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS"
        set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS"
        set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS"
      fi
      if [ "$(config-get quantum-security-groups)" == "yes" ] && \
         dpkg --compare-versions $vers ge '2013.1'; then
        set_or_update "security_group_api" "quantum"
        set_or_update "firewall_driver" "nova.virt.firewall.NoopFirewallDriver"
        set_or_update "allow_overlapping_ips" "True" $QUANTUM_CONF
        if [ "$QUANTUM_PLUGIN" == "ovs" ]; then
          set_or_update "firewall_driver" \
            "quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver" \
            $QUANTUM_PLUGIN_CONF "SECURITYGROUP"
        fi
        # Ensure that security_group_* is included in quota'ed resources
        set_or_update "quota_items" "network,subnet,port,security_group,security_group_rule" \
          $QUANTUM_CONF "QUOTAS"
      fi
      ;;
    *) juju-log "ERROR: Invalid network manager $1" && exit 1 ;;
  esac
}
|
||||
|
||||
function trigger_remote_service_restarts() {
  # Trigger a service restart on all other nova nodes that have a relation
  # via the cloud-controller interface. Works by setting a fresh uuid as
  # 'restart-trigger' relation data; remote units react to the changed
  # value in their relation-changed hooks.

  # possible relations to other nova services.
  local relations="cloud-compute nova-volume-service"

  for rel in $relations; do
    local r_ids=$(relation-ids $rel)
    for r_id in $r_ids ; do
      juju-log "$CHARM: Triggering a service restart on relation $r_id."
      # NOTE(review): relies on the external 'uuid' binary being present
      # on the unit — confirm it is installed by the charm.
      relation-set -r $r_id restart-trigger=$(uuid)
    done
  done
}
|
||||
|
||||
do_openstack_upgrade() {
  # update openstack components to those provided by a new installation source
  # it is assumed the calling hook has confirmed that the upgrade is sane.
  # Parameters: $1 = install source string (e.g. "cloud:precise-grizzly"),
  # remaining args = packages to upgrade.
  local rel="$1"
  shift
  local packages=$@

  orig_os_rel=$(get_os_codename_package "nova-common")
  new_rel=$(get_os_codename_install_source "$rel")

  # Backup the config directory.
  # NOTE(review): the stamp format skips the hour (%H is missing between
  # %d and %M), so backups taken in different hours of the same day can
  # collide — presumably "%Y%m%d%H%M%S" was intended; confirm before fixing.
  local stamp=$(date +"%Y%m%d%M%S")
  tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR

  # load the release helper library for pre/post upgrade hooks specific to the
  # release we are upgrading to.
  . $HOOKS_DIR/lib/nova/$new_rel

  # new release specific pre-upgrade hook
  nova_pre_upgrade "$orig_os_rel"

  # Setup apt repository access and kick off the actual package upgrade.
  configure_install_source "$rel"
  apt-get update
  DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \
    install --no-install-recommends $packages

  # new release specific post-upgrade hook
  nova_post_upgrade "$orig_os_rel"

}
|
@ -1,781 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
# Common utility functions used across all OpenStack charms.
|
||||
|
||||
error_out() {
  # Log a fatal error through juju-log and abort the running hook.
  juju-log "$CHARM ERROR: $@"
  exit 1
}
|
||||
|
||||
function service_ctl_status {
  # Return 0 if a service is running, 1 otherwise.
  # Parses upstart-style "service X status" output ("X start/running, ..."),
  # taking the word before the first '/'. Any other state is fatal.
  local svc="$1"
  local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }')
  case $status in
    "start") return 0 ;;
    "stop") return 1 ;;
    *) error_out "Unexpected status of service $svc: $status" ;;
  esac
}
|
||||
|
||||
function service_ctl {
  # control a specific service, or all (as defined by $SERVICES)
  # service restarts will only occur depending on global $CONFIG_CHANGED,
  # which should be updated in charm's set_or_update().
  # Parameters: $1 = service name or "all", $2 = start|stop|restart.
  local config_changed=${CONFIG_CHANGED:-True}
  if [[ $1 == "all" ]] ; then
    ctl="$SERVICES"
  else
    ctl="$1"
  fi
  action="$2"
  if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then
    error_out "ERROR service_ctl: Not enough arguments"
  fi

  for i in $ctl ; do
    case $action in
      "start")
        # start only if not already running
        service_ctl_status $i || service $i start ;;
      "stop")
        # stop if running; NOTE(review): '|| return 0' makes the whole
        # function bail out at the first already-stopped service, skipping
        # the rest of the list — confirm that is intended.
        service_ctl_status $i && service $i stop || return 0 ;;
      "restart")
        # restart only when some config file was actually modified
        if [[ "$config_changed" == "True" ]] ; then
          service_ctl_status $i && service $i restart || service $i start
        fi
        ;;
    esac
    # $? reflects the last command of the case arm above.
    if [[ $? != 0 ]] ; then
      juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action"
    fi
  done
  # all configs should have been reloaded on restart of all services, reset
  # flag if its being used.
  if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] &&
     [[ "$ctl" == "all" ]]; then
    CONFIG_CHANGED="False"
  fi
}
|
||||
|
||||
function configure_install_source {
  # Setup and configure installation source based on a config flag.
  # Supported forms: "distro" (or empty), "ppa:team/name",
  # "deb <url> ...[|<gpg-key-id>]" and "cloud:<ubuntu>-<openstack>[/pocket]".
  # error_out (exit) on anything unrecognized.
  local src="$1"

  # Default to installing from the main Ubuntu archive.
  [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0

  . /etc/lsb-release

  # standard 'ppa:someppa/name' format.
  if [[ "${src:0:4}" == "ppa:" ]] ; then
    juju-log "$CHARM: Configuring installation from custom src ($src)"
    add-apt-repository -y "$src" || error_out "Could not configure PPA access."
    return 0
  fi

  # standard 'deb http://url/ubuntu main' entries. gpg key ids must
  # be appended to the end of url after a |, ie:
  # 'deb http://url/ubuntu main|$GPGKEYID'
  if [[ "${src:0:3}" == "deb" ]] ; then
    juju-log "$CHARM: Configuring installation from custom src URL ($src)"
    if echo "$src" | grep -q "|" ; then
      # gpg key id tagged to end of url followed by a |
      url=$(echo $src | cut -d'|' -f1)
      key=$(echo $src | cut -d'|' -f2)
      juju-log "$CHARM: Importing repository key: $key"
      apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \
        juju-log "$CHARM WARN: Could not import key from keyserver: $key"
    else
      juju-log "$CHARM No repository key specified."
      url="$src"
    fi
    echo "$url" > /etc/apt/sources.list.d/juju_deb.list
    return 0
  fi

  # Cloud Archive
  if [[ "${src:0:6}" == "cloud:" ]] ; then

    # current os releases supported by the UCA.
    local cloud_archive_versions="folsom grizzly"

    local ca_rel=$(echo $src | cut -d: -f2)
    local u_rel=$(echo $ca_rel | cut -d- -f1)
    local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1)

    [[ "$u_rel" != "$DISTRIB_CODENAME" ]] &&
      error_out "Cannot install from Cloud Archive pocket $src " \
                "on this Ubuntu version ($DISTRIB_CODENAME)!"

    valid_release=""
    for rel in $cloud_archive_versions ; do
      if [[ "$os_rel" == "$rel" ]] ; then
        valid_release=1
        juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive."
      fi
    done
    if [[ -z "$valid_release" ]] ; then
      error_out "OpenStack release ($os_rel) not supported by "\
                "the Ubuntu Cloud Archive."
    fi

    # CA staging repos are standard PPAs.
    if echo $ca_rel | grep -q "staging" ; then
      add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging
      return 0
    fi

    # the others are LP-external deb repos.
    # Bug fix: the original case repeated "$u_rel-$os_rel" and
    # "$u_rel-$os_rel/proposed" in later arms that could never match (the
    # first matching arm wins); collapsed into the two distinct mappings.
    case "$ca_rel" in
      "$u_rel-$os_rel"|"$u_rel-$os_rel/updates"|"$os_rel/updates")
        pocket="$u_rel-updates/$os_rel" ;;
      "$u_rel-$os_rel/proposed")
        pocket="$u_rel-proposed/$os_rel" ;;
      *) error_out "Invalid Cloud Archive repo specified: $src"
    esac

    apt-get -y install ubuntu-cloud-keyring
    entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main"
    echo "$entry" \
      >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list
    return 0
  fi

  error_out "Invalid installation source specified in config: $src"

}
|
||||
|
||||
get_os_codename_install_source() {
  # derive the openstack release provided by a supported installation source.
  # Parameters: $1 = install source string ("distro", "cloud:...", "deb ...",
  # "ppa:..."). Prints the codename, or "unknown" if it cannot be derived.
  local rel="$1"
  local codename="unknown"
  . /etc/lsb-release

  # map ubuntu releases to the openstack version shipped with it.
  if [[ "$rel" == "distro" ]] ; then
    case "$DISTRIB_CODENAME" in
      "oneiric") codename="diablo" ;;
      "precise") codename="essex" ;;
      "quantal") codename="folsom" ;;
      "raring") codename="grizzly" ;;
    esac
  fi

  # derive version from cloud archive strings.
  if [[ "${rel:0:6}" == "cloud:" ]] ; then
    rel=$(echo $rel | cut -d: -f2)
    local u_rel=$(echo $rel | cut -d- -f1)
    local ca_rel=$(echo $rel | cut -d- -f2)
    # only trust the mapping when the pocket targets this Ubuntu release.
    if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then
      case "$ca_rel" in
        "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging")
          codename="folsom" ;;
        "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging")
          codename="grizzly" ;;
      esac
    fi
  fi

  # have a guess based on the deb string provided
  if [[ "${rel:0:3}" == "deb" ]] || \
     [[ "${rel:0:3}" == "ppa" ]] ; then
    CODENAMES="diablo essex folsom grizzly havana"
    for cname in $CODENAMES; do
      # last codename mentioned anywhere in the string wins.
      if echo $rel | grep -q $cname; then
        codename=$cname
      fi
    done
  fi
  echo $codename
}
|
||||
|
||||
get_os_codename_package() {
  # Print the OpenStack codename matching the installed version of the
  # package named in $1; prints nothing for unknown or missing versions.
  # NOTE(review): the '|| echo "none"' can never take effect — 'local'
  # masks the pipeline's exit status, and even if it ran it would print
  # rather than assign. A missing package simply yields empty output.
  local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none"
  pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs
  # match on the upstream "YYYY.N" version prefix.
  case "${pkg_vers:0:6}" in
    "2011.2") echo "diablo" ;;
    "2012.1") echo "essex" ;;
    "2012.2") echo "folsom" ;;
    "2013.1") echo "grizzly" ;;
    "2013.2") echo "havana" ;;
  esac
}
|
||||
|
||||
get_os_version_codename() {
  # Map an OpenStack codename to its upstream version string
  # (inverse of get_os_codename_package); prints nothing for unknown names.
  local codename="$1"
  case "$codename" in
    "diablo")  echo "2011.2" ;;
    "essex")   echo "2012.1" ;;
    "folsom")  echo "2012.2" ;;
    "grizzly") echo "2013.1" ;;
    "havana")  echo "2013.2" ;;
  esac
}
|
||||
|
||||
get_ip() {
  # Resolve a hostname to an IPv4 address, printing nothing on failure.
  # If the argument is already a dotted-quad it is echoed back unchanged.
  # Installs python-dnspython on first use; resolution runs in embedded
  # Python 2.
  dpkg -l | grep -q python-dnspython || {
    apt-get -y install python-dnspython 2>&1 > /dev/null
  }
  hostname=$1
  python -c "
import dns.resolver
import socket
try:
    # Test to see if already an IPv4 address
    socket.inet_aton('$hostname')
    print '$hostname'
except socket.error:
    try:
        answers = dns.resolver.query('$hostname', 'A')
        if answers:
            print answers[0].address
    except dns.resolver.NXDOMAIN:
        pass
"
}
|
||||
|
||||
# Common storage routines used by cinder, nova-volume and swift-storage.
|
||||
clean_storage() {
  # if configured to overwrite existing storage, we unmount the block-dev
  # if mounted and clear any previous pv signatures
  # Parameters: $1 = block device path. Exits the hook (error_out) on any
  # unmount/pvremove/zap failure.
  local block_dev="$1"
  # Typo fix in log message: was "Cleaining".
  juju-log "Cleaning storage '$block_dev'"
  if grep -q "^$block_dev" /proc/mounts ; then
    mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }')
    juju-log "Unmounting $block_dev from $mp"
    umount "$mp" || error_out "ERROR: Could not unmount storage from $mp"
  fi
  if pvdisplay "$block_dev" >/dev/null 2>&1 ; then
    juju-log "Removing existing LVM PV signatures from $block_dev"

    # deactivate any volgroups that may be built on this dev
    vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }')
    if [[ -n "$vg" ]] ; then
      juju-log "Deactivating existing volume group: $vg"
      vgchange -an "$vg" ||
        error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
    fi
    echo "yes" | pvremove -ff "$block_dev" ||
      error_out "Could not pvremove $block_dev"
  else
    juju-log "Zapping disk of all GPT and MBR structures"
    sgdisk --zap-all $block_dev ||
      error_out "Unable to zap $block_dev"
  fi
}
|
||||
|
||||
function get_block_device() {
  # given a string, return full path to the block device for that
  # if input is not a block device, find (or create) a loopback device
  # backed by the file "/path/to/file[|size]" (size defaults to 5G).
  # Prints the device path and returns 0 on success, 1 on failure.
  local input="$1"

  case "$input" in
    /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist."
      echo "$input"; return 0;;
    /*) :;;
    *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist."
      echo "/dev/$input"; return 0;;
  esac

  # this represents a file
  # support "/path/to/file|5G"
  local fpath size
  if [ "${input#*|}" != "${input}" ]; then
    size=${input##*|}
    fpath=${input%|*}
  else
    fpath=${input}
    size=5G
  fi

  ## loop devices are not namespaced. This is bad for containers.
  ## it means that the output of 'losetup' may have the given $fpath
  ## in it, but that may not represent this containers $fpath, but
  ## another containers. To address that, we really need to
  ## allow some uniq container-id to be expanded within path.
  ## TODO: find a unique container-id that will be consistent for
  ##       this container throughout its lifetime and expand it
  ##       in the fpath.
  # fpath=${fpath//%{id}/$THAT_ID}

  local found=""
  # parse through 'losetup -a' output, looking for this file
  # output is expected to look like:
  #   /dev/loop0: [0807]:961814 (/tmp/my.img)
  found=$(losetup -a |
    awk 'BEGIN { found=0; }
         $3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
         END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
    f="($fpath)")

  if [ $? -ne 0 ]; then
    echo "multiple devices found for $fpath: $found" 1>&2
    return 1;
  fi

  # Bug fix: an existing loop device for this file is the success case,
  # but the original returned 1 here (every other path that echoes a
  # device returns 0).
  [ -n "$found" -a -b "$found" ] && { echo "$found"; return 0; }

  if [ -n "$found" ]; then
    echo "confused, $found is not a block device for $fpath";
    return 1;
  fi

  # no existing device was found, create one
  mkdir -p "${fpath%/*}"
  truncate --size "$size" "$fpath" ||
    { echo "failed to create $fpath of size $size"; return 1; }

  found=$(losetup --find --show "$fpath") ||
    { echo "failed to setup loop device for $fpath" 1>&2; return 1; }

  echo "$found"
  return 0
}
|
||||
|
||||
# Paths rewritten wholesale by configure_haproxy() below.
HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_DEFAULT=/etc/default/haproxy
##########################################################################
# Description: Configures HAProxy services for Openstack API's
# Parameters:
#   Space delimited list of service:haproxy_port:api_port:mode entries for
#   which haproxy service configuration should be generated. The function
#   assumes the name of the peer relation is 'cluster' and that every
#   service unit in the peer relation is running the same services.
#
# Services that do not specify :mode in parameter will default to http.
#
# Example
# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
##########################################################################
configure_haproxy() {
  # Regenerates HAPROXY_CFG from scratch on every call, adds this unit and
  # all 'cluster' peers as backends, enables haproxy and restarts it.
  local address=`unit-get private-address`
  # juju unit names contain '/'; flatten to '-' for use as a server name.
  local name=${JUJU_UNIT_NAME////-}
  cat > $HAPROXY_CFG << EOF
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 20000
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
    timeout client 30000
    timeout server 30000

listen stats :8888
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /
    stats auth admin:password

EOF
  # NOTE(review): the stats endpoint uses a hardcoded admin:password
  # credential on port 8888 — confirm this is acceptable for deployments.
  for service in $@; do
    local service_name=$(echo $service | cut -d : -f 1)
    local haproxy_listen_port=$(echo $service | cut -d : -f 2)
    local api_listen_port=$(echo $service | cut -d : -f 3)
    local mode=$(echo $service | cut -d : -f 4)
    [[ -z "$mode" ]] && mode="http"
    juju-log "Adding haproxy configuration entry for $service "\
             "($haproxy_listen_port -> $api_listen_port)"
    cat >> $HAPROXY_CFG << EOF
listen $service_name 0.0.0.0:$haproxy_listen_port
    balance roundrobin
    mode $mode
    option ${mode}log
    server $name $address:$api_listen_port check
EOF
    # append every cluster peer as an additional backend server.
    local r_id=""
    local unit=""
    for r_id in `relation-ids cluster`; do
      for unit in `relation-list -r $r_id`; do
        local unit_name=${unit////-}
        local unit_address=`relation-get -r $r_id private-address $unit`
        if [ -n "$unit_address" ]; then
          echo "    server $unit_name $unit_address:$api_listen_port check" \
            >> $HAPROXY_CFG
        fi
      done
    done
  done
  echo "ENABLED=1" > $HAPROXY_DEFAULT
  service haproxy restart
}
|
||||
|
||||
##########################################################################
# Description: Query HA interface to determine if this unit is clustered
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
  # Succeed as soon as any unit on an 'ha' relation reports 'clustered'.
  local r_id=""
  local unit=""
  for r_id in $(relation-ids ha); do
    [ -z "$r_id" ] && continue
    for unit in $(relation-list -r $r_id); do
      clustered=$(relation-get -r $r_id clustered $unit)
      [ -z "$clustered" ] && continue
      juju-log "Unit is haclustered"
      return 0
    done
  done
  juju-log "Unit is not haclustered"
  return 1
}
|
||||
|
||||
##########################################################################
# Description: Return a list of all peers in cluster relations
##########################################################################
peer_units() {
  # Emit a whitespace-separated list of every unit found on any
  # 'cluster' peer relation (empty output when there are no peers).
  local r_id=""
  local unit_list=""
  for r_id in $(relation-ids cluster); do
    unit_list="$unit_list $(relation-list -r $r_id)"
  done
  echo $unit_list
}
|
||||
|
||||
##########################################################################
# Description: Determines whether the current unit is the oldest of all
#              its peers - supports partial leader election
# Parameters:  Space-separated list of peer unit names
# Returns: 0 if oldest, 1 if not
##########################################################################
oldest_peer() {
  peers=$1
  local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
  # Hoisted out of the loop: the message is loop-invariant and was being
  # printed once per peer.
  echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
  for peer in $peers; do
    local r_unit_no=$(echo $peer | cut -d / -f 2)
    # any peer with a lower unit number is older than us.
    if (($r_unit_no<$l_unit_no)); then
      juju-log "Not oldest peer; deferring"
      return 1
    fi
  done
  juju-log "Oldest peer; might take charge?"
  return 0
}
|
||||
|
||||
##########################################################################
# Description: Determines whether the current service unit is the
#              leader within a) a cluster of its peers or b) across a
#              set of unclustered peers.
# Parameters:  CRM resource to check ownership of if clustered
# Returns: 0 if leader, 1 if not
##########################################################################
eligible_leader() {
  # Clustered: defer to the CRM resource owner.
  if is_clustered; then
    if is_leader $1; then
      return 0
    fi
    juju-log 'Deferring action to CRM leader'
    return 1
  fi
  # Unclustered: the oldest peer (lowest unit number) leads.
  peers=$(peer_units)
  if [ -n "$peers" ] && ! oldest_peer "$peers"; then
    juju-log 'Deferring action to oldest service unit.'
    return 1
  fi
  return 0
}
|
||||
|
||||
##########################################################################
# Description: Query Cluster peer interface to see if peered
# Returns: 0 if peered, 1 if not peered
##########################################################################
is_peered() {
  # Peered means a cluster relation exists AND lists at least one unit.
  local r_id=$(relation-ids cluster)
  if [ -n "$r_id" ] && [ -n "$(relation-list -r $r_id)" ]; then
    juju-log "Unit peered"
    return 0
  fi
  juju-log "Unit not peered"
  return 1
}
|
||||
|
||||
##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters:  Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
  # Leader == the host crm reports as running the given resource.
  hostname=$(hostname)
  if [ -x /usr/sbin/crm ] && crm resource show $1 | grep -q $hostname; then
    juju-log "$hostname is cluster leader."
    return 0
  fi
  juju-log "$hostname is not cluster leader."
  return 1
}
|
||||
|
||||
##########################################################################
# Description: Determines whether enough data has been provided in
# configuration or relation data to configure HTTPS.
# Parameters:  None
# Returns: 0 if HTTPS can be configured, 1 if not.
##########################################################################
https() {
  local r_id=""
  # Manually configured certs take precedence.
  [[ -n "$(config-get ssl_cert)" ]] && [[ -n "$(config-get ssl_key)" ]] &&
    return 0
  # Otherwise look for a keystone unit advertising a full cert set.
  for r_id in $(relation-ids identity-service) ; do
    for unit in $(relation-list -r $r_id) ; do
      [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
        [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
        [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
        [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] &&
        return 0
    done
  done
  return 1
}
|
||||
|
||||
##########################################################################
# Description: For a given number of port mappings, configures apache2
# HTTPS local reverse proxying using certificates and keys provided in
# either configuration data (preferred) or relation data. Assumes ports
# are not in use (calling charm should ensure that).
# Parameters: Variable number of proxy port mappings; the code below
# treats the first field as the external (listen) port and the second as
# the internal backend port, i.e. $external:$internal.
# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
##########################################################################
enable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Enabling HTTPS for port mappings: $port_maps."

  # allow overriding of keystone provided certs with those set manually
  # in config.
  local cert=$(config-get ssl_cert)
  local key=$(config-get ssl_key)
  local ca_cert=""
  if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
    juju-log "Inspecting identity-service relations for SSL certificate."
    local r_id=""
    cert=""
    key=""
    ca_cert=""
    # take the first non-empty value seen across all keystone units.
    for r_id in $(relation-ids identity-service) ; do
      for unit in $(relation-list -r $r_id) ; do
        [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
        [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
        [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
      done
    done
    # relation data arrives base64 encoded; decode before writing out.
    [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
    [[ -n "$key" ]] && key=$(echo $key | base64 -di)
    [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
  else
    juju-log "Using SSL certificate provided in service config."
  fi

  [[ -z "$cert" ]] || [[ -z "$key" ]] &&
    juju-log "Expected but could not find SSL certificate data, not "\
             "configuring HTTPS!" && return 1

  apt-get -y install apache2
  # a2enmod prints a notice when modules are newly enabled; use that to
  # decide whether apache needs a restart.
  a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
    http_restart=1

  mkdir -p /etc/apache2/ssl/$CHARM
  echo "$cert" >/etc/apache2/ssl/$CHARM/cert
  echo "$key" >/etc/apache2/ssl/$CHARM/key
  if [[ -n "$ca_cert" ]] ; then
    juju-log "Installing Keystone supplied CA cert."
    echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
    update-ca-certificates --fresh

    # XXX TODO: Find a better way of exporting this?
    if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
      [[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
        rm -rf /var/www/keystone_juju_ca_cert.crt
      ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
        /var/www/keystone_juju_ca_cert.crt
    fi

  fi
  for port_map in $port_maps ; do
    local ext_port=$(echo $port_map | cut -d: -f1)
    local int_port=$(echo $port_map | cut -d: -f2)
    juju-log "Creating apache2 reverse proxy vhost for $port_map."
    cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
Listen $ext_port
NameVirtualHost *:$ext_port
<VirtualHost *:$ext_port>
    ServerName $(unit-get private-address)
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
    SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
    ProxyPass / http://localhost:$int_port/
    ProxyPassReverse / http://localhost:$int_port/
    ProxyPreserveHost on
</VirtualHost>
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
END
    a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
      http_restart=1
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
|
||||
|
||||
##########################################################################
# Description: Ensure HTTPS reverse proxying is disabled for given port
#              mappings.
# Parameters:  Variable number of proxy port mappings; only the first
#              (external/listen) field of each entry is used.
# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
##########################################################################
disable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Ensuring HTTPS disabled for $port_maps."
  # nothing to do if apache was never configured for this charm.
  ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0
  for port_map in $port_maps ; do
    # only the external port names the vhost; the internal port (field 2)
    # was extracted but never used, so that dead local has been removed.
    local ext_port=$(echo $port_map | cut -d: -f1)
    if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
      juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
      a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
        http_restart=1
    fi
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
|
||||
|
||||
|
||||
##########################################################################
# Description: Ensures HTTPS is either enabled or disabled for given port
#              mapping.
# Parameters:  Variable number of proxy port mappings as
#              $internal:$external.
# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
##########################################################################
setup_https() {
  # Enable or tear down the apache reverse proxy for the given port maps
  # depending on whether certs are available from config or keystone.
  [[ -z "$CHARM" ]] &&
    error_out "setup_https(): CHARM not set."
  if https ; then
    enable_https $@
  else
    disable_https $@
  fi
}
|
||||
|
||||
##########################################################################
# Description: Determine correct API server listening port based on
#              existence of HTTPS reverse proxy and/or haproxy.
# Parameters:  The standard public port for given service.
# Returns: The correct listening port for API service.
##########################################################################
determine_api_port() {
  local public_port="$1"
  local i=0
  # step down 10 for haproxy (peered or clustered), 10 more for apache SSL.
  # Modernized: $((...)) replaces the deprecated $[...] arithmetic syntax.
  ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$((i + 1))
  https >/dev/null 2>&1 && i=$((i + 1))
  echo $((public_port - i * 10))
}
|
||||
|
||||
##########################################################################
# Description: Determine correct proxy listening port based on public IP +
#              existence of HTTPS reverse proxy.
# Parameters:  The standard public port for given service.
# Returns: The correct listening port for haproxy service public address.
##########################################################################
determine_haproxy_port() {
  local public_port="$1"
  local i=0
  # step down 10 when an apache HTTPS reverse proxy fronts haproxy.
  # Modernized: $((...)) replaces the deprecated $[...] arithmetic syntax.
  https >/dev/null 2>&1 && i=$((i + 1))
  echo $((public_port - i * 10))
}
|
||||
|
||||
##########################################################################
# Description: Print the value for a given config option in an OpenStack
# .ini style configuration file.
# Parameters: File path, option to retrieve, optional
#             section name (default=DEFAULT)
# Returns: Prints value if set, prints nothing otherwise.
##########################################################################
local_config_get() {
  # return config values set in openstack .ini config files.
  # default placeholders starting with '%' (eg, %AUTH_HOST%) treated as
  # unset values. Runs embedded Python 2 ConfigParser; the bare except
  # deliberately swallows missing files/sections/options and prints nothing.
  local file="$1"
  local option="$2"
  local section="$3"
  [[ -z "$section" ]] && section="DEFAULT"
  python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$file')
try:
    value = config.get('$section', '$option')
except:
    print ''
    exit(0)
if value.startswith('%'): exit(0)
print value
"
}
|
||||
|
||||
##########################################################################
|
||||
# Description: Creates an rc file exporting environment variables to a
|
||||
# script_path local to the charm's installed directory.
|
||||
# Any charm scripts run outside the juju hook environment can source this
|
||||
# scriptrc to obtain updated config information necessary to perform health
|
||||
# checks or service changes
|
||||
#
|
||||
# Parameters:
|
||||
# An array of '=' delimited ENV_VAR:value combinations to export.
|
||||
# If optional script_path key is not provided in the array, script_path
|
||||
# defaults to scripts/scriptrc
|
||||
##########################################################################
|
||||
function save_script_rc {
  # Create an rc file exporting environment variables to a script_path
  # local to the charm's installed directory. Out-of-hook scripts
  # (health checks, etc) can source it for current config information.
  # Args: '=' delimited ENV_VAR=value pairs to export; an optional
  # 'script_path=<relative path>' entry overrides the default
  # destination of scripts/scriptrc.
  if [ ! -n "$JUJU_UNIT_NAME" ]; then
    echo "Error: Missing JUJU_UNIT_NAME environment variable"
    exit 1
  fi
  # our default unit_path
  unit_path="$CHARM_DIR/scripts/scriptrc"
  # NOTE: removed stray debug 'echo $unit_path' that polluted stdout.
  tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"

  echo "#!/bin/bash" > $tmp_rc
  for env_var in "${@}"
  do
    # replaced fragile backtick-command-substitution-as-command with a
    # plain pipeline; same grep semantics.
    if echo "$env_var" | grep -q script_path; then
      # caller requested a non-default unit-local script path
      unit_path="$CHARM_DIR/${env_var/script_path=/}"
    else
      echo "export $env_var" >> $tmp_rc
    fi
  done
  chmod 755 $tmp_rc
  # ensure destination directory exists before the move (fresh installs
  # may not have scripts/ yet).
  mkdir -p "$(dirname $unit_path)"
  mv $tmp_rc $unit_path
}
|
243
hooks/misc_utils.py
Normal file
243
hooks/misc_utils.py
Normal file
@ -0,0 +1,243 @@
|
||||
# TODO: Promote all of this to charm-helpers, its shared with nova-compute
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log,
|
||||
relation_get,
|
||||
unit_private_ip,
|
||||
ERROR,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack import context
|
||||
|
||||
from charmhelpers.fetch import apt_install, filter_installed_packages
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import os_release
|
||||
|
||||
|
||||
def _save_flag_file(path, data):
    '''
    Saves local state about plugin or manager to specified file.

    :param path: absolute path of the flag file to (over)write.
    :param data: string content to store (eg, the plugin name).
    '''
    # Wonder if we can move away from this now?
    with open(path, 'wb') as out:
        out.write(data)
|
||||
|
||||
|
||||
class NeutronContext(object):
    '''Base template context for quantum/neutron configuration.

    Subclasses override the ``plugin``, ``network_manager`` and
    ``neutron_security_groups`` properties to source their values from
    the appropriate place (config or relation data).
    '''
    interfaces = []

    @property
    def plugin(self):
        # Base class has no plugin; subclasses must override. A None
        # plugin makes __call__ return an empty context.
        return None

    @property
    def network_manager(self):
        return network_manager()

    @property
    def packages(self):
        # Packages required by the currently selected plugin.
        return network_plugin_attribute(self.plugin, 'packages')

    @property
    def neutron_security_groups(self):
        # Base class: security groups disabled; subclasses override.
        return None

    def _ensure_packages(self):
        '''Install but do not upgrade required plugin packages'''
        required = filter_installed_packages(self.packages)
        if required:
            apt_install(required, fatal=True)

    def ovs_ctxt(self):
        '''Return the template context for the OVS plugin.'''
        ovs_ctxt = {
            'neutron_plugin': 'ovs',
            # quantum.conf
            'core_plugin': network_plugin_attribute(self.plugin, 'driver'),
            # NOTE: network api class in template for each release.
            # nova.conf
            #'libvirt_vif_driver': n_driver,
            #'libvirt_use_virtio_for_bridges': True,
            # ovs config
            'local_ip': unit_private_ip(),
        }

        if self.neutron_security_groups:
            ovs_ctxt['neutron_security_groups'] = True

        return ovs_ctxt

    def __call__(self):
        # Only produce a context when quantum/neutron networking is in
        # use and a plugin has been selected.
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        self._ensure_packages()

        ctxt = {'network_manager': self.network_manager}

        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())

        # Persist the selected plugin under both legacy (quantum) and
        # current (neutron) flag file names.
        _save_flag_file(path='/etc/nova/quantum_plugin.conf', data=self.plugin)
        _save_flag_file(path='/etc/nova/neutron_plugin.conf', data=self.plugin)
        return ctxt
|
||||
|
||||
|
||||
class NeutronComputeContext(NeutronContext):
    '''Neutron context for a compute node: plugin, manager and security
    group settings are all sourced from relation data set by the cloud
    controller.'''
    interfaces = []

    @property
    def plugin(self):
        # Prefer the current relation key, fall back to the legacy one.
        return relation_get('neutron_plugin') or relation_get('quantum_plugin')

    @property
    def network_manager(self):
        return relation_get('network_manager')

    @property
    def neutron_security_groups(self):
        # True when either the neutron or legacy quantum relation key
        # says 'yes'/'Yes'. NOTE(review): other casings (eg 'YES') are
        # not matched — confirm against what the peer charm sets.
        groups = [relation_get('neutron_security_groups'),
                  relation_get('quantum_security_groups')]
        return ('yes' in groups or 'Yes' in groups)

    def ovs_ctxt(self):
        '''Extend the base OVS context with the release-appropriate
        libvirt VIF driver for nova-compute.'''
        ctxt = super(NeutronComputeContext, self).ovs_ctxt()
        if os_release('nova-common') == 'folsom':
            # Hybrid driver was only needed/available on folsom.
            n_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver'
        else:
            n_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
        ctxt.update({
            'libvirt_vif_driver': n_driver,
        })
        return ctxt
|
||||
|
||||
|
||||
class NeutronCCContext(NeutronContext):
    '''Neutron context for the cloud controller: plugin, manager and
    security group settings are all sourced from charm config.'''
    interfaces = []

    @property
    def plugin(self):
        return neutron_plugin()

    @property
    def network_manager(self):
        return network_manager()

    @property
    def neutron_security_groups(self):
        # Prefer the current option name, fall back to the legacy one.
        sec_groups = (config('neutron-security-groups') or
                      config('quantum-security-groups'))
        # FIX: config() returns None when neither option is set, which
        # previously raised AttributeError on .lower(). Treat unset as
        # disabled.
        return bool(sec_groups) and sec_groups.lower() == 'yes'
|
||||
|
||||
|
||||
# legacy
|
||||
# legacy
def quantum_plugins():
    '''Return metadata for supported quantum (pre-havana) plugins.

    Keys per plugin: config file path, core driver class, required
    packages, managed services and (optionally) template contexts.
    Missing keys are reported as None by network_plugin_attribute().
    '''
    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [
                NeutronContext(),
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': ['quantum-plugin-openvswitch-agent',
                         'openvswitch-datapath-dkms'],
        },
        # NOTE(review): 'nvp' has no 'contexts' entry and the neutron
        # variant's 'ovs' contexts omit NeutronContext() — confirm the
        # asymmetry is intentional.
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'services': [],
            'packages': ['quantum-plugin-nicira'],
        }
    }
|
||||
|
||||
|
||||
def neutron_plugins():
    '''Return metadata for supported neutron (havana+) plugins.

    Same shape as quantum_plugins(), with paths/classes renamed for the
    quantum -> neutron transition. Missing keys are reported as None by
    network_plugin_attribute().
    '''
    return {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': ['neutron-plugin-openvswitch-agent',
                         'openvswitch-datapath-dkms'],
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'services': [],
            'packages': ['neutron-plugin-nicira'],
        }
    }
|
||||
|
||||
|
||||
def neutron_plugin():
    '''Return the configured plugin name, preferring the current
    'neutron-plugin' option over the legacy 'quantum-plugin' one.'''
    # quantum-plugin config setting can be safely overriden
    # as we only supported OVS in G/neutron
    selected = config('neutron-plugin')
    if not selected:
        selected = config('quantum-plugin')
    return selected
|
||||
|
||||
|
||||
def _net_manager_enabled(manager):
    '''Return True if the charm's configured network-manager matches
    the given manager name (case-insensitive).

    :param manager: manager name to test against charm config.
    '''
    configured = config('network-manager')
    if not configured:
        return False
    # FIX: the parameter was previously shadowed by the config value,
    # so the comparison checked the config string against its own
    # lowercase form and ignored the argument entirely.
    return configured.lower() == manager.lower()
|
||||
|
||||
|
||||
def network_plugin_attribute(plugin, attr):
    '''Return the requested attribute of a network plugin's metadata.

    :param plugin: plugin name (eg 'ovs', 'nvp').
    :param attr: metadata key (eg 'packages', 'driver').
    :returns: the attribute value, or None if the plugin entry does not
        define it.
    :raises Exception: if the current network manager does not support
        plugins at all.
    :raises KeyError: if the plugin name is not recognised.
    '''
    manager = network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        # FIX: log at ERROR level (consistent with the log call below)
        # and raise with a message instead of a bare 'raise Exception'.
        log('Error: Network manager does not support plugins.', level=ERROR)
        raise Exception('Network manager does not support plugins.')
    try:
        _plugin = plugins[plugin]
    except KeyError:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise
    try:
        return _plugin[attr]
    except KeyError:
        # optional attributes (eg 'contexts' for nvp) may be absent.
        return None
|
||||
|
||||
|
||||
def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatability (eg, deploying H with network-manager=quantum,
    upgrading from G).

    :returns: the configured manager name, normalized to 'quantum' on
        folsom/grizzly and 'neutron' on havana and later.
    :raises Exception: if quantum/neutron is requested on essex.
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ['quantum', 'neutron']:
        # non-SDN managers (eg FlatDHCPManager) pass straight through.
        return manager

    if release in ['essex']:
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        # FIX: a bare 'raise' with no active exception raises a
        # TypeError/RuntimeError, masking the real problem.
        raise Exception('Neutron networking not supported in Essex.')
    elif release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    else:
        # ensure accurate naming for all releases post-H
        return 'neutron'
|
@ -1,293 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
CHARM="nova-cloud-controller"
|
||||
CONF_DIR="/etc/nova"
|
||||
|
||||
NOVA_CONF="/etc/nova/nova.conf"
|
||||
API_CONF="/etc/nova/api-paste.ini"
|
||||
QUANTUM_CONF="/etc/quantum/quantum.conf"
|
||||
QUANTUM_API_CONF="/etc/quantum/api-paste.ini"
|
||||
HOOKS_DIR="$CHARM_DIR/hooks"
|
||||
NET_MANAGER=$(config-get network-manager)
|
||||
|
||||
if [[ -e $HOOKS_DIR/lib/nova/nova-common ]] ; then
|
||||
. $HOOKS_DIR/lib/nova/nova-common
|
||||
else
|
||||
juju-log "Couldn't load $HOOKS_DIR/lib/nova/nova-common" && exit 1
|
||||
fi
|
||||
|
||||
function determine_services {
  # Sets the global $SERVICES which contains a list of all services
  # managed by the charm. This changes based on OpenStack release.
  # Currently, the services also determines what ends up in $PACKAGES.

  # base c-c services supported across all os releases since essex.
  SERVICES="nova-api-ec2 nova-api-os-compute nova-objectstore nova-cert nova-scheduler"

  # determine additional services, dependent on what version of OS.
  local install_src="$(config-get openstack-origin)"
  install_src=$(get_os_codename_install_source "$install_src")
  local os_vers=$(get_os_codename_package "nova-common")
  if [[ "$os_vers" == "none" ]] ; then
    [[ "$install_src" == "unknown" ]] && echo "$SERVICES" && return 0
  fi

  # NOTE(review): this unconditionally overrides the installed package
  # version with the configured origin — confirm it shouldn't apply
  # only when os_vers == "none".
  os_vers="$install_src"
  if [[ "$os_vers" != "essex" ]] && [[ "$os_vers" != "folsom" ]] ; then
    # nova-conductor was introduced in grizzly.
    SERVICES="$SERVICES nova-conductor"
  else
    local n_vol=$(relation-ids nova-volume-service)
    if [[ -n "$n_vol" ]] ; then
      # nova-volume was dropped in G but may still be deployed for E + F,
      # but should only be managed when a relation to nova-volume exists.
      SERVICES="$SERVICES nova-api-os-volume"
      # need to also ensure the package gets installed here. if the relation
      # is introduced during another hook, a call to 'service_ctl all' will
      # require it to be there.
      dpkg -l | grep -q nova-api-os-volume ||
        apt-get -y install nova-api-os-volume
    fi
  fi

  # quantum is really only supported for folsom and beyond.
  if [[ "$NET_MANAGER" == "Quantum" ]] ; then
    [[ "$os_vers" == "essex" ]] &&
      error_out "Quantum network manager only supported for Folsom + beyond."
    SERVICES="$SERVICES quantum-server"
  fi
}
|
||||
|
||||
function determine_packages {
  # Derive a list of packages based on what our service needs are. This changes
  # depending on several factors.
  # Sets the global $PACKAGES; also refreshes $SERVICES as a side effect
  # via determine_services.
  determine_services
  PACKAGES="$SERVICES python-mysqldb python-keystone uuid charm-helper-sh haproxy"

  # quantum-server implies a plugin package matching the configured plugin.
  if echo $PACKAGES | grep -q "quantum-server" ; then
    case "$(config-get quantum-plugin)" in
      "ovs") PACKAGES="$PACKAGES quantum-plugin-openvswitch" ;;
      "nvp") PACKAGES="$PACKAGES quantum-plugin-nicira" ;;
    esac
  fi
  juju-log "$CHARM: Determined required packages: $PACKAGES."
}
|
||||
|
||||
function determine_quantum_config {
  # Set QUANTUM_PLUGIN and point QUANTUM_CORE_PLUGIN and QUANTUM_PLUGIN_CONF
  # to the correct files based on configuration.
  # Exits the hook (status 1) on an unrecognised plugin.
  QUANTUM_PLUGIN=${QUANTUM_PLUGIN:-$(config-get quantum-plugin)}
  case "$QUANTUM_PLUGIN" in
    "ovs")
      QUANTUM_CORE_PLUGIN="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
      QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini"
      ;;
    "nvp")
      QUANTUM_CORE_PLUGIN="quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2"
      QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/nicira/nvp.ini"
      ;;
    *)
      juju-log "Unrecognised plugin for quantum: $QUANTUM_PLUGIN" && exit 1
      ;;
  esac
}
|
||||
|
||||
function configure_quantum_networking {
  # Create the external quantum network/router (leader only) and set
  # nova's default floating pool. No-op until credentials (novarc) and
  # both amqp and shared-db relations exist, or if conf-ext-net=no.
  determine_quantum_config
  if [ "$(config-get conf-ext-net)" != "no" ] &&
     [ -f /etc/quantum/novarc ] &&
     [ -n "$(relation-ids amqp)" ] &&
     [ -n "$(relation-ids shared-db)" ]; then
    juju-log "Configuring external networking for quantum"
    if eligible_leader "res_nova_vip"; then
      # Use helper to create external network gateway
      # and router using generated credentials
      . /etc/quantum/novarc
      quantum-ext-net -g $(config-get ext-net-gateway) \
        -c $(config-get ext-net-cidr) \
        -f $(config-get pool-floating-start):$(config-get pool-floating-end) \
        $(config-get ext-net-name)
    fi
    set_or_update "default_floating_pool" "$(config-get ext-net-name)"
  fi
}
|
||||
|
||||
function ssh_authorized_keys {
  # Add or remove an SSH public key in the per-remote-service
  # authorized_keys file under /etc/nova/compute_ssh/.
  # $1: full public key (add) or remote unit name used as the key
  #     comment (remove).
  # $2: action, "add" or "remove".
  local key="$1"
  local action="$2"
  local exists=""

  local sunit=$(echo $JUJU_REMOTE_UNIT | cut -d/ -f1)
  mkdir -p /etc/nova/compute_ssh/$sunit
  local authorized_keys="/etc/nova/compute_ssh/$sunit/authorized_keys"

  [[ -e "$authorized_keys" ]] &&
    grep -q "^$key" $authorized_keys && exists="true"

  if [[ "$action" == "add" ]] ; then
    [[ -n "$exists" ]] &&
      juju-log "$CHARM: SSH key already authorized for $JUJU_REMOTE_UNIT." &&
      return 0

    echo "$key" >>$authorized_keys
    juju-log "$CHARM: Authorized new SSH key for $JUJU_REMOTE_UNIT."
    return 0
  elif [[ "$action" == "remove" ]] ; then
    # we have no way of getting to the relation state during a departed hook.
    # we only have the peer's unit name, so remove an authorized key based on
    # its comment, which can be derived from the remote unit name and
    # gets passed in here from caller as key/$1
    local key_ln=$(sed -n "\, ${key}$,=" $authorized_keys)
    [[ -z "$key_ln" ]] &&
      juju-log "$CHARM: Cannot remove SSH key for $key, not authorized?" &&
      return 0

    for ln in $key_ln ; do
      sed -i "${ln}d" $authorized_keys
      juju-log "$CHARM: Removed existing SSH key ($key) from authorized_keys."
    done
    return 0
  else
    error_out "$CHARM: ssh_authorize_keys() invalid action specified: $action."
  fi
}
|
||||
|
||||
function ssh_known_hosts {
  # Keeps the system-wide SSH known hosts file up to date with compute
  # nodes host keys.
  # $1: hostname or IP of the compute node to (re)scan.
  local host="$1"
  local sunit=$(echo $JUJU_REMOTE_UNIT | cut -d/ -f1)
  mkdir -p /etc/nova/compute_ssh/$sunit
  local known_hosts="/etc/nova/compute_ssh/$sunit/known_hosts"
  juju-log "$CHARM: Ensuring host is included and up to date in $known_hosts."

  [[ ! -e $known_hosts ]] && touch $known_hosts

  local remote_key=""
  remote_key=$(ssh-keyscan -H -t rsa $host) ||
    error_out "$CHARM: Couldn't obtain SSH host key from $host."
  local existing=$(ssh-keygen -f $known_hosts -H -F $host | tail -n1)
  if [[ -n "$existing" ]] ; then
    juju-log "$CHARM: Found existing SSH known host key for $host."
    # FIX: removed leftover debug line that echoed "HI" on a direct
    # string comparison whose result was otherwise discarded.
    # compare only the key type + key material; the hashed hostname
    # fields differ between scans.
    remote=$(echo $remote_key | awk '{ print $2" "$3 }')
    existing=$(echo $existing | awk '{ print $2" "$3 }')
    if [[ "$remote" == "$existing" ]] ; then
      juju-log "$CHARM: SSH known host key for $host is up to date."
      return 0
    fi
    juju-log "$CHARM: Removing outdated SSH host key for $host."
    ssh-keygen -f $known_hosts -R $host
  else
    juju-log "$CHARM: No known hosts entry for $host."
  fi
  juju-log "$CHARM: Adding new SSH known hosts entry for $host."
  echo $remote_key >>$known_hosts

}
|
||||
|
||||
function ssh_compute {
  # Manage SSH trust with a compute node and publish the resulting
  # known_hosts/authorized_keys back on the relation (base64 encoded).
  # $1: "add" or "remove".
  if [[ "$1" == "add" ]] ; then
    local ssh_key=$(relation-get ssh_public_key)
    [[ -z "$ssh_key" ]] &&
      juju-log "$CHARM: ssh_compute peer not ready." && exit 0

    ssh_authorized_keys "$ssh_key" "add"

    # need to ensure known hosts entries for all possible addresses
    . /usr/share/charm-helper/sh/net.sh
    local known_hosts=""
    local private_address=$(relation-get private-address)
    known_hosts="$private_address"
    if ! ch_is_ip "$private_address" ; then
      # hostname: also record resolved IP and short hostname.
      known_hosts="$known_hosts $(get_ip $private_address)"
      known_hosts="$known_hosts $(echo $private_address | cut -d. -f1)"
    fi
    for host in $known_hosts ; do
      ssh_known_hosts "$host"
    done
  elif [[ "$1" == "remove" ]] ; then
    # remove key by referencing remote unit, not entire key.
    local remote_unit=$(echo $JUJU_REMOTE_UNIT | sed -e 's,/,-,g')
    ssh_authorized_keys "$remote_unit" remove
  else
    error_out "ssh_compute: Invalid parameter: $1."
  fi

  local sunit=$(echo $JUJU_REMOTE_UNIT | cut -d/ -f1)

  # base64 encodings should trigger new relation events as needed.
  relation-set \
    known_hosts="$(base64 /etc/nova/compute_ssh/$sunit/known_hosts)" \
    authorized_keys="$(base64 /etc/nova/compute_ssh/$sunit/authorized_keys)"
}
|
||||
|
||||
configure_https() {
  # setup https termination for all api services, depending on what is running
  # and topology of current deployment.
  # Ordering matters: backends are rebound and restarted before the
  # haproxy/apache frontends are configured, then restarted again so
  # nothing is left holding a now-frontend port.
  local clustered=""
  ( [[ -n "$(peer_units)" ]] || is_clustered ) && clustered="1"
  local services=""
  local ssl_port_maps=""
  local haproxy_port_maps=""
  local next_server=""
  local api_port=""

  # upstartService:defaultPort:configOption
  local svcs="nova-api-ec2:8773:ec2_listen_port
              nova-api-os-compute:8774:osapi_compute_listen_port
              nova-objectstore:3333:s3_listen_port"
  [[ "$NET_MANAGER" == "Quantum" ]] &&
    svcs="$svcs quantum-server:9696:bind_port"

  for s in $svcs ; do
    local service=$(echo $s | cut -d: -f1)
    local port=$(echo $s | cut -d: -f2)
    local opt=$(echo $s | cut -d: -f3)
    if [[ -n "$clustered" ]] ; then
      # clustered: public -> haproxy -> api backend.
      next_server="$(determine_haproxy_port $port)"
      api_port="$(determine_api_port $port)"
      haproxy_port_maps="$haproxy_port_maps $service:$next_server:$api_port"
    else
      # standalone: https (if any) proxies straight to the api backend.
      api_port="$(determine_api_port $port)"
      next_server="$api_port"
    fi
    # quantum-server's listen option lives in quantum.conf, not nova.conf.
    if [[ "$service" == "quantum-server" ]] ; then
      set_or_update "$opt" "$api_port" "$QUANTUM_CONF"
    else
      set_or_update "$opt" "$api_port"
    fi
    ssl_port_maps="$ssl_port_maps $port:$next_server"
  done

  # make sure all backend api servers are bound to new backend port
  # before setting up any frontends.
  for s in $svcs ; do
    local service=$(echo $s | cut -d: -f1)
    service_ctl $service restart
  done

  [[ -n "$haproxy_port_maps" ]] && configure_haproxy $haproxy_port_maps
  setup_https $ssl_port_maps

  # another restart to ensure api servers are now bound to frontend ports
  # that may have just been disabled.
  for s in $svcs ; do
    local service=$(echo $s | cut -d: -f1)
    service_ctl $service restart
  done

  local r_id=""
  # (re)configure ks endpoint accordingly
  for r_id in $(relation-ids identity-service) ; do
    keystone_joined "$r_id"
  done
  # pass on possibly updated quantum URL + ca_cert to compute nodes.
  for r_id in $(relation-ids cloud-compute) ; do
    compute_joined "$r_id"
  done
  # update the quantum relation, as well.
  for r_id in $(relation-ids quantum-network-service) ; do
    quantum_joined "$r_id"
  done
}
|
@ -1,698 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
HOOKS_DIR="$CHARM_DIR/hooks"
|
||||
arg0=$(basename $0)
|
||||
|
||||
if [[ -e $HOOKS_DIR/nova-cloud-controller-common ]] ; then
|
||||
. $HOOKS_DIR/nova-cloud-controller-common
|
||||
else
|
||||
juju-log "ERROR: Could not load nova-cloud-controller-common from $HOOKS_DIR"
|
||||
fi
|
||||
|
||||
function install_hook {
  # Install hook: configure the package origin, install all required
  # packages, set up networking/config flags, open API ports and leave
  # services stopped until relations configure them.
  juju-log "$CHARM: Installing nova packages"
  apt-get -y install python-software-properties || exit 1
  configure_install_source "$(config-get openstack-origin)"
  apt-get update || exit 1

  determine_packages
  DEBIAN_FRONTEND=noninteractive apt-get -y \
    install --no-install-recommends $PACKAGES || exit 1

  if [[ "$NET_MANAGER" == "Quantum" ]] ; then
    determine_quantum_config
  fi
  configure_network_manager $NET_MANAGER

  # Configure any flags specified in deployment config
  set_config_flags

  # Open up the various API endpoints
  # EC2
  open-port 8773
  # osapi-compute
  open-port 8774
  # object-store / s3
  open-port 3333
  # Quantum API if configured
  if [ "$NET_MANAGER" == "Quantum" ]; then
    open-port 9696
  fi

  # Helpers for creating external and tenant networks
  cp files/create_ext_net.py /usr/bin/quantum-ext-net
  cp files/create_tenant_net.py /usr/bin/quantum-tenant-net

  service_ctl all stop
  configure_https
}
|
||||
|
||||
function upgrade_charm {
  # Re-run the install hook to pick up new packages/files, then ensure
  # all managed services are running (install leaves them stopped).
  install_hook
  service_ctl all start
}
|
||||
|
||||
function config_changed {

  # Determine whether or not we should do an upgrade, based on whether or not
  # the version offered in openstack-origin is greater than what is installed.

  local install_src=$(config-get openstack-origin)
  local cur=$(get_os_codename_package "nova-common")
  local available=$(get_os_codename_install_source "$install_src")

  if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
     $(get_os_version_codename "$available") ; then
    juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
    determine_packages
    do_openstack_upgrade "$install_src" $PACKAGES
  fi

  set_config_flags

  if [ "$NET_MANAGER" == "Quantum" ]; then
    configure_quantum_networking
  fi

  determine_services
  service_ctl all restart

  # Save our scriptrc env variables for health checks
  declare -a env_vars=(
    "OPENSTACK_PORT_MCASTPORT=$(config-get ha-mcastport)"
    'OPENSTACK_SERVICE_API_EC2=nova-api-ec2'
    'OPENSTACK_SERVICE_API_OS_COMPUTE=nova-api-os-compute'
    'OPENSTACK_SERVICE_CERT=nova-cert'
    'OPENSTACK_SERVICE_CONDUCTOR=nova-conductor'
    'OPENSTACK_SERVICE_OBJECTSTORE=nova-objectstore'
    'OPENSTACK_SERVICE_SCHEDULER=nova-scheduler')
  save_script_rc ${env_vars[@]}
  configure_https
}
|
||||
|
||||
function amqp_joined {
  # we request a username on the rabbit queue
  # and store it in nova.conf. our response is its IP + PASSWD
  # but we configure that in _changed
  local rabbit_user=$(config-get rabbit-user)
  local rabbit_vhost=$(config-get rabbit-vhost)
  juju-log "$CHARM - amqp_joined: requesting credentials for $rabbit_user"
  relation-set username=$rabbit_user
  relation-set vhost=$rabbit_vhost
}
|
||||
|
||||
function amqp_changed {
  # server creates our credentials and tells us where
  # to connect. for now, using default vhost '/'
  # Exits early (0) until the relation data is complete.
  local rabbit_host=$(relation-get private-address)
  local rabbit_password=$(relation-get password)

  if [[ -z $rabbit_host ]] || \
     [[ -z $rabbit_password ]] ; then
    juju-log "$CHARM - amqp_changed: rabbit_host||rabbit_password not set."
    exit 0
  fi

  # if the rabbitmq service is clustered among nodes with hacluster,
  # point to its vip instead of its private-address.
  local clustered=$(relation-get clustered)
  if [[ -n "$clustered" ]] ; then
    # NOTE(review): log message below says 'ampq_changed' (typo kept —
    # it is a runtime string).
    juju-log "$CHARM - ampq_changed: Configuring for "\
      "access to haclustered rabbitmq service."
    local vip=$(relation-get vip)
    [[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip."\
      && exit 0
    rabbit_host="$vip"
  fi

  local rabbit_user=$(config-get rabbit-user)
  local rabbit_vhost=$(config-get rabbit-vhost)
  juju-log "$CHARM - amqp_changed: Setting rabbit config in nova.conf: " \
    "$rabbit_user@$rabbit_host/$rabbit_vhost"
  set_or_update rabbit_host $rabbit_host
  set_or_update rabbit_userid $rabbit_user
  set_or_update rabbit_password $rabbit_password
  set_or_update rabbit_virtual_host $rabbit_vhost

  # quantum shares the same rabbit credentials via its own conf file.
  if [ "$(config-get network-manager)" == "Quantum" ]; then
    set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF"
    set_or_update rabbit_userid "$rabbit_user" "$QUANTUM_CONF"
    set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF"
    set_or_update rabbit_virtual_host "$rabbit_vhost" "$QUANTUM_CONF"
  fi

  determine_services && service_ctl all restart

  if [ "$NET_MANAGER" == "Quantum" ]; then
    configure_quantum_networking
  fi
}
|
||||
|
||||
function db_joined {
  # tell mysql provider which database we want. it will create it and give us
  # credentials
  local nova_db=$(config-get nova-db)
  local db_user=$(config-get db-user)
  local hostname=$(unit-get private-address)
  juju-log "$CHARM - db_joined: requesting database access to $nova_db for "\
    "$db_user@$hostname"
  relation-set nova_database=$nova_db nova_username=$db_user nova_hostname=$hostname
  # quantum gets its own database/user when enabled.
  if [ "$NET_MANAGER" == "Quantum" ]; then
    relation-set quantum_database=quantum quantum_username=quantum quantum_hostname=$hostname
  fi
}
|
||||
|
||||
function db_changed {
  # Configure nova (and quantum, if enabled) database access once the
  # mysql provider has supplied credentials; run db sync (leader only)
  # and restart services. Exits early (0) until relation data is set.
  local db_host=`relation-get db_host`
  local db_password=`relation-get nova_password`

  if [[ -z $db_host ]] || [[ -z $db_password ]] ; then
    # FIX: message previously said the values were 'set' when they are
    # in fact missing.
    juju-log "$CHARM - db_changed: db_host||db_password not set, will retry."
    exit 0
  fi

  local nova_db=$(config-get nova-db)
  local db_user=$(config-get db-user)
  juju-log "$CHARM - db_changed: Configuring nova.conf for access to $nova_db"

  set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$nova_db"

  if [ "$NET_MANAGER" == "Quantum" ]; then
    local quantum_db_password=`relation-get quantum_password`
    determine_quantum_config
    set_or_update sql_connection "mysql://quantum:$quantum_db_password@$db_host/quantum?charset=utf8" \
      $QUANTUM_PLUGIN_CONF "DATABASE"
  fi

  # only the leader runs schema migrations, avoiding concurrent syncs.
  eligible_leader 'res_nova_vip' && /usr/bin/nova-manage db sync

  determine_services
  service_ctl all restart

  if [ "$NET_MANAGER" == "Quantum" ]; then
    configure_quantum_networking
  fi

  trigger_remote_service_restarts
}
|
||||
|
||||
function image-service_changed {
  # Point nova at the glance API server advertised on the relation.
  # Exits early (0) until the peer publishes its address.
  local api_server=$(relation-get glance-api-server)
  [[ -z $api_server ]] &&
    juju-log "$CHARM - image-service_changed: Peer not ready?" && exit 0

  if [[ "$(get_os_codename_package nova-common)" == "essex" ]] ; then
    # essex needs glance_api_servers urls stripped of protocol.
    api_server="$(echo $api_server | awk '{gsub(/http:\/\/|https:\/\//,"")}1')"
  fi

  set_or_update glance_api_servers $api_server
  set_or_update image_service "nova.image.glance.GlanceImageService"
  determine_services && service_ctl all restart
}
|
||||
|
||||
function keystone_joined {
  # we need to get two entries into keystone's catalog, nova + ec2
  # grouped by prepending $service_ to each setting. the keystone
  # charm will assemble settings into corresponding catalog entries
  # Leader-only; non-leaders return immediately.
  eligible_leader 'res_nova_vip' || return 0

  # advertise the VIP when clustered, otherwise this unit's address.
  is_clustered && local host=$(config-get vip) ||
    local host=$(unit-get private-address)

  if [[ "$arg0" == "identity-service-relation-joined" ]] ; then
    # determine https status based only on config at this point,
    # inspecting KS relation is not reliable. if KS has multiple
    # units, multiple relation-joineds are fired, resulting in the
    # endpoint being configured in catalog as https before https
    # is actually setup on this end. ends with failure to configure
    # quantum network, if its enabled.
    # if specified in config, https will have already been setup in
    # install or config-changed.
    if [[ -n "$(config-get ssl_cert)" ]] &&
       [[ -n "$(config-get ssl_key)" ]] ; then
      local scheme="https"
    else
      local scheme="http"
    fi
  else
    # this function is called from other hook contexts, use normal method
    # for determining https
    https && scheme="https" || scheme="http"
  fi

  local nova_url="$scheme://$host:8774/v1.1/\$(tenant_id)s"
  local ec2_url="$scheme://$host:8773/services/Cloud"
  local s3_url="$scheme://$host:3333"
  local region="$(config-get region)"
  local quantum_url="$scheme://$host:9696"

  # these are the default endpoints
  relation-set nova_service="nova" \
    nova_region="$region" \
    nova_public_url="$nova_url" \
    nova_admin_url="$nova_url" \
    nova_internal_url="$nova_url" \
    ec2_service="ec2" \
    ec2_region="$region" \
    ec2_public_url="$ec2_url" \
    ec2_admin_url="$ec2_url" \
    ec2_internal_url="$ec2_url" \
    s3_service="s3" \
    s3_region="$region" \
    s3_public_url="$s3_url" \
    s3_admin_url="$s3_url" \
    s3_internal_url="$s3_url"

  if [ "$(config-get network-manager)" == "Quantum" ]; then
    relation-set quantum_service="quantum" \
      quantum_region="$region" \
      quantum_public_url="$quantum_url" \
      quantum_admin_url="$quantum_url" \
      quantum_internal_url="$quantum_url"
  fi

  # tack on an endpoint for nova-volume when a relation exists.
  if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
    nova_vol_url="$scheme://$host:8776/v1/\$(tenant_id)s"
    relation-set nova-volume_service="nova-volume" \
      nova-volume_region="$region" \
      nova-volume_public_url="$nova_vol_url" \
      nova-volume_admin_url="$nova_vol_url" \
      nova-volume_internal_url="$nova_vol_url"
  fi
}
|
||||
|
||||
function keystone_changed {
|
||||
token=$(relation-get admin_token)
|
||||
service_port=$(relation-get service_port)
|
||||
auth_port=$(relation-get auth_port)
|
||||
service_username=$(relation-get service_username)
|
||||
service_password=$(relation-get service_password)
|
||||
service_tenant=$(relation-get service_tenant)
|
||||
region=$(config-get region)
|
||||
|
||||
[[ -z "$token" ]] || [[ -z "$service_port" ]] || [[ -z "$auth_port" ]] ||
|
||||
[[ -z "$service_username" ]] || [[ -z "$service_password" ]] ||
|
||||
[[ -z "$service_tenant" ]] &&
|
||||
juju-log "$CHARM - keystone_changed: Peer not ready" && exit 0
|
||||
|
||||
[[ "$token" == "-1" ]] &&
|
||||
juju-log "$CHARM - keystone_changed: admin token error" && exit 1
|
||||
|
||||
# No need to update paste deploy pipelines, just set a flag in nova.conf
|
||||
set_or_update "auth_strategy" "keystone"
|
||||
|
||||
# Update keystone authentication configuration
|
||||
service_host=$(relation-get service_host)
|
||||
auth_host=$(relation-get auth_host)
|
||||
set_or_update "keystone_ec2_url" "http://$service_host:$service_port/v2.0/ec2tokens"
|
||||
|
||||
if grep -q use_deprecated_auth $NOVA_CONF ; then
|
||||
juju-log "$CHARM - keystone_changed: Disabling '--use_deprecated_auth"
|
||||
sed -i '/--use_deprecated_auth/d' $NOVA_CONF
|
||||
fi
|
||||
|
||||
local clustered=""
|
||||
is_clustered && clustered="1"
|
||||
|
||||
[[ -n "$clustered" ]] && local host=$(config-get vip) ||
|
||||
local host=$(unit-get private-address)
|
||||
https && local scheme="https" || local scheme="http"
|
||||
|
||||
# update keystone authtoken settings accordingly
|
||||
set_or_update "service_host" "$service_host" "$API_CONF"
|
||||
set_or_update "service_port" "$service_port" "$API_CONF"
|
||||
set_or_update "auth_host" "$auth_host" "$API_CONF"
|
||||
set_or_update "auth_port" "$auth_port" "$API_CONF"
|
||||
# XXX http hard-coded
|
||||
set_or_update "auth_uri" "http://$service_host:$service_port/" "$API_CONF"
|
||||
set_or_update "admin_token" "$token" "$API_CONF"
|
||||
set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF"
|
||||
set_or_update "admin_user" "$service_username" "$API_CONF"
|
||||
set_or_update "admin_password" "$service_password" "$API_CONF"
|
||||
|
||||
if [ "$NET_MANAGER" == "Quantum" ]; then
|
||||
# Configure Nova for quantum
|
||||
keystone_url="http://${auth_host}:${auth_port}/v2.0"
|
||||
set_or_update "quantum_url" "$scheme://$host:9696"
|
||||
set_or_update "quantum_admin_tenant_name" "${service_tenant}"
|
||||
set_or_update "quantum_admin_username" "${service_username}"
|
||||
set_or_update "quantum_admin_password" "${service_password}"
|
||||
set_or_update "quantum_admin_auth_url" "${keystone_url}"
|
||||
# Configure API server for quantum
|
||||
set_or_update "admin_tenant_name" "$service_tenant" "$QUANTUM_API_CONF" "filter:authtoken"
|
||||
set_or_update "admin_user" "$service_username" "$QUANTUM_API_CONF" "filter:authtoken"
|
||||
set_or_update "admin_password" "$service_password" "$QUANTUM_API_CONF" "filter:authtoken"
|
||||
set_or_update "auth_host" "$auth_host" "$QUANTUM_API_CONF" "filter:authtoken"
|
||||
set_or_update "auth_port" "$auth_port" "$QUANTUM_API_CONF" "filter:authtoken"
|
||||
# Save a local copy of the credentials for later use
|
||||
cat > /etc/quantum/novarc << EOF
|
||||
export OS_USERNAME=${service_username}
|
||||
export OS_PASSWORD=${service_password}
|
||||
export OS_TENANT_NAME=${service_tenant}
|
||||
export OS_AUTH_URL=${keystone_url}
|
||||
export OS_REGION_NAME=$region
|
||||
EOF
|
||||
fi
|
||||
|
||||
determine_services && service_ctl all restart
|
||||
|
||||
if [ "$NET_MANAGER" == "Quantum" ]; then
|
||||
# if first time here, config quantum before setting up
|
||||
# https.
|
||||
configure_quantum_networking
|
||||
# ripple out changes to identity to connected services
|
||||
# which use cloud-controller as source of information for
|
||||
# keystone
|
||||
local r_ids="$(relation-ids cloud-compute) $(relation-ids quantum-network-service)"
|
||||
for id in $r_ids ; do
|
||||
relation-set -r $id \
|
||||
keystone_host=$auth_host \
|
||||
auth_port=$auth_port \
|
||||
service_port=$service_port \
|
||||
service_username=$service_username \
|
||||
service_password=$service_password \
|
||||
service_tenant=$service_tenant \
|
||||
region=$region \
|
||||
# XXX http hard-coded
|
||||
auth_uri="http://$service_host:$service_port/"
|
||||
|
||||
done
|
||||
fi
|
||||
configure_https
|
||||
|
||||
# if this changed event happens as a result of clustered VIP
|
||||
# reconfigure, configure_https needs to update VIP certificate
|
||||
# before quantumclient is used.
|
||||
if [[ "$NET_MANAGER" == "Quantum" ]]; then
|
||||
configure_quantum_networking
|
||||
fi
|
||||
}
|
||||
|
||||
volume_joined() {
|
||||
local svc=""
|
||||
case "$arg0" in
|
||||
"cinder-volume-service-relation-joined") svc="cinder" ;;
|
||||
"nova-volume-service-relation-joined") svc="nova-volume" ;;
|
||||
*) svc="nova-volume" ;;
|
||||
esac
|
||||
|
||||
local cur_vers=$(get_os_codename_package "nova-common")
|
||||
if [[ "$cur_vers" != "essex" ]] && [[ "$cur_vers" != "folsom" ]] &&
|
||||
[[ "$svc" == "nova-volume" ]] ; then
|
||||
juju-log "$CHARM: WARNING nova-volume is only supported on Essex "\
|
||||
"and Folsom. Ignoring new relation to nova-volume service."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
configure_volume_service "$svc"
|
||||
determine_services && service_ctl all restart
|
||||
|
||||
# The nova-volume API can be hosted here alongside the other
|
||||
# nova API services, but there needs to be a new endpoint
|
||||
# configured in keystone.
|
||||
if [[ "$svc" == "nova-volume" ]] ; then
|
||||
apt-get -y install nova-api-os-volume
|
||||
local nova_vol_url="http://$(unit-get private-address):8776/v1/\$(tenant_id)s"
|
||||
local r_ids=$(relation-ids identity-service)
|
||||
for id in $r_ids ; do
|
||||
juju-log "$CHARM: Registering new endpoint for nova-volume API on "\
|
||||
"existing identity-service relation: $id"
|
||||
nova_vol_url="http://$(unit-get private-address):8776/v1/\$(tenant_id)s"
|
||||
relation-set -r $id nova-volume_service="nova-volume" \
|
||||
nova-volume_region="$(config-get region)" \
|
||||
nova-volume_public_url="$nova_vol_url" \
|
||||
nova-volume_admin_url="$nova_vol_url" \
|
||||
nova-volume_internal_url="$nova_vol_url"
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ "$svc" == "cinder" ]] ; then
|
||||
# Compute nodes need to be notified to set their volume
|
||||
# driver accordingly.
|
||||
r_ids=$(relation-ids cloud-compute)
|
||||
for id in $r_ids ; do
|
||||
relation-set -r $id volume_service="cinder"
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
compute_joined() {
|
||||
local r_id="$1"
|
||||
[[ -n "$r_id" ]] && r_id="-r $r_id"
|
||||
eligible_leader 'res_nova_vip' || return 0
|
||||
relation-set $r_id network_manager=$(config-get network-manager)
|
||||
# XXX Should point to VIP if clustered, or this may not even be needed.
|
||||
relation-set $r_id ec2_host=$(unit-get private-address)
|
||||
|
||||
local sect="filter:authtoken"
|
||||
keystone_host=$(local_config_get $API_CONF auth_host $sect)
|
||||
|
||||
if [ "$NET_MANAGER" == "Quantum" ]; then
|
||||
if [[ -n "$keystone_host" ]]; then
|
||||
relation-set $r_id \
|
||||
keystone_host=$keystone_host \
|
||||
auth_port=$(local_config_get $API_CONF auth_port $sect) \
|
||||
service_port=$(local_config_get $API_CONF service_port $sect) \
|
||||
service_username=$(local_config_get $API_CONF admin_user $sect) \
|
||||
service_password=$(local_config_get $API_CONF admin_password $sect) \
|
||||
service_tenant=$(local_config_get $API_CONF admin_tenant_name $sect) \
|
||||
auth_uri=$(local_config_get $API_CONF auth_uri $sect)
|
||||
fi
|
||||
is_clustered && local host=$(config-get vip) ||
|
||||
local host=$(unit-get private-address)
|
||||
https && local scheme="https" || local scheme="http"
|
||||
local quantum_url="$scheme://$host:9696"
|
||||
|
||||
relation-set $r_id quantum_url=$quantum_url \
|
||||
quantum_plugin=$(config-get quantum-plugin) \
|
||||
region=$(config-get region) \
|
||||
quantum_security_groups=$(config-get quantum-security-groups)
|
||||
|
||||
fi
|
||||
|
||||
# must pass on the keystone CA certficiate, if it exists.
|
||||
cert="/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt"
|
||||
if [[ -n "$keystone_host" ]] && [[ -e $cert ]] ; then
|
||||
cert=$(cat $cert | base64)
|
||||
relation-set $r_id ca_cert="$cert"
|
||||
fi
|
||||
|
||||
# volume driver is dependent on os version, or presence
|
||||
# of cinder (on folsom, at least)
|
||||
local cur_vers=$(get_os_codename_package "nova-common")
|
||||
local vol_drv="cinder"
|
||||
case "$cur_vers" in
|
||||
"essex")
|
||||
vol_drv="nova-volume"
|
||||
;;
|
||||
"folsom")
|
||||
[[ -z "$(relation-ids cinder-volume-service)" ]] && vol_drv="nova-volume"
|
||||
;;
|
||||
esac
|
||||
relation-set $r_id volume_service="$vol_drv"
|
||||
}
|
||||
|
||||
compute_changed() {
|
||||
local migration_auth="$(relation-get migration_auth_type)"
|
||||
[[ -z "$migration_auth" ]] &&
|
||||
juju-log "$CHARM: compute_changed - Peer not ready or "\
|
||||
"no migration auth. configured." && exit 0
|
||||
|
||||
case "$migration_auth" in
|
||||
"ssh") ssh_compute add ;;
|
||||
esac
|
||||
}
|
||||
|
||||
compute_departed() {
|
||||
ssh_compute remove
|
||||
}
|
||||
|
||||
function quantum_joined() {
|
||||
# Tell quantum service about keystone
|
||||
eligible_leader || return 0
|
||||
local r_id="$1"
|
||||
[[ -n "$r_id" ]] && r_id="-r $r_id"
|
||||
|
||||
local sect="filter:authtoken"
|
||||
keystone_host=$(local_config_get $API_CONF auth_host $sect)
|
||||
if [ -n "$keystone_host" ]; then
|
||||
relation-set $r_id \
|
||||
keystone_host=$keystone_host \
|
||||
auth_port=$(local_config_get $API_CONF auth_port $sect) \
|
||||
service_port=$(local_config_get $API_CONF service_port $sect) \
|
||||
service_username=$(local_config_get $API_CONF admin_user $sect) \
|
||||
service_password=$(local_config_get $API_CONF admin_password $sect) \
|
||||
service_tenant=$(local_config_get $API_CONF admin_tenant_name $sect) \
|
||||
auth_uri=$(local_config_get $API_CONF auth_uri $sect)
|
||||
fi
|
||||
|
||||
# must pass on the keystone CA certficiate, if it exists.
|
||||
cert="/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt"
|
||||
if [[ -n "$keystone_host" ]] && [[ -e $cert ]] ; then
|
||||
cert=$(cat $cert | base64)
|
||||
relation-set $r_id ca_cert="$cert"
|
||||
fi
|
||||
|
||||
is_clustered && local host=$(config-get vip) ||
|
||||
local host=$(unit-get private-address)
|
||||
https && local scheme="https" || local scheme="http"
|
||||
local quantum_url="$scheme://$host:9696"
|
||||
|
||||
relation-set $r_id quantum_host="$host" quantum_port="9696" \
|
||||
quantum_url=$quantum_url \
|
||||
quantum_plugin=$(config-get quantum-plugin) \
|
||||
region=$(config-get region)
|
||||
|
||||
}
|
||||
|
||||
function cluster_changed() {
|
||||
[[ -z "$(peer_units)" ]] &&
|
||||
juju-log "cluster_changed() with no peers." && exit 0
|
||||
# upstartService:defaultPort:configOption
|
||||
local svcs="nova-api-ec2:8773:ec2_listen_port
|
||||
nova-api-os-compute:8774:osapi_compute_listen_port
|
||||
nova-objectstore:3333:s3_listen_port"
|
||||
[[ "$NET_MANAGER" == "Quantum" ]] &&
|
||||
svcs="$svcs quantum-server:9696:bind_port"
|
||||
|
||||
for s in $svcs ; do
|
||||
local service=$(echo $s | cut -d: -f1)
|
||||
local port=$(echo $s | cut -d: -f2)
|
||||
local opt=$(echo $s | cut -d: -f3)
|
||||
local next_server="$(determine_haproxy_port $port)"
|
||||
local api_port="$(determine_api_port $port)"
|
||||
local haproxy_port_maps="$haproxy_port_maps $service:$next_server:$api_port:http"
|
||||
if [[ "$service" == "quantum-server" ]] ; then
|
||||
set_or_update "$opt" "$api_port" "$QUANTUM_CONF"
|
||||
else
|
||||
set_or_update "$opt" "$api_port"
|
||||
fi
|
||||
|
||||
service_ctl $service restart
|
||||
done
|
||||
configure_haproxy $haproxy_port_maps
|
||||
}
|
||||
|
||||
function ha_relation_joined() {
|
||||
local corosync_bindiface=`config-get ha-bindiface`
|
||||
local corosync_mcastport=`config-get ha-mcastport`
|
||||
local vip=`config-get vip`
|
||||
local vip_iface=`config-get vip_iface`
|
||||
local vip_cidr=`config-get vip_cidr`
|
||||
if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
|
||||
[ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
|
||||
[ -n "$corosync_mcastport" ]; then
|
||||
# TODO: This feels horrible but the data required by the hacluster
|
||||
# charm is quite complex and is python ast parsed.
|
||||
resources="{
|
||||
'res_nova_vip':'ocf:heartbeat:IPaddr2',
|
||||
'res_nova_haproxy':'lsb:haproxy'
|
||||
}"
|
||||
resource_params="{
|
||||
'res_nova_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
|
||||
'res_nova_haproxy': 'op monitor interval=\"5s\"'
|
||||
}"
|
||||
init_services="{
|
||||
'res_nova_haproxy':'haproxy'
|
||||
}"
|
||||
clones="{
|
||||
'cl_nova_haproxy':'res_nova_haproxy'
|
||||
}"
|
||||
relation-set corosync_bindiface=$corosync_bindiface \
|
||||
corosync_mcastport=$corosync_mcastport \
|
||||
resources="$resources" resource_params="$resource_params" \
|
||||
init_services="$init_services" clones="$clones"
|
||||
else
|
||||
juju-log "Insufficient configuration data to configure hacluster"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function ha_relation_changed() {
|
||||
local clustered=`relation-get clustered`
|
||||
if [ -n "$clustered" ] && is_leader 'res_nova_vip'; then
|
||||
https && local scheme="https" || local scheme="http"
|
||||
for r_id in `relation-ids identity-service`; do
|
||||
local address=$(config-get vip)
|
||||
local region=$(config-get region)
|
||||
local nova_url="$scheme://$address:8774/v1.1/\$(tenant_id)s"
|
||||
local ec2_url="$scheme://$address:8773/services/Cloud"
|
||||
local s3_url="$scheme://$address:3333"
|
||||
local quantum_url="$scheme://$address:9696"
|
||||
local nova_vol_url="$scheme://$address:8776/v1/\$(tenant_id)s"
|
||||
|
||||
relation-set -r $r_id \
|
||||
nova_service="nova" \
|
||||
nova_region="$region" \
|
||||
nova_public_url="$nova_url" \
|
||||
nova_admin_url="$nova_url" \
|
||||
nova_internal_url="$nova_url" \
|
||||
ec2_service="ec2" \
|
||||
ec2_region="$region" \
|
||||
ec2_public_url="$ec2_url" \
|
||||
ec2_admin_url="$ec2_url" \
|
||||
ec2_internal_url="$ec2_url" \
|
||||
s3_service="s3" \
|
||||
s3_region="$region" \
|
||||
s3_public_url="$s3_url" \
|
||||
s3_admin_url="$s3_url" \
|
||||
s3_internal_url="$s3_url"
|
||||
|
||||
if [ "$(config-get network-manager)" == "Quantum" ]; then
|
||||
relation-set -r $r_id \
|
||||
quantum_service="quantum" \
|
||||
quantum_region="$region" \
|
||||
quantum_public_url="$quantum_url" \
|
||||
quantum_admin_url="$quantum_url" \
|
||||
quantum_internal_url="$quantum_url"
|
||||
fi
|
||||
|
||||
if [[ -n "$(relation-ids nova-volume-service)" ]] ; then
|
||||
relation-set -r $r_id \
|
||||
nova-volume_service="nova-volume" \
|
||||
nova-volume_region="$region" \
|
||||
nova-volume_public_url="$nova_vol_url" \
|
||||
nova-volume_admin_url="$nova_vol_url" \
|
||||
nova-volume_internal_url="$nova_vol_url"
|
||||
fi
|
||||
done
|
||||
if [ "$(config-get network-manager)" == "Quantum" ]; then
|
||||
# Let gateway nodes use the new HA address for the
|
||||
# quantum API server
|
||||
for r_id in `relation-ids quantum-network-service`; do
|
||||
relation-set -r $r_id \
|
||||
quantum_host="$address" quantum_port="9696" \
|
||||
quantum_url="$quantum_url" region="$region"
|
||||
done
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
arg0=$(basename $0)
|
||||
case $arg0 in
|
||||
"start"|"stop") determine_services ; service_ctl all $arg0 ;;
|
||||
"install") install_hook ;;
|
||||
"config-changed") config_changed ;;
|
||||
"upgrade-charm") upgrade_charm ;;
|
||||
"amqp-relation-joined") amqp_joined ;;
|
||||
"amqp-relation-changed") amqp_changed ;;
|
||||
"shared-db-relation-joined") db_joined ;;
|
||||
"shared-db-relation-changed") db_changed ;;
|
||||
"image-service-relation-joined") exit 0 ;;
|
||||
"image-service-relation-changed") image-service_changed ;;
|
||||
"identity-service-relation-joined") keystone_joined ;;
|
||||
"identity-service-relation-changed") keystone_changed ;;
|
||||
"cinder-volume-service-relation-joined") volume_joined ;;
|
||||
"nova-volume-service-relation-joined") volume_joined ;;
|
||||
"cloud-compute-relation-joined") compute_joined ;;
|
||||
"cloud-compute-relation-changed") compute_changed ;;
|
||||
"cloud-compute-relation-departed") compute_departed ;;
|
||||
"quantum-network-service-relation-joined") quantum_joined ;;
|
||||
"cluster-relation-changed") cluster_changed ;;
|
||||
"cluster-relation-departed") cluster_changed ;;
|
||||
"ha-relation-joined") ha_relation_joined ;;
|
||||
"ha-relation-changed") ha_relation_changed ;;
|
||||
*) exit 0 ;;
|
||||
esac
|
1
hooks/nova-volume-service-relation-broken
Symbolic link
1
hooks/nova-volume-service-relation-broken
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
156
hooks/nova_cc_context.py
Normal file
156
hooks/nova_cc_context.py
Normal file
@ -0,0 +1,156 @@
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config, relation_ids, relation_set, log, ERROR)
|
||||
|
||||
from charmhelpers.fetch import apt_install, filter_installed_packages
|
||||
from charmhelpers.contrib.openstack import context, neutron, utils
|
||||
|
||||
from charmhelpers.contrib.hahelpers.cluster import (
|
||||
determine_api_port, determine_haproxy_port)
|
||||
|
||||
|
||||
class ApacheSSLContext(context.ApacheSSLContext):
|
||||
|
||||
interfaces = ['https']
|
||||
external_ports = []
|
||||
service_namespace = 'nova'
|
||||
|
||||
def __call__(self):
|
||||
# late import to work around circular dependency
|
||||
from nova_cc_utils import determine_ports
|
||||
self.external_ports = determine_ports()
|
||||
return super(ApacheSSLContext, self).__call__()
|
||||
|
||||
|
||||
class VolumeServiceContext(context.OSContextGenerator):
|
||||
interfaces = []
|
||||
|
||||
def __call__(self):
|
||||
ctxt = {}
|
||||
|
||||
if relation_ids('nova-volume-service'):
|
||||
if utils.os_release('nova-common') not in ['essex', 'folsom']:
|
||||
e = ('Attempting to relate a nova-volume service to an '
|
||||
'Nova version (%s). Use cinder.')
|
||||
log(e, level=ERROR)
|
||||
|
||||
raise context.OSContextError(e)
|
||||
install_pkg = filter_installed_packages(['nova-api-os-volume'])
|
||||
if install_pkg:
|
||||
apt_install(install_pkg)
|
||||
ctxt['volume_service'] = 'nova-volume'
|
||||
elif relation_ids('cinder-volume-service'):
|
||||
ctxt['volume_service'] = 'cinder'
|
||||
# kick all compute nodes to know they should use cinder now.
|
||||
[relation_set(relation_id=rid, volume_service='cinder')
|
||||
for rid in relation_ids('cloud-compute')]
|
||||
return ctxt
|
||||
|
||||
|
||||
class HAProxyContext(context.HAProxyContext):
|
||||
interfaces = ['ceph']
|
||||
|
||||
def __call__(self):
|
||||
'''
|
||||
Extends the main charmhelpers HAProxyContext with a port mapping
|
||||
specific to this charm.
|
||||
Also used to extend nova.conf context with correct api_listening_ports
|
||||
'''
|
||||
from nova_cc_utils import api_port
|
||||
ctxt = super(HAProxyContext, self).__call__()
|
||||
|
||||
# determine which port api processes should bind to, depending
|
||||
# on existence of haproxy + apache frontends
|
||||
compute_api = determine_api_port(api_port('nova-api-os-compute'))
|
||||
ec2_api = determine_api_port(api_port('nova-api-ec2'))
|
||||
s3_api = determine_api_port(api_port('nova-objectstore'))
|
||||
nvol_api = determine_api_port(api_port('nova-api-os-volume'))
|
||||
neutron_api = determine_api_port(api_port('neutron-server'))
|
||||
|
||||
# to be set in nova.conf accordingly.
|
||||
listen_ports = {
|
||||
'osapi_compute_listen_port': compute_api,
|
||||
'ec2_listen_port': ec2_api,
|
||||
's3_listen_port': s3_api,
|
||||
}
|
||||
|
||||
port_mapping = {
|
||||
'nova-api-os-compute': [
|
||||
determine_haproxy_port(api_port('nova-api-os-compute')),
|
||||
compute_api,
|
||||
],
|
||||
'nova-api-ec2': [
|
||||
determine_haproxy_port(api_port('nova-api-ec2')),
|
||||
ec2_api,
|
||||
],
|
||||
'nova-objectstore': [
|
||||
determine_haproxy_port(api_port('nova-objectstore')),
|
||||
s3_api,
|
||||
],
|
||||
}
|
||||
|
||||
if relation_ids('nova-volume-service'):
|
||||
port_mapping.update({
|
||||
'nova-api-ec2': [
|
||||
determine_haproxy_port(api_port('nova-api-ec2')),
|
||||
nvol_api],
|
||||
})
|
||||
listen_ports['osapi_volume_listen_port'] = nvol_api
|
||||
|
||||
if neutron.network_manager() in ['neutron', 'quantum']:
|
||||
port_mapping.update({
|
||||
'neutron-server': [
|
||||
determine_haproxy_port(api_port('neutron-server')),
|
||||
neutron_api]
|
||||
})
|
||||
# quantum/neutron.conf listening port, set separte from nova's.
|
||||
ctxt['neutron_bind_port'] = neutron_api
|
||||
|
||||
# for haproxy.conf
|
||||
ctxt['service_ports'] = port_mapping
|
||||
# for nova.conf
|
||||
ctxt['listen_ports'] = listen_ports
|
||||
return ctxt
|
||||
|
||||
|
||||
class NeutronCCContext(context.NeutronContext):
|
||||
interfaces = []
|
||||
|
||||
@property
|
||||
def plugin(self):
|
||||
from nova_cc_utils import neutron_plugin
|
||||
return neutron_plugin()
|
||||
|
||||
@property
|
||||
def network_manager(self):
|
||||
return neutron.network_manager()
|
||||
|
||||
@property
|
||||
def neutron_security_groups(self):
|
||||
sec_groups = (config('neutron-security-groups') or
|
||||
config('quantum-security-groups'))
|
||||
return sec_groups.lower() == 'yes'
|
||||
|
||||
def _ensure_packages(self):
|
||||
# Only compute nodes need to ensure packages here, to install
|
||||
# required agents.
|
||||
return
|
||||
|
||||
def __call__(self):
|
||||
ctxt = super(NeutronCCContext, self).__call__()
|
||||
ctxt['external_network'] = config('neutron-external-network')
|
||||
return ctxt
|
||||
|
||||
|
||||
class IdentityServiceContext(context.IdentityServiceContext):
|
||||
def __call__(self):
|
||||
ctxt = super(IdentityServiceContext, self).__call__()
|
||||
if not ctxt:
|
||||
return
|
||||
|
||||
# the ec2 api needs to know the location of the keystone ec2
|
||||
# tokens endpoint, set in nova.conf
|
||||
ec2_tokens = 'http://%s:%s/v2.0/ec2tokens' % (ctxt['service_host'],
|
||||
ctxt['service_port'])
|
||||
ctxt['keystone_ec2_url'] = ec2_tokens
|
||||
return ctxt
|
407
hooks/nova_cc_hooks.py
Executable file
407
hooks/nova_cc_hooks.py
Executable file
@ -0,0 +1,407 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
|
||||
from subprocess import check_call
|
||||
from urlparse import urlparse
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
Hooks,
|
||||
UnregisteredHookError,
|
||||
config,
|
||||
charm_dir,
|
||||
log,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
relation_set,
|
||||
open_port,
|
||||
unit_get,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
restart_on_change
|
||||
)
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install, apt_update, filter_installed_packages
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import (
|
||||
configure_installation_source,
|
||||
openstack_upgrade_available,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.neutron import (
|
||||
network_manager,
|
||||
neutron_plugin_attribute,
|
||||
)
|
||||
|
||||
from nova_cc_utils import (
|
||||
api_port,
|
||||
auth_token_config,
|
||||
determine_endpoints,
|
||||
determine_packages,
|
||||
determine_ports,
|
||||
do_openstack_upgrade,
|
||||
keystone_ca_cert_b64,
|
||||
migrate_database,
|
||||
neutron_plugin,
|
||||
save_script_rc,
|
||||
ssh_compute_add,
|
||||
ssh_compute_remove,
|
||||
ssh_known_hosts_b64,
|
||||
ssh_authorized_keys_b64,
|
||||
register_configs,
|
||||
restart_map,
|
||||
volume_service,
|
||||
CLUSTER_RES,
|
||||
NOVA_CONF,
|
||||
QUANTUM_CONF,
|
||||
NEUTRON_CONF,
|
||||
QUANTUM_API_PASTE
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.hahelpers.cluster import (
|
||||
canonical_url,
|
||||
eligible_leader,
|
||||
get_hacluster_config,
|
||||
is_leader,
|
||||
)
|
||||
|
||||
from charmhelpers.payload.execd import execd_preinstall
|
||||
|
||||
hooks = Hooks()
|
||||
CONFIGS = register_configs()
|
||||
|
||||
|
||||
@hooks.hook()
|
||||
def install():
|
||||
execd_preinstall()
|
||||
configure_installation_source(config('openstack-origin'))
|
||||
apt_update()
|
||||
apt_install(determine_packages(), fatal=True)
|
||||
|
||||
_files = os.path.join(charm_dir(), 'files')
|
||||
if os.path.isdir(_files):
|
||||
for f in os.listdir(_files):
|
||||
f = os.path.join(_files, f)
|
||||
log('Installing %s to /usr/bin' % f)
|
||||
shutil.copy2(f, '/usr/bin')
|
||||
[open_port(port) for port in determine_ports()]
|
||||
|
||||
|
||||
@hooks.hook('config-changed')
|
||||
@restart_on_change(restart_map())
|
||||
def config_changed():
|
||||
if openstack_upgrade_available('nova-common'):
|
||||
do_openstack_upgrade(configs=CONFIGS)
|
||||
save_script_rc()
|
||||
configure_https()
|
||||
CONFIGS.write_all()
|
||||
|
||||
|
||||
@hooks.hook('amqp-relation-joined')
|
||||
def amqp_joined():
|
||||
relation_set(username=config('rabbit-user'), vhost=config('rabbit-vhost'))
|
||||
|
||||
|
||||
@hooks.hook('amqp-relation-changed')
|
||||
@restart_on_change(restart_map())
|
||||
def amqp_changed():
|
||||
if 'amqp' not in CONFIGS.complete_contexts():
|
||||
log('amqp relation incomplete. Peer not ready?')
|
||||
return
|
||||
CONFIGS.write(NOVA_CONF)
|
||||
if network_manager() == 'quantum':
|
||||
CONFIGS.write(QUANTUM_CONF)
|
||||
if network_manager() == 'neutron':
|
||||
CONFIGS.write(NEUTRON_CONF)
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-joined')
|
||||
def db_joined():
|
||||
relation_set(nova_database=config('database'),
|
||||
nova_username=config('database-user'),
|
||||
nova_hostname=unit_get('private-address'))
|
||||
if network_manager() in ['quantum', 'neutron']:
|
||||
# XXX: Renaming relations from quantum_* to neutron_* here.
|
||||
relation_set(neutron_database=config('neutron-database'),
|
||||
neutron_username=config('neutron-database-user'),
|
||||
neutron_hostname=unit_get('private-address'))
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-changed')
|
||||
@restart_on_change(restart_map())
|
||||
def db_changed():
|
||||
if 'shared-db' not in CONFIGS.complete_contexts():
|
||||
log('shared-db relation incomplete. Peer not ready?')
|
||||
return
|
||||
CONFIGS.write(NOVA_CONF)
|
||||
|
||||
if network_manager() in ['neutron', 'quantum']:
|
||||
plugin = neutron_plugin()
|
||||
# DB config might have been moved to main neutron.conf in H?
|
||||
CONFIGS.write(neutron_plugin_attribute(plugin, 'config'))
|
||||
|
||||
if eligible_leader(CLUSTER_RES):
|
||||
migrate_database()
|
||||
|
||||
|
||||
@hooks.hook('image-service-relation-changed')
|
||||
@restart_on_change(restart_map())
|
||||
def image_service_changed():
|
||||
if 'image-service' not in CONFIGS.complete_contexts():
|
||||
log('image-service relation incomplete. Peer not ready?')
|
||||
return
|
||||
CONFIGS.write(NOVA_CONF)
|
||||
# TODO: special case config flag for essex (strip protocol)
|
||||
|
||||
|
||||
@hooks.hook('identity-service-relation-joined')
|
||||
def identity_joined(rid=None):
|
||||
if not eligible_leader(CLUSTER_RES):
|
||||
return
|
||||
base_url = canonical_url(CONFIGS)
|
||||
relation_set(relation_id=rid, **determine_endpoints(base_url))
|
||||
|
||||
|
||||
@hooks.hook('identity-service-relation-changed')
|
||||
@restart_on_change(restart_map())
|
||||
def identity_changed():
|
||||
if 'identity-service' not in CONFIGS.complete_contexts():
|
||||
log('identity-service relation incomplete. Peer not ready?')
|
||||
return
|
||||
CONFIGS.write('/etc/nova/api-paste.ini')
|
||||
CONFIGS.write(NOVA_CONF)
|
||||
if network_manager() == 'quantum':
|
||||
CONFIGS.write(QUANTUM_API_PASTE)
|
||||
CONFIGS.write(QUANTUM_CONF)
|
||||
save_novarc()
|
||||
if network_manager() == 'neutron':
|
||||
CONFIGS.write(NEUTRON_CONF)
|
||||
[compute_joined(rid) for rid in relation_ids('cloud-compute')]
|
||||
[quantum_joined(rid) for rid in relation_ids('quantum-network-service')]
|
||||
configure_https()
|
||||
|
||||
|
||||
@hooks.hook('nova-volume-service-relation-joined',
|
||||
'cinder-volume-service-relation-joined')
|
||||
@restart_on_change(restart_map())
|
||||
def volume_joined():
|
||||
CONFIGS.write(NOVA_CONF)
|
||||
# kick identity_joined() to publish possibly new nova-volume endpoint.
|
||||
[identity_joined(rid) for rid in relation_ids('identity-service')]
|
||||
|
||||
|
||||
def _auth_config():
|
||||
'''Grab all KS auth token config from api-paste.ini, or return empty {}'''
|
||||
ks_auth_host = auth_token_config('auth_host')
|
||||
if not ks_auth_host:
|
||||
# if there is no auth_host set, identity-service changed hooks
|
||||
# have not fired, yet.
|
||||
return {}
|
||||
cfg = {
|
||||
'auth_host': ks_auth_host,
|
||||
'auth_port': auth_token_config('auth_port'),
|
||||
'service_port': auth_token_config('service_port'),
|
||||
'service_username': auth_token_config('admin_user'),
|
||||
'service_password': auth_token_config('admin_password'),
|
||||
'service_tenant_name': auth_token_config('admin_tenant_name'),
|
||||
'auth_uri': auth_token_config('auth_uri'),
|
||||
# quantum-gateway interface deviates a bit.
|
||||
'keystone_host': ks_auth_host,
|
||||
'service_tenant': auth_token_config('admin_tenant_name'),
|
||||
}
|
||||
return cfg
|
||||
|
||||
|
||||
def save_novarc():
|
||||
auth = _auth_config()
|
||||
# XXX hard-coded http
|
||||
ks_url = 'http://%s:%s/v2.0' % (auth['auth_host'], auth['auth_port'])
|
||||
with open('/etc/quantum/novarc', 'wb') as out:
|
||||
out.write('export OS_USERNAME=%s\n' % auth['service_username'])
|
||||
out.write('export OS_PASSWORD=%s\n' % auth['service_password'])
|
||||
out.write('export OS_TENANT_NAME=%s\n' % auth['service_tenant_name'])
|
||||
out.write('export OS_AUTH_URL=%s\n' % ks_url)
|
||||
out.write('export OS_REGION_NAME=%s\n' % config('region'))
|
||||
|
||||
|
||||
@hooks.hook('cloud-compute-relation-joined')
|
||||
def compute_joined(rid=None):
|
||||
if not eligible_leader(CLUSTER_RES):
|
||||
return
|
||||
rel_settings = {
|
||||
'network_manager': network_manager(),
|
||||
'volume_service': volume_service(),
|
||||
# (comment from bash vers) XXX Should point to VIP if clustered, or
|
||||
# this may not even be needed.
|
||||
'ec2_host': unit_get('private-address'),
|
||||
}
|
||||
|
||||
ks_auth_config = _auth_config()
|
||||
|
||||
if network_manager() in ['quantum', 'neutron']:
|
||||
if ks_auth_config:
|
||||
rel_settings.update(ks_auth_config)
|
||||
|
||||
rel_settings.update({
|
||||
# XXX: Rename these relations settings?
|
||||
'quantum_plugin': neutron_plugin(),
|
||||
'region': config('region'),
|
||||
'quantum_security_groups': config('quantum-security-groups'),
|
||||
'quantum_url': (canonical_url(CONFIGS) + ':' +
|
||||
str(api_port('neutron-server'))),
|
||||
})
|
||||
|
||||
ks_ca = keystone_ca_cert_b64()
|
||||
if ks_auth_config and ks_ca:
|
||||
rel_settings['ca_cert'] = ks_ca
|
||||
relation_set(relation_id=rid, **rel_settings)
|
||||
|
||||
|
||||
@hooks.hook('cloud-compute-relation-changed')
|
||||
def compute_changed():
|
||||
migration_auth = relation_get('migration_auth_type')
|
||||
if migration_auth == 'ssh':
|
||||
key = relation_get('ssh_public_key')
|
||||
if not key:
|
||||
log('SSH migration set but peer did not publish key.')
|
||||
return
|
||||
ssh_compute_add(key)
|
||||
relation_set(known_hosts=ssh_known_hosts_b64(),
|
||||
authorized_keys=ssh_authorized_keys_b64())
|
||||
|
||||
|
||||
@hooks.hook('cloud-compute-relation-departed')
def compute_departed():
    # Revoke the departing compute unit's SSH access: drop its public key
    # from our aggregated authorized_keys file.
    ssh_compute_remove(public_key=relation_get('ssh_public_key'))
|
||||
|
||||
|
||||
@hooks.hook('neutron-network-service-relation-joined',
            'quantum-network-service-relation-joined')
def quantum_joined(rid=None):
    """Provide the network service gateway with API endpoint details and
    local keystone auth configuration.

    Only the elected leader publishes settings.
    """
    if not eligible_leader(CLUSTER_RES):
        return

    if network_manager() == 'quantum':
        pkg = 'quantum-server'
    else:
        pkg = 'neutron-server'

    required_pkg = filter_installed_packages([pkg])
    if required_pkg:
        apt_install(required_pkg)

    url = canonical_url(CONFIGS) + ':9696'
    # XXX: Can we rename to neutron_*?
    rel_settings = {
        'quantum_host': urlparse(url).hostname,
        'quantum_url': url,
        'quantum_port': 9696,
        'quantum_plugin': neutron_plugin(),
        'region': config('region')
    }

    # inform quantum about local keystone auth config
    ks_auth_config = _auth_config()
    rel_settings.update(ks_auth_config)

    # must pass the keystone CA cert, if it exists.
    ks_ca = keystone_ca_cert_b64()
    if ks_auth_config and ks_ca:
        rel_settings['ca_cert'] = ks_ca

    # BUG FIX: relation_set()'s keyword is 'relation_id', not 'rid'.
    # Passing rid= published a spurious 'rid' key on the *current*
    # relation and ignored the intended relation id.  This now matches
    # compute_joined() above.
    relation_set(relation_id=rid, **rel_settings)
|
||||
|
||||
|
||||
@hooks.hook('cluster-relation-changed',
            'cluster-relation-departed')
@restart_on_change(restart_map())
def cluster_changed():
    # Peer membership changed: regenerate every managed config (e.g. the
    # haproxy backend list) and let restart_on_change bounce any service
    # whose file actually changed.
    CONFIGS.write_all()
|
||||
|
||||
|
||||
@hooks.hook('ha-relation-joined')
def ha_joined():
    """Hand the hacluster subordinate the resources it should manage:
    the service VIP plus a cloned haproxy LSB resource."""
    # Local name avoids shadowing the imported config() helper.
    cluster_config = get_hacluster_config()

    vip_params = ('params ip="%s" cidr_netmask="%s" nic="%s"' %
                  (cluster_config['vip'], cluster_config['vip_cidr'],
                   cluster_config['vip_iface']))

    resources = {
        'res_nova_vip': 'ocf:heartbeat:IPaddr2',
        'res_nova_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_nova_vip': vip_params,
        'res_nova_haproxy': 'op monitor interval="5s"',
    }
    init_services = {'res_nova_haproxy': 'haproxy'}
    clones = {'cl_nova_haproxy': 'res_nova_haproxy'}

    relation_set(init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
|
||||
|
||||
|
||||
@hooks.hook('ha-relation-changed')
def ha_changed():
    """React to the hacluster subordinate reporting cluster state.

    Once clustering is complete, the leader re-runs the identity-service
    join so keystone endpoints are refreshed (e.g. to sit behind the VIP).
    """
    clustered = relation_get('clustered')
    # 'clustered' may be unset or arrive as a stringified None.
    cluster_ready = clustered and clustered not in [None, 'None', '']
    if not cluster_ready:
        log('ha_changed: hacluster subordinate not fully clustered.')
        return
    if not is_leader(CLUSTER_RES):
        log('ha_changed: hacluster complete but we are not leader.')
        return
    log('Cluster configured, notifying other services and updating '
        'keystone endpoint configuration')
    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)
|
||||
|
||||
|
||||
# BUG FIX: a missing comma after 'shared-db-relation-broken' caused
# implicit string concatenation, registering one bogus hook name
# ('shared-db-relation-brokenquantum-network-service-relation-broken')
# and leaving both real hooks unregistered.
@hooks.hook('amqp-relation-broken',
            'cinder-volume-service-relation-broken',
            'identity-service-relation-broken',
            'image-service-relation-broken',
            'nova-volume-service-relation-broken',
            'shared-db-relation-broken',
            'quantum-network-service-relation-broken')
def relation_broken():
    # A backing relation went away; rewrite all configs so stale
    # endpoints/credentials are removed.
    CONFIGS.write_all()
|
||||
|
||||
|
||||
def configure_https():
    """Enable or disable the Apache SSL frontend to match the currently
    complete contexts, then kick identity-service with any required API
    endpoint updates."""
    # Write everything so changes propagate through the entire request
    # pipeline (c-api, haproxy, apache).
    CONFIGS.write_all()
    if 'https' in CONFIGS.complete_contexts():
        action = 'a2ensite'
    else:
        action = 'a2dissite'
    check_call([action, 'openstack_https_frontend'])

    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)
|
||||
|
||||
|
||||
def main():
    # Dispatch the invoked hook (sys.argv[0] is the hook name) to its
    # registered handler.  Hooks with no handler are expected for some
    # relations, so log and carry on rather than fail the hook.
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))


if __name__ == '__main__':
    main()
|
480
hooks/nova_cc_utils.py
Normal file
480
hooks/nova_cc_utils.py
Normal file
@ -0,0 +1,480 @@
|
||||
import os
|
||||
import subprocess
|
||||
import ConfigParser
|
||||
|
||||
from base64 import b64encode
|
||||
from collections import OrderedDict
|
||||
from copy import deepcopy
|
||||
|
||||
from charmhelpers.contrib.openstack import context, templating
|
||||
from charmhelpers.contrib.openstack.neutron import (
|
||||
network_manager, neutron_plugin_attribute)
|
||||
|
||||
from charmhelpers.contrib.hahelpers.cluster import eligible_leader
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import (
|
||||
configure_installation_source,
|
||||
get_host_ip,
|
||||
get_hostname,
|
||||
get_os_codename_install_source,
|
||||
is_ip,
|
||||
os_release,
|
||||
save_script_rc as _save_script_rc)
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
apt_update,
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
log,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
remote_unit,
|
||||
INFO,
|
||||
ERROR,
|
||||
)
|
||||
|
||||
|
||||
import nova_cc_context
|
||||
|
||||
TEMPLATES = 'templates/'
|
||||
|
||||
CLUSTER_RES = 'res_nova_vip'
|
||||
|
||||
# removed from original: python-mysqldb python-keystone charm-helper-sh
|
||||
BASE_PACKAGES = [
|
||||
'apache2',
|
||||
'haproxy',
|
||||
'python-keystoneclient',
|
||||
'uuid',
|
||||
]
|
||||
|
||||
BASE_SERVICES = [
|
||||
'nova-api-ec2',
|
||||
'nova-api-os-compute',
|
||||
'nova-objectstore',
|
||||
'nova-cert',
|
||||
'nova-scheduler',
|
||||
]
|
||||
|
||||
API_PORTS = {
|
||||
'nova-api-ec2': 8773,
|
||||
'nova-api-os-compute': 8774,
|
||||
'nova-api-os-volume': 8776,
|
||||
'nova-objectstore': 3333,
|
||||
'neutron-server': 9696,
|
||||
'quantum-server': 9696,
|
||||
}
|
||||
|
||||
NOVA_CONF = '/etc/nova/nova.conf'
|
||||
NOVA_API_PASTE = '/etc/nova/api-paste.ini'
|
||||
QUANTUM_CONF = '/etc/quantum/quantum.conf'
|
||||
QUANTUM_API_PASTE = '/etc/quantum/api-paste.ini'
|
||||
NEUTRON_CONF = '/etc/neutron/neutron.conf'
|
||||
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
|
||||
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
|
||||
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
|
||||
|
||||
BASE_RESOURCE_MAP = OrderedDict([
|
||||
(NOVA_CONF, {
|
||||
'services': BASE_SERVICES,
|
||||
'contexts': [context.AMQPContext(),
|
||||
context.SharedDBContext(relation_prefix='nova'),
|
||||
context.ImageServiceContext(),
|
||||
context.OSConfigFlagContext(),
|
||||
nova_cc_context.HAProxyContext(),
|
||||
nova_cc_context.IdentityServiceContext(),
|
||||
nova_cc_context.VolumeServiceContext(),
|
||||
nova_cc_context.NeutronCCContext()],
|
||||
}),
|
||||
(NOVA_API_PASTE, {
|
||||
'services': [s for s in BASE_SERVICES if 'api' in s],
|
||||
'contexts': [nova_cc_context.IdentityServiceContext()],
|
||||
}),
|
||||
(QUANTUM_CONF, {
|
||||
'services': ['quantum-server'],
|
||||
'contexts': [context.AMQPContext(),
|
||||
nova_cc_context.HAProxyContext(),
|
||||
nova_cc_context.IdentityServiceContext(),
|
||||
nova_cc_context.NeutronCCContext()],
|
||||
}),
|
||||
(QUANTUM_API_PASTE, {
|
||||
'services': ['quantum-server'],
|
||||
'contexts': [nova_cc_context.IdentityServiceContext()],
|
||||
}),
|
||||
(NEUTRON_CONF, {
|
||||
'services': ['neutron-server'],
|
||||
'contexts': [context.AMQPContext(),
|
||||
nova_cc_context.IdentityServiceContext(),
|
||||
nova_cc_context.NeutronCCContext(),
|
||||
nova_cc_context.HAProxyContext()],
|
||||
}),
|
||||
(HAPROXY_CONF, {
|
||||
'contexts': [context.HAProxyContext(),
|
||||
nova_cc_context.HAProxyContext()],
|
||||
'services': ['haproxy'],
|
||||
}),
|
||||
(APACHE_CONF, {
|
||||
'contexts': [nova_cc_context.ApacheSSLContext()],
|
||||
'services': ['apache2'],
|
||||
}),
|
||||
(APACHE_24_CONF, {
|
||||
'contexts': [nova_cc_context.ApacheSSLContext()],
|
||||
'services': ['apache2'],
|
||||
}),
|
||||
])
|
||||
|
||||
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
||||
|
||||
NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
|
||||
|
||||
|
||||
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.

    Starts from BASE_RESOURCE_MAP and then, based on current relations and
    config: adds the legacy nova-volume API, strips quantum/neutron config
    files that do not match the active network manager, keeps only the
    Apache site file matching the installed Apache version, adds the
    neutron plugin's own config file, and appends nova-conductor for
    releases newer than folsom.
    '''
    resource_map = deepcopy(BASE_RESOURCE_MAP)

    if relation_ids('nova-volume-service'):
        # if we have a relation to a nova-volume service, we're
        # also managing the nova-volume API endpoint (legacy)
        resource_map['/etc/nova/nova.conf']['services'].append(
            'nova-api-os-volume')

    net_manager = network_manager()

    # pop out irrelevant resources from the OrderedDict (easier than adding
    # them late)
    # list() snapshots the keys so popping during iteration is safe.
    if net_manager != 'quantum':
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if 'quantum' in k]
    if net_manager != 'neutron':
        [resource_map.pop(k) for k in list(resource_map.iterkeys())
         if 'neutron' in k]

    # conf-available only exists on Apache >= 2.4, which also expects the
    # '.conf'-suffixed site file; keep exactly one of the two variants.
    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_CONF)
    else:
        resource_map.pop(APACHE_24_CONF)

    # add neutron plugin requirements. nova-c-c only needs the neutron-server
    # associated with configs, not the plugin agent.
    if net_manager in ['quantum', 'neutron']:
        plugin = neutron_plugin()
        if plugin:
            conf = neutron_plugin_attribute(plugin, 'config', net_manager)
            service = '%s-server' % net_manager
            ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager)
                     or [])
            resource_map[conf] = {}
            resource_map[conf]['services'] = [service]
            resource_map[conf]['contexts'] = ctxts
            resource_map[conf]['contexts'].append(
                nova_cc_context.NeutronCCContext())

    # nova-conductor for releases >= G.
    if os_release('nova-common') not in ['essex', 'folsom']:
        resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor']
    return resource_map
|
||||
|
||||
|
||||
def register_configs():
    """Create an OSConfigRenderer for the installed OpenStack release and
    register every managed config file with its context generators."""
    configs = templating.OSConfigRenderer(
        templates_dir=TEMPLATES,
        openstack_release=os_release('nova-common'))
    for cfg, rscs in resource_map().iteritems():
        configs.register(cfg, rscs['contexts'])
    return configs
|
||||
|
||||
|
||||
def restart_map():
    """Map each managed config file to the services restarted when it
    changes; files that manage no services are omitted."""
    restarts = OrderedDict()
    for cfg, resource in resource_map().iteritems():
        if resource['services']:
            restarts[cfg] = resource['services']
    return restarts
|
||||
|
||||
|
||||
def determine_ports():
    '''Assemble a list of API ports for services we are managing.

    Services without a known port (e.g. haproxy, apache2) are skipped.
    '''
    ports = set()
    for services in restart_map().itervalues():
        for service in services:
            if service in API_PORTS:
                ports.add(API_PORTS[service])
    return list(ports)
|
||||
|
||||
|
||||
def api_port(service):
    # Listening port for a managed API service; raises KeyError for
    # service names not present in API_PORTS.
    return API_PORTS[service]
|
||||
|
||||
|
||||
def determine_packages():
    """Return the de-duplicated package list for this deployment.

    Currently every managed service name doubles as its package name,
    so the set is BASE_PACKAGES plus all services in the resource map.
    """
    packages = set(BASE_PACKAGES)
    for resource in resource_map().itervalues():
        packages.update(resource['services'])
    return list(packages)
|
||||
|
||||
|
||||
def save_script_rc():
    """Persist managed service names (and the corosync mcast port) to
    scriptrc for use by operator shell scripts; optional services are
    exported only when the matching relation/config is active."""
    env_vars = dict(
        OPENSTACK_PORT_MCASTPORT=config('ha-mcastport'),
        OPENSTACK_SERVICE_API_EC2='nova-api-ec2',
        OPENSTACK_SERVICE_API_OS_COMPUTE='nova-api-os-compute',
        OPENSTACK_SERVICE_CERT='nova-cert',
        OPENSTACK_SERVICE_CONDUCTOR='nova-conductor',
        OPENSTACK_SERVICE_OBJECTSTORE='nova-objectstore',
        OPENSTACK_SERVICE_SCHEDULER='nova-scheduler',
    )
    if relation_ids('nova-volume-service'):
        env_vars['OPENSTACK_SERVICE_API_OS_VOL'] = 'nova-api-os-volume'
    if network_manager() == 'quantum':
        env_vars['OPENSTACK_SERVICE_API_QUANTUM'] = 'quantum-server'
    if network_manager() == 'neutron':
        env_vars['OPENSTACK_SERVICE_API_NEUTRON'] = 'neutron-server'
    _save_script_rc(**env_vars)
|
||||
|
||||
|
||||
def do_openstack_upgrade(configs):
    """Upgrade to the OpenStack release named by 'openstack-origin'.

    Order matters here: switch the apt source, update, upgrade packages
    with explicit dpkg conffile-handling options, then point the renderer
    at the new release's templates and rewrite everything.  Only the
    leader runs database migrations.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    apt_update()

    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]

    apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    if eligible_leader(CLUSTER_RES):
        migrate_database()
|
||||
|
||||
|
||||
def volume_service():
    """Return the volume API implementation for the installed release.

    essex only has nova-volume; folsom supports both drivers (cinder wins
    once a cinder-volume-service relation exists); later releases are
    cinder-only.
    """
    release = os_release('nova-common')
    if release == 'essex':
        return 'nova-volume'
    if release == 'folsom' and not relation_ids('cinder-volume-service'):
        return 'nova-volume'
    return 'cinder'
|
||||
|
||||
|
||||
def migrate_database():
    '''Runs nova-manage to initialize a new database or migrate existing'''
    log('Migrating the nova database.', level=INFO)
    subprocess.check_output(['nova-manage', 'db', 'sync'])
|
||||
|
||||
|
||||
def auth_token_config(setting):
    '''
    Returns currently configured value for setting in api-paste.ini's
    authtoken section, or None.
    '''
    config = ConfigParser.RawConfigParser()
    # read() silently ignores a missing file; the get() below then raises
    # NoSectionError, which we translate to None.
    config.read('/etc/nova/api-paste.ini')
    try:
        value = config.get('filter:authtoken', setting)
    except ConfigParser.Error:
        # BUG FIX: narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit and masked real programming errors.
        return None
    if value.startswith('%'):
        # Values still starting with '%' are treated as unset (presumably
        # untemplated %SERVICE_*% placeholders — confirm against the
        # shipped api-paste.ini).
        return None
    return value
|
||||
|
||||
|
||||
def keystone_ca_cert_b64():
    """Return the locally installed Keystone CA certificate base64-encoded,
    or None when no certificate file exists."""
    if os.path.isfile(CA_CERT_PATH):
        with open(CA_CERT_PATH) as cert:
            return b64encode(cert.read())
    return None
|
||||
|
||||
|
||||
def ssh_directory_for_unit():
    """Return (creating as needed) the per-remote-service directory that
    holds the authorized_keys and known_hosts files used for compute SSH
    migration; both files are created empty if absent."""
    remote_service = remote_unit().split('/')[0]
    unit_dir = os.path.join(NOVA_SSH_DIR, remote_service)
    for needed_dir in (NOVA_SSH_DIR, unit_dir):
        if not os.path.isdir(needed_dir):
            os.mkdir(needed_dir)
    for basename in ('authorized_keys', 'known_hosts'):
        path = os.path.join(unit_dir, basename)
        if not os.path.isfile(path):
            open(path, 'w').close()
    return unit_dir
|
||||
|
||||
|
||||
def known_hosts():
    # Path to the known_hosts file for the current remote unit's service.
    return os.path.join(ssh_directory_for_unit(), 'known_hosts')
|
||||
|
||||
|
||||
def authorized_keys():
    # Path to the authorized_keys file for the current remote unit's service.
    return os.path.join(ssh_directory_for_unit(), 'authorized_keys')
|
||||
|
||||
|
||||
def ssh_known_host_key(host):
    """Return the known_hosts entry ssh-keygen -F reports for *host*
    (stripped stdout; empty when nothing is printed)."""
    lookup = ['ssh-keygen', '-f', known_hosts(), '-H', '-F', host]
    return subprocess.check_output(lookup).strip()
|
||||
|
||||
|
||||
def remove_known_host(host):
    """Remove all known_hosts entries for *host* via ssh-keygen -R."""
    log('Removing SSH known host entry for compute host at %s' % host)
    # BUG FIX: the binary was misspelled 'ssh-kegen', so every removal
    # attempt failed with OSError/CalledProcessError instead of pruning
    # the stale host key.
    cmd = ['ssh-keygen', '-f', known_hosts(), '-R', host]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def add_known_host(host):
    '''Scan *host* for its RSA key and record it (hashed) in known_hosts,
    replacing any stale entry; a no-op when the stored key is current.'''
    keyscan = ['ssh-keyscan', '-H', '-t', 'rsa', host]
    try:
        remote_key = subprocess.check_output(keyscan).strip()
    except Exception as e:
        log('Could not obtain SSH host key from %s' % host, level=ERROR)
        raise e

    current_key = ssh_known_host_key(host)
    if current_key:
        if remote_key == current_key:
            log('Known host key for compute host %s up to date.' % host)
            return
        remove_known_host(host)

    log('Adding SSH host key to known hosts for compute node at %s.' % host)
    with open(known_hosts(), 'a') as out:
        out.write(remote_key + '\n')
|
||||
|
||||
|
||||
def ssh_authorized_key_exists(public_key):
    """True when *public_key* already appears (space-delimited) in the
    remote service's authorized_keys file."""
    needle = ' %s ' % public_key
    with open(authorized_keys()) as keys_file:
        return needle in keys_file.read()
|
||||
|
||||
|
||||
def add_authorized_key(public_key):
    """Append *public_key* to the remote service's authorized_keys file."""
    with open(authorized_keys(), 'a') as keys_file:
        keys_file.write(public_key + '\n')
|
||||
|
||||
|
||||
def ssh_compute_add(public_key):
    """Trust a compute node for SSH live migration.

    Records known_hosts entries for every variation of the peer's address
    (IP, hostname, short name) and authorizes its public key.
    """
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    private_address = relation_get('private-address')
    hosts = [private_address]

    if is_ip(private_address):
        hn = get_hostname(private_address)
        hosts.append(hn)
        hosts.append(hn.split('.')[0])
    else:
        hosts.append(get_host_ip(private_address))
        hosts.append(private_address.split('.')[0])

    for host in set(hosts):
        if not ssh_known_host_key(host):
            add_known_host(host)

    if not ssh_authorized_key_exists(public_key):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key)
|
||||
|
||||
|
||||
def ssh_known_hosts_b64():
    # Aggregated known_hosts for the remote service, base64-encoded for
    # transport over relation data.
    with open(known_hosts()) as hosts:
        return b64encode(hosts.read())
|
||||
|
||||
|
||||
def ssh_authorized_keys_b64():
    # Aggregated authorized_keys for the remote service, base64-encoded
    # for transport over relation data.
    with open(authorized_keys()) as keys:
        return b64encode(keys.read())
|
||||
|
||||
|
||||
def ssh_compute_remove(public_key):
    """Revoke a departed compute node's SSH access by removing its
    public key from the authorized_keys file; no-op when the file or
    the key is absent."""
    # BUG FIX: the guard used 'or' across both files, so execution
    # proceeded (and open() raised IOError) when known_hosts existed but
    # authorized_keys did not.  Only authorized_keys is touched here.
    if not os.path.isfile(authorized_keys()):
        return

    with open(authorized_keys()) as _keys:
        keys = [k.strip() for k in _keys.readlines()]

    if public_key not in keys:
        return

    # BUG FIX: was a side-effecting comprehension mutating 'keys' while
    # iterating it, which can skip adjacent duplicate entries.
    keys = [k for k in keys if k != public_key]

    # BUG FIX: terminate every key with a newline; the old
    # '\n'.join(keys) left the last key unterminated, so a subsequent
    # add_authorized_key() append would fuse two keys onto one line.
    with open(authorized_keys(), 'w') as _keys:
        _keys.write(''.join(['%s\n' % k for k in keys]))
|
||||
|
||||
|
||||
def determine_endpoints(url):
    '''Generates a dictionary containing all relevant endpoints to be
    passed to keystone as relation settings.

    Always includes nova, ec2 and s3; nova-volume endpoints are added
    only when a nova-volume-service relation exists, and quantum
    endpoints only under a quantum/neutron network manager.
    '''
    region = config('region')

    # TODO: Configurable nova API version.
    nova_url = ('%s:%s/v1.1/$(tenant_id)s' %
                (url, api_port('nova-api-os-compute')))
    ec2_url = '%s:%s/services/Cloud' % (url, api_port('nova-api-ec2'))
    # NOTE(review): this uses the os-compute port (8774) rather than
    # api_port('nova-api-os-volume') (8776) — confirm that is intentional.
    nova_volume_url = ('%s:%s/v1/$(tenant_id)s' %
                       (url, api_port('nova-api-os-compute')))
    neutron_url = '%s:%s' % (url, api_port('neutron-server'))
    s3_url = '%s:%s' % (url, api_port('nova-objectstore'))

    # the base endpoints
    endpoints = {
        'nova_service': 'nova',
        'nova_region': region,
        'nova_public_url': nova_url,
        'nova_admin_url': nova_url,
        'nova_internal_url': nova_url,
        'ec2_service': 'ec2',
        'ec2_region': region,
        'ec2_public_url': ec2_url,
        'ec2_admin_url': ec2_url,
        'ec2_internal_url': ec2_url,
        's3_service': 's3',
        's3_region': region,
        's3_public_url': s3_url,
        's3_admin_url': s3_url,
        's3_internal_url': s3_url,
    }

    if relation_ids('nova-volume-service'):
        endpoints.update({
            'nova-volume_service': 'nova-volume',
            'nova-volume_region': region,
            'nova-volume_public_url': nova_volume_url,
            'nova-volume_admin_url': nova_volume_url,
            'nova-volume_internal_url': nova_volume_url,
        })

    # XXX: Keep these relations named quantum_*??
    if network_manager() in ['quantum', 'neutron']:
        endpoints.update({
            'quantum_service': 'quantum',
            'quantum_region': region,
            'quantum_public_url': neutron_url,
            'quantum_admin_url': neutron_url,
            'quantum_internal_url': neutron_url,
        })

    return endpoints
|
||||
|
||||
|
||||
def neutron_plugin():
    # quantum-plugin config setting can be safely overridden
    # as we only supported OVS in G/neutron.
    # Prefer the newer 'neutron-plugin' option; fall back to the legacy
    # 'quantum-plugin' key for older deployments.
    return config('neutron-plugin') or config('quantum-plugin')
|
1
hooks/quantum-network-service-relation-broken
Symbolic link
1
hooks/quantum-network-service-relation-broken
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
1
hooks/shared-db-relation-broken
Symbolic link
1
hooks/shared-db-relation-broken
Symbolic link
@ -0,0 +1 @@
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -1 +1 @@
|
||||
nova-cloud-controller-relations
|
||||
nova_cc_hooks.py
|
@ -4,6 +4,8 @@ summary: "Openstack nova controller node."
|
||||
description: |
|
||||
Cloud controller node for Openstack nova. Contains nova-schedule,
|
||||
nova-api, nova-network and nova-objectstore.
|
||||
categories:
|
||||
- openstack
|
||||
provides:
|
||||
cloud-controller:
|
||||
interface: nova
|
||||
|
@ -1,11 +0,0 @@
|
||||
nova-cloud-controller:
|
||||
nova-release: trunk
|
||||
nova-config: /etc/nova/nova.conf
|
||||
db-user: nova
|
||||
nova-db: nova
|
||||
rabbit-user: nova
|
||||
rabbit-vhost: nova
|
||||
network-manager: FlatManager
|
||||
bridge-interface: br100
|
||||
bridge-ip: 11.0.0.1
|
||||
bridge-netmask: 255.255.255.0
|
162
templates/essex/etc_nova_api-paste.ini
Normal file
162
templates/essex/etc_nova_api-paste.ini
Normal file
@ -0,0 +1,162 @@
|
||||
# essex
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
############
|
||||
# Metadata #
|
||||
############
|
||||
[composite:metadata]
|
||||
use = egg:Paste#urlmap
|
||||
/: metaversions
|
||||
/latest: meta
|
||||
/1.0: meta
|
||||
/2007-01-19: meta
|
||||
/2007-03-01: meta
|
||||
/2007-08-29: meta
|
||||
/2007-10-10: meta
|
||||
/2007-12-15: meta
|
||||
/2008-02-01: meta
|
||||
/2008-09-01: meta
|
||||
/2009-04-04: meta
|
||||
|
||||
[pipeline:metaversions]
|
||||
pipeline = ec2faultwrap logrequest metaverapp
|
||||
|
||||
[pipeline:meta]
|
||||
pipeline = ec2faultwrap logrequest metaapp
|
||||
|
||||
[app:metaverapp]
|
||||
paste.app_factory = nova.api.metadata.handler:Versions.factory
|
||||
|
||||
[app:metaapp]
|
||||
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
|
||||
|
||||
#######
|
||||
# EC2 #
|
||||
#######
|
||||
|
||||
[composite:ec2]
|
||||
use = egg:Paste#urlmap
|
||||
/services/Cloud: ec2cloud
|
||||
|
||||
[composite:ec2cloud]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
|
||||
deprecated = ec2faultwrap logrequest authenticate cloudrequest validator ec2executor
|
||||
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
|
||||
|
||||
[filter:ec2faultwrap]
|
||||
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
|
||||
|
||||
[filter:logrequest]
|
||||
paste.filter_factory = nova.api.ec2:RequestLogging.factory
|
||||
|
||||
[filter:ec2lockout]
|
||||
paste.filter_factory = nova.api.ec2:Lockout.factory
|
||||
|
||||
[filter:totoken]
|
||||
paste.filter_factory = nova.api.ec2:EC2Token.factory
|
||||
|
||||
[filter:ec2keystoneauth]
|
||||
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
|
||||
|
||||
[filter:ec2noauth]
|
||||
paste.filter_factory = nova.api.ec2:NoAuth.factory
|
||||
|
||||
[filter:authenticate]
|
||||
paste.filter_factory = nova.api.ec2:Authenticate.factory
|
||||
|
||||
[filter:cloudrequest]
|
||||
controller = nova.api.ec2.cloud.CloudController
|
||||
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||
|
||||
[filter:authorizer]
|
||||
paste.filter_factory = nova.api.ec2:Authorizer.factory
|
||||
|
||||
[filter:validator]
|
||||
paste.filter_factory = nova.api.ec2:Validator.factory
|
||||
|
||||
[app:ec2executor]
|
||||
paste.app_factory = nova.api.ec2:Executor.factory
|
||||
|
||||
#############
|
||||
# Openstack #
|
||||
#############
|
||||
|
||||
[composite:osapi_compute]
|
||||
use = call:nova.api.openstack.urlmap:urlmap_factory
|
||||
/: oscomputeversions
|
||||
/v1.1: openstack_compute_api_v2
|
||||
/v2: openstack_compute_api_v2
|
||||
|
||||
[composite:osapi_volume]
|
||||
use = call:nova.api.openstack.urlmap:urlmap_factory
|
||||
/: osvolumeversions
|
||||
/v1: openstack_volume_api_v1
|
||||
|
||||
[composite:openstack_compute_api_v2]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = faultwrap noauth ratelimit osapi_compute_app_v2
|
||||
deprecated = faultwrap auth ratelimit osapi_compute_app_v2
|
||||
keystone = faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2
|
||||
keystone_nolimit = faultwrap authtoken keystonecontext osapi_compute_app_v2
|
||||
|
||||
[composite:openstack_volume_api_v1]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = faultwrap noauth ratelimit osapi_volume_app_v1
|
||||
deprecated = faultwrap auth ratelimit osapi_volume_app_v1
|
||||
keystone = faultwrap authtoken keystonecontext ratelimit osapi_volume_app_v1
|
||||
keystone_nolimit = faultwrap authtoken keystonecontext osapi_volume_app_v1
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
|
||||
|
||||
[filter:auth]
|
||||
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:ratelimit]
|
||||
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
|
||||
|
||||
[app:osapi_compute_app_v2]
|
||||
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
|
||||
|
||||
[pipeline:oscomputeversions]
|
||||
pipeline = faultwrap oscomputeversionapp
|
||||
|
||||
[app:osapi_volume_app_v1]
|
||||
paste.app_factory = nova.api.openstack.volume:APIRouter.factory
|
||||
|
||||
[app:oscomputeversionapp]
|
||||
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
|
||||
|
||||
[pipeline:osvolumeversions]
|
||||
pipeline = faultwrap osvolumeversionapp
|
||||
|
||||
[app:osvolumeversionapp]
|
||||
paste.app_factory = nova.api.openstack.volume.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
{% if service_host -%}
|
||||
service_protocol = {{ service_protocol }}
|
||||
service_host = {{ service_host }}
|
||||
service_port = {{ service_port }}
|
||||
auth_host = {{ auth_host }}
|
||||
auth_port = {{ auth_port }}
|
||||
auth_protocol = {{ auth_protocol }}
|
||||
admin_tenant_name = {{ admin_tenant_name }}
|
||||
admin_user = {{ admin_user }}
|
||||
admin_password = {{ admin_password }}
|
||||
{% endif -%}
|
||||
|
34
templates/essex/nova.conf
Normal file
34
templates/essex/nova.conf
Normal file
@ -0,0 +1,34 @@
|
||||
# essex
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
--dhcpbridge_flagfile=/etc/nova/nova.conf
|
||||
--dhcpbridge=/usr/bin/nova-dhcpbridge
|
||||
--logdir=/var/log/nova
|
||||
--state_path=/var/lib/nova
|
||||
--lock_path=/var/lock/nova
|
||||
--force_dhcp_release
|
||||
--iscsi_helper=tgtadm
|
||||
--libvirt_use_virtio_for_bridges
|
||||
--connection_type=libvirt
|
||||
--root_helper=sudo nova-rootwrap
|
||||
--verbose
|
||||
--ec2_private_dns_show_ip
|
||||
{% if database_host -%}
|
||||
--sql_connection=mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}
|
||||
{% endif -%}
|
||||
{% if rabbitmq_host -%}
|
||||
--rabbit_host={{ rabbitmq_host }}
|
||||
--rabbit_userid={{ rabbitmq_user }}
|
||||
--rabbit_password={{ rabbitmq_password }}
|
||||
--rabbit_virtual_host={{ rabbitmq_virtual_host }}
|
||||
{% endif -%}
|
||||
{% if glance_api_servers -%}
|
||||
--glance_api_servers={{ glance_api_servers }}
|
||||
{% endif -%}
|
||||
{% if rbd_pool -%}
|
||||
--rbd_pool={{ rbd_pool }}
|
||||
--rbd_user={{ rbd_user }}
|
||||
--rbd_secret_uuid={{ rbd_secret_uuid }}
|
||||
{% endif -%}
|
141
templates/folsom/etc_nova_api-paste.ini
Normal file
141
templates/folsom/etc_nova_api-paste.ini
Normal file
@ -0,0 +1,141 @@
|
||||
# folsom
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
############
|
||||
# Metadata #
|
||||
############
|
||||
[composite:metadata]
|
||||
use = egg:Paste#urlmap
|
||||
/: meta
|
||||
|
||||
[pipeline:meta]
|
||||
pipeline = ec2faultwrap logrequest metaapp
|
||||
|
||||
[app:metaapp]
|
||||
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
|
||||
|
||||
#######
|
||||
# EC2 #
|
||||
#######
|
||||
|
||||
[composite:ec2]
|
||||
use = egg:Paste#urlmap
|
||||
/services/Cloud: ec2cloud
|
||||
|
||||
[composite:ec2cloud]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
|
||||
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
|
||||
|
||||
[filter:ec2faultwrap]
|
||||
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
|
||||
|
||||
[filter:logrequest]
|
||||
paste.filter_factory = nova.api.ec2:RequestLogging.factory
|
||||
|
||||
[filter:ec2lockout]
|
||||
paste.filter_factory = nova.api.ec2:Lockout.factory
|
||||
|
||||
[filter:ec2keystoneauth]
|
||||
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
|
||||
|
||||
[filter:ec2noauth]
|
||||
paste.filter_factory = nova.api.ec2:NoAuth.factory
|
||||
|
||||
[filter:cloudrequest]
|
||||
controller = nova.api.ec2.cloud.CloudController
|
||||
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||
|
||||
[filter:authorizer]
|
||||
paste.filter_factory = nova.api.ec2:Authorizer.factory
|
||||
|
||||
[filter:validator]
|
||||
paste.filter_factory = nova.api.ec2:Validator.factory
|
||||
|
||||
[app:ec2executor]
|
||||
paste.app_factory = nova.api.ec2:Executor.factory
|
||||
|
||||
#############
|
||||
# Openstack #
|
||||
#############
|
||||
|
||||
[composite:osapi_compute]
|
||||
use = call:nova.api.openstack.urlmap:urlmap_factory
|
||||
/: oscomputeversions
|
||||
/v1.1: openstack_compute_api_v2
|
||||
/v2: openstack_compute_api_v2
|
||||
|
||||
[composite:osapi_volume]
|
||||
use = call:nova.api.openstack.urlmap:urlmap_factory
|
||||
/: osvolumeversions
|
||||
/v1: openstack_volume_api_v1
|
||||
|
||||
[composite:openstack_compute_api_v2]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
|
||||
|
||||
[composite:openstack_volume_api_v1]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth ratelimit osapi_volume_app_v1
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_volume_app_v1
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:ratelimit]
|
||||
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
|
||||
|
||||
[app:osapi_compute_app_v2]
|
||||
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
|
||||
|
||||
[pipeline:oscomputeversions]
|
||||
pipeline = faultwrap oscomputeversionapp
|
||||
|
||||
[app:osapi_volume_app_v1]
|
||||
paste.app_factory = nova.api.openstack.volume:APIRouter.factory
|
||||
|
||||
[app:oscomputeversionapp]
|
||||
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
|
||||
|
||||
[pipeline:osvolumeversions]
|
||||
pipeline = faultwrap osvolumeversionapp
|
||||
|
||||
[app:osvolumeversionapp]
|
||||
paste.app_factory = nova.api.openstack.volume.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
{% if service_host -%}
|
||||
service_protocol = {{ service_protocol }}
|
||||
service_host = {{ service_host }}
|
||||
service_port = {{ service_port }}
|
||||
auth_host = {{ auth_host }}
|
||||
auth_port = {{ auth_port }}
|
||||
auth_protocol = {{ auth_protocol }}
|
||||
admin_tenant_name = {{ admin_tenant_name }}
|
||||
admin_user = {{ admin_user }}
|
||||
admin_password = {{ admin_password }}
|
||||
{% endif -%}
|
||||
# signing_dir is configurable, but the default behavior of the authtoken
|
||||
# middleware should be sufficient. It will create a temporary directory
|
||||
# in the home directory for the user the nova process is running as.
|
||||
#signing_dir = /var/lib/nova/keystone-signing
|
||||
|
40
templates/folsom/etc_quantum_api-paste.ini
Normal file
40
templates/folsom/etc_quantum_api-paste.ini
Normal file
@ -0,0 +1,40 @@
|
||||
# folsom
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
[composite:quantum]
|
||||
use = egg:Paste#urlmap
|
||||
/: quantumversions
|
||||
/v2.0: quantumapi_v2_0
|
||||
|
||||
[composite:quantumapi_v2_0]
|
||||
use = call:quantum.auth:pipeline_factory
|
||||
noauth = extensions quantumapiapp_v2_0
|
||||
keystone = authtoken keystonecontext extensions quantumapiapp_v2_0
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
{% if service_host -%}
|
||||
service_protocol = {{ service_protocol }}
|
||||
service_host = {{ service_host }}
|
||||
service_port = {{ service_port }}
|
||||
auth_host = {{ auth_host }}
|
||||
auth_port = {{ auth_port }}
|
||||
auth_protocol = {{ auth_protocol }}
|
||||
admin_tenant_name = {{ admin_tenant_name }}
|
||||
admin_user = {{ admin_user }}
|
||||
admin_password = {{ admin_password }}
|
||||
{% endif -%}
|
||||
|
||||
[filter:extensions]
|
||||
paste.filter_factory = quantum.extensions.extensions:plugin_aware_extension_middleware_factory
|
||||
|
||||
[app:quantumversions]
|
||||
paste.app_factory = quantum.api.versions:Versions.factory
|
||||
|
||||
[app:quantumapiapp_v2_0]
|
||||
paste.app_factory = quantum.api.v2.router:APIRouter.factory
|
92
templates/folsom/nova.conf
Normal file
92
templates/folsom/nova.conf
Normal file
@ -0,0 +1,92 @@
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
[DEFAULT]
|
||||
dhcpbridge_flagfile=/etc/nova/nova.conf
|
||||
dhcpbridge=/usr/bin/nova-dhcpbridge
|
||||
logdir=/var/log/nova
|
||||
state_path=/var/lib/nova
|
||||
lock_path=/var/lock/nova
|
||||
force_dhcp_release=True
|
||||
iscsi_helper=tgtadm
|
||||
libvirt_use_virtio_for_bridges=True
|
||||
connection_type=libvirt
|
||||
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
|
||||
verbose=True
|
||||
ec2_private_dns_show_ip=True
|
||||
api_paste_config=/etc/nova/api-paste.ini
|
||||
volumes_path=/var/lib/nova/volumes
|
||||
enabled_apis=ec2,osapi_compute,metadata
|
||||
auth_strategy=keystone
|
||||
compute_driver=libvirt.LibvirtDriver
|
||||
{% if keystone_ec2_url -%}
|
||||
keystone_ec2_url = {{ keystone_ec2_url }}
|
||||
{% endif -%}
|
||||
|
||||
{% if database_host -%}
|
||||
sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}
|
||||
{% endif -%}
|
||||
|
||||
{% if rabbitmq_host -%}
|
||||
rabbit_host = {{ rabbitmq_host }}
|
||||
rabbit_userid = {{ rabbitmq_user }}
|
||||
rabbit_password = {{ rabbitmq_password }}
|
||||
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
|
||||
{% endif -%}
|
||||
|
||||
{% if glance_api_servers -%}
|
||||
glance_api_servers = {{ glance_api_servers }}
|
||||
{% endif -%}
|
||||
|
||||
{% if rbd_pool -%}
|
||||
rbd_pool = {{ rbd_pool }}
|
||||
rbd_user = {{ rbd_user }}
|
||||
rbd_secret_uuid = {{ rbd_secret_uuid }}
|
||||
{% endif -%}
|
||||
|
||||
{% if neutron_plugin and neutron_plugin == 'ovs' -%}
|
||||
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver
|
||||
libvirt_user_virtio_for_bridges = True
|
||||
{% if neutron_security_groups -%}
|
||||
security_group_api = {{ network_manager }}
|
||||
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
|
||||
{% endif -%}
|
||||
{% if external_network -%}
|
||||
default_floating_pool = {{ external_network }}
|
||||
{% endif -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if network_manager_config -%}
|
||||
{% for key, value in network_manager_config.iteritems() -%}
|
||||
{{ key }} = {{ value }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if network_manager and network_manager == 'quantum' -%}
|
||||
network_api_class = nova.network.quantumv2.api.API
|
||||
{% elif network_manager and network_manager == 'neutron' -%}
|
||||
network_api_class = nova.network.neutronv2.api.API
|
||||
{% else -%}
|
||||
network_manager = nova.network.manager.FlatDHCPManager
|
||||
{% endif -%}
|
||||
|
||||
{% if default_floating_pool -%}
|
||||
default_floating_pool = {{ default_floating_pool }}
|
||||
{% endif -%}
|
||||
|
||||
{% if volume_service -%}
|
||||
volume_api_class=nova.volume.cinder.API
|
||||
{% endif -%}
|
||||
|
||||
{% if user_config_flags -%}
|
||||
{% for key, value in user_config_flags.iteritems() -%}
|
||||
{{ key }} = {{ value }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{% if listen_ports -%}
|
||||
{% for key, value in listen_ports.iteritems() -%}
|
||||
{{ key }} = {{ value }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
23
templates/folsom/ovs_quantum_plugin.ini
Normal file
23
templates/folsom/ovs_quantum_plugin.ini
Normal file
@ -0,0 +1,23 @@
|
||||
# grizzly
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
[OVS]
|
||||
tunnel_id_ranges = 1:1000
|
||||
tenant_network_type = gre
|
||||
enable_tunneling = True
|
||||
local_ip = {{ local_ip }}
|
||||
|
||||
[DATABASE]
|
||||
{% if database_host -%}
|
||||
sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}?quantum?charset=utf8
|
||||
reconnect_interval = 2
|
||||
{% else -%}
|
||||
connection = sqlite:////var/lib/quantum/quantum.sqlite
|
||||
{% endif -%}
|
||||
|
||||
[SECURITYGROUP]
|
||||
{% if neutron_security_groups -%}
|
||||
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
|
||||
{% endif -%}
|
47
templates/folsom/quantum.conf
Normal file
47
templates/folsom/quantum.conf
Normal file
@ -0,0 +1,47 @@
|
||||
# grizzly
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
[DEFAULT]
|
||||
state_path = /var/lib/quantum
|
||||
lock_path = $state_path/lock
|
||||
bind_host = 0.0.0.0
|
||||
{% if neutron_bind_port -%}
|
||||
bind_port = {{ neutron_bind_port }}
|
||||
{% else -%}
|
||||
bind_port = 9696
|
||||
{% endif -%}
|
||||
{% if core_plugin -%}
|
||||
core_plugin = {{ core_plugin }}
|
||||
{% endif -%}
|
||||
api_paste_config = /etc/quantum/api-paste.ini
|
||||
auth_strategy = keystone
|
||||
control_exchange = quantum
|
||||
notification_driver = quantum.openstack.common.notifier.rpc_notifier
|
||||
default_notification_level = INFO
|
||||
notification_topics = notifications
|
||||
{% if rabbitmq_host -%}
|
||||
rabbit_host = {{ rabbitmq_host }}
|
||||
rabbit_userid = {{ rabbitmq_user }}
|
||||
rabbit_password = {{ rabbitmq_password }}
|
||||
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
|
||||
{% endif -%}
|
||||
{% if neutron_security_groups -%}
|
||||
allow_overlapping_ips = True
|
||||
{% endif -%}
|
||||
|
||||
|
||||
[QUOTAS]
|
||||
quota_driver = quantum.db.quota_db.DbQuotaDriver
|
||||
{% if neutron_security_groups -%}
|
||||
quota_items = network,subnet,port,security_group,security_group_rule
|
||||
{% endif -%}
|
||||
|
||||
[DEFAULT_SERVICETYPE]
|
||||
|
||||
[AGENT]
|
||||
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
|
||||
|
||||
[keystone_authtoken]
|
||||
# auth_token middleware currently set in /etc/quantum/api-paste.ini
|
124
templates/grizzly/etc_nova_api-paste.ini
Normal file
124
templates/grizzly/etc_nova_api-paste.ini
Normal file
@ -0,0 +1,124 @@
|
||||
# grizzly
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
############
|
||||
# Metadata #
|
||||
############
|
||||
[composite:metadata]
|
||||
use = egg:Paste#urlmap
|
||||
/: meta
|
||||
|
||||
[pipeline:meta]
|
||||
pipeline = ec2faultwrap logrequest metaapp
|
||||
|
||||
[app:metaapp]
|
||||
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
|
||||
|
||||
#######
|
||||
# EC2 #
|
||||
#######
|
||||
|
||||
[composite:ec2]
|
||||
use = egg:Paste#urlmap
|
||||
/services/Cloud: ec2cloud
|
||||
|
||||
[composite:ec2cloud]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
|
||||
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
|
||||
|
||||
[filter:ec2faultwrap]
|
||||
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
|
||||
|
||||
[filter:logrequest]
|
||||
paste.filter_factory = nova.api.ec2:RequestLogging.factory
|
||||
|
||||
[filter:ec2lockout]
|
||||
paste.filter_factory = nova.api.ec2:Lockout.factory
|
||||
|
||||
[filter:ec2keystoneauth]
|
||||
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
|
||||
|
||||
[filter:ec2noauth]
|
||||
paste.filter_factory = nova.api.ec2:NoAuth.factory
|
||||
|
||||
[filter:cloudrequest]
|
||||
controller = nova.api.ec2.cloud.CloudController
|
||||
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||
|
||||
[filter:authorizer]
|
||||
paste.filter_factory = nova.api.ec2:Authorizer.factory
|
||||
|
||||
[filter:validator]
|
||||
paste.filter_factory = nova.api.ec2:Validator.factory
|
||||
|
||||
[app:ec2executor]
|
||||
paste.app_factory = nova.api.ec2:Executor.factory
|
||||
|
||||
#############
|
||||
# Openstack #
|
||||
#############
|
||||
|
||||
[composite:osapi_compute]
|
||||
use = call:nova.api.openstack.urlmap:urlmap_factory
|
||||
/: oscomputeversions
|
||||
/v1.1: openstack_compute_api_v2
|
||||
/v2: openstack_compute_api_v2
|
||||
|
||||
[composite:openstack_compute_api_v2]
|
||||
use = call:nova.api.auth:pipeline_factory
|
||||
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
|
||||
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
|
||||
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:ratelimit]
|
||||
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
|
||||
|
||||
[filter:sizelimit]
|
||||
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
|
||||
|
||||
[app:osapi_compute_app_v2]
|
||||
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
|
||||
|
||||
[pipeline:oscomputeversions]
|
||||
pipeline = faultwrap oscomputeversionapp
|
||||
|
||||
[app:oscomputeversionapp]
|
||||
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||
{% if service_host -%}
|
||||
service_protocol = {{ service_protocol }}
|
||||
service_host = {{ service_host }}
|
||||
service_port = {{ service_port }}
|
||||
auth_host = {{ auth_host }}
|
||||
auth_port = {{ auth_port }}
|
||||
auth_protocol = {{ auth_protocol }}
|
||||
admin_tenant_name = {{ admin_tenant_name }}
|
||||
admin_user = {{ admin_user }}
|
||||
admin_password = {{ admin_password }}
|
||||
{% endif -%}
|
||||
# signing_dir is configurable, but the default behavior of the authtoken
|
||||
# middleware should be sufficient. It will create a temporary directory
|
||||
# in the home directory for the user the nova process is running as.
|
||||
#signing_dir = /var/lib/nova/keystone-signing
|
||||
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
|
||||
auth_version = v2.0
|
||||
|
||||
|
37
templates/grizzly/etc_quantum_api-paste.ini
Normal file
37
templates/grizzly/etc_quantum_api-paste.ini
Normal file
@ -0,0 +1,37 @@
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
[composite:quantum]
|
||||
use = egg:Paste#urlmap
|
||||
/: quantumversions
|
||||
/v2.0: quantumapi_v2_0
|
||||
|
||||
[composite:quantumapi_v2_0]
|
||||
use = call:quantum.auth:pipeline_factory
|
||||
noauth = extensions quantumapiapp_v2_0
|
||||
keystone = authtoken keystonecontext extensions quantumapiapp_v2_0
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
|
||||
{% if service_host -%}
|
||||
admin_tenant_name = {{ admin_tenant_name }}
|
||||
admin_user = {{ admin_user }}
|
||||
admin_password = {{ admin_password }}
|
||||
auth_host = {{ auth_host }}
|
||||
auth_port = {{ auth_port }}
|
||||
auth_protocol = http
|
||||
{% endif -%}
|
||||
|
||||
[filter:extensions]
|
||||
paste.filter_factory = quantum.api.extensions:plugin_aware_extension_middleware_factory
|
||||
|
||||
[app:quantumversions]
|
||||
paste.app_factory = quantum.api.versions:Versions.factory
|
||||
|
||||
[app:quantumapiapp_v2_0]
|
||||
paste.app_factory = quantum.api.v2.router:APIRouter.factory
|
||||
|
56
templates/havana/neutron.conf
Normal file
56
templates/havana/neutron.conf
Normal file
@ -0,0 +1,56 @@
|
||||
###############################################################################
|
||||
# [ WARNING ]
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
###############################################################################
|
||||
[DEFAULT]
|
||||
state_path = /var/lib/neutron
|
||||
lock_path = $state_path/lock
|
||||
bind_host = 0.0.0.0
|
||||
auth_strategy = keystone
|
||||
notification_driver = neutron.openstack.common.notifier.rpc_notifier
|
||||
{% if core_plugin -%}
|
||||
core_plugin = {{ core_plugin }}
|
||||
{% endif -%}
|
||||
{% if neutron_security_groups -%}
|
||||
allow_overlapping_ips = True
|
||||
neutron_firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
|
||||
{% endif -%}
|
||||
{% if rabbitmq_host -%}
|
||||
rabbit_host = {{ rabbitmq_host }}
|
||||
rabbit_userid = {{ rabbitmq_user }}
|
||||
rabbit_password = {{ rabbitmq_password }}
|
||||
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
|
||||
{% endif -%}
|
||||
|
||||
[quotas]
|
||||
quota_driver = neutron.db.quota_db.DbQuotaDriver
|
||||
{% if neutron_security_groups -%}
|
||||
quota_items = network,subnet,port,security_group,security_group_rule
|
||||
{% endif -%}
|
||||
|
||||
[agent]
|
||||
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
|
||||
|
||||
[keystone_authtoken]
|
||||
signing_dir = $state_path/keystone-signing
|
||||
{% if service_host -%}
|
||||
service_protocol = {{ service_protocol }}
|
||||
service_host = {{ service_host }}
|
||||
service_port = {{ service_port }}
|
||||
auth_host = {{ auth_host }}
|
||||
auth_port = {{ auth_port }}
|
||||
auth_protocol = {{ auth_protocol }}
|
||||
admin_tenant_name = {{ admin_tenant_name }}
|
||||
admin_user = {{ admin_user }}
|
||||
admin_password = {{ admin_password }}
|
||||
{% endif -%}
|
||||
|
||||
[database]
|
||||
{% if database_host -%}
|
||||
connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}
|
||||
{% else -%}
|
||||
connection = sqlite:////var/lib/neutron/neutron.sqlite
|
||||
{% endif -%}
|
||||
|
||||
[lbaas]
|
||||
[service_providers]
|
34
templates/havana/ovs_neutron_plugin.ini
Normal file
34
templates/havana/ovs_neutron_plugin.ini
Normal file
@ -0,0 +1,34 @@
|
||||
[OVS]
|
||||
tunnel_id_ranges = 1:1000
|
||||
tenant_network_type = gre
|
||||
enable_tunneling = True
|
||||
local_ip = {{ local_ip }}
|
||||
|
||||
[database]
|
||||
{% if database_host -%}
|
||||
connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}
|
||||
{% else -%}
|
||||
connection = sqlite:////var/lib/neutron/neutron.sqlite
|
||||
{% endif -%}
|
||||
|
||||
[securitygroup]
|
||||
{% if neutron_security_groups -%}
|
||||
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
|
||||
{% else -%}
|
||||
firewall_driver = neutron.agent.firewall.NoopFirewallDriver
|
||||
{% endif -%}
|
||||
|
||||
[keystone_authtoken]
|
||||
signing_dir = $state_path/keystone-signing
|
||||
{% if service_host -%}
|
||||
service_protocol = {{ service_protocol }}
|
||||
service_host = {{ service_host }}
|
||||
service_port = {{ service_port }}
|
||||
auth_host = {{ auth_host }}
|
||||
auth_port = {{ auth_port }}
|
||||
auth_protocol = {{ auth_protocol }}
|
||||
admin_tenant_name = {{ admin_tenant_name }}
|
||||
admin_user = {{ admin_user }}
|
||||
admin_password = {{ admin_password }}
|
||||
signing_dir = $state_path/keystone-signing
|
||||
{% endif -%}
|
@ -1,6 +0,0 @@
|
||||
#!/bin/bash
|
||||
. novarc
|
||||
image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
|
||||
[[ ! -e $image ]] && wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
|
||||
uec-publish-tarball $image images
|
||||
euca-describe-images
|
@ -1,12 +0,0 @@
|
||||
#!/bin/bash
|
||||
apt-get -y install unzip
|
||||
|
||||
nova-manage user admin admin
|
||||
echo "User creation: $?"
|
||||
nova-manage project create novaproject admin
|
||||
echo "Project creation: $?"
|
||||
nova-manage network create novanet 11.0.0.0/24 1 255
|
||||
echo "Network creation: $?"
|
||||
nova-manage project zipfile novaproject admin
|
||||
echo "Zipfile creation: $?"
|
||||
unzip nova.zip
|
2
unit_tests/__init__.py
Normal file
2
unit_tests/__init__.py
Normal file
@ -0,0 +1,2 @@
|
||||
import sys
|
||||
sys.path.append('hooks/')
|
78
unit_tests/test_nova_cc_hooks.py
Normal file
78
unit_tests/test_nova_cc_hooks.py
Normal file
@ -0,0 +1,78 @@
|
||||
from mock import MagicMock, patch
|
||||
|
||||
from test_utils import CharmTestCase
|
||||
|
||||
import nova_cc_utils as utils
|
||||
|
||||
_reg = utils.register_configs
|
||||
_map = utils.restart_map
|
||||
|
||||
utils.register_configs = MagicMock()
|
||||
utils.restart_map = MagicMock()
|
||||
|
||||
import nova_cc_hooks as hooks
|
||||
|
||||
utils.register_configs = _reg
|
||||
utils.restart_map = _map
|
||||
|
||||
|
||||
TO_PATCH = [
|
||||
'apt_update',
|
||||
'apt_install',
|
||||
'configure_installation_source',
|
||||
'charm_dir',
|
||||
'do_openstack_upgrade',
|
||||
'openstack_upgrade_available',
|
||||
'config',
|
||||
'determine_packages',
|
||||
'determine_ports',
|
||||
'open_port',
|
||||
'relation_get',
|
||||
'relation_set',
|
||||
'ssh_compute_add',
|
||||
'ssh_known_hosts_b64',
|
||||
'ssh_authorized_keys_b64',
|
||||
'save_script_rc',
|
||||
'execd_preinstall'
|
||||
]
|
||||
|
||||
|
||||
class NovaCCHooksTests(CharmTestCase):
|
||||
def setUp(self):
|
||||
super(NovaCCHooksTests, self).setUp(hooks, TO_PATCH)
|
||||
self.config.side_effect = self.test_config.get
|
||||
self.relation_get.side_effect = self.test_relation.get
|
||||
self.charm_dir.return_value = '/var/lib/juju/charms/nova/charm'
|
||||
|
||||
def test_install_hook(self):
|
||||
self.determine_packages.return_value = [
|
||||
'nova-scheduler', 'nova-api-ec2']
|
||||
self.determine_ports.return_value = [80, 81, 82]
|
||||
hooks.install()
|
||||
self.apt_install.assert_called_with(
|
||||
['nova-scheduler', 'nova-api-ec2'], fatal=True)
|
||||
self.execd_preinstall.assert_called()
|
||||
|
||||
@patch.object(hooks, 'configure_https')
|
||||
def test_config_changed_no_upgrade(self, conf_https):
|
||||
self.openstack_upgrade_available.return_value = False
|
||||
hooks.config_changed()
|
||||
self.assertTrue(self.save_script_rc.called)
|
||||
|
||||
@patch.object(hooks, 'configure_https')
|
||||
def test_config_changed_with_upgrade(self, conf_https):
|
||||
self.openstack_upgrade_available.return_value = True
|
||||
hooks.config_changed()
|
||||
self.assertTrue(self.do_openstack_upgrade.called)
|
||||
self.assertTrue(self.save_script_rc.called)
|
||||
|
||||
def test_compute_changed_ssh_migration(self):
|
||||
self.test_relation.set({
|
||||
'migration_auth_type': 'ssh', 'ssh_public_key': 'fookey',
|
||||
'private-address': '10.0.0.1'})
|
||||
self.ssh_known_hosts_b64.return_value = 'hosts'
|
||||
self.ssh_authorized_keys_b64.return_value = 'keys'
|
||||
hooks.compute_changed()
|
||||
self.ssh_compute_add.assert_called_with('fookey')
|
||||
self.relation_set.assert_called_with(known_hosts='hosts',
|
||||
authorized_keys='keys')
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user