Reference charmhelpers from openstack-charmers

Andres Rodriguez 2013-06-24 12:05:57 -04:00
parent 4aa3873ecb
commit dcc51c5181
16 changed files with 2191 additions and 1 deletion

@@ -1,4 +1,4 @@
-branch: lp:charm-helpers
+branch: lp:~openstack-charmers/charm-tools/pyrewrite-helpers
destination: hooks/charmhelpers
include:
- core

@@ -0,0 +1,196 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from utils import (
relation_ids,
relation_list,
relation_get,
render_template,
juju_log,
config_get,
install,
get_host_ip,
restart
)
from cluster_utils import https
import os
import subprocess
from base64 import b64decode
APACHE_SITE_DIR = "/etc/apache2/sites-available"
SITE_TEMPLATE = "apache2_site.tmpl"
RELOAD_CHECK = "To activate the new configuration"
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
juju_log('INFO',
"Inspecting identity-service relations for SSL certificate.")
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = None
juju_log('INFO',
"Inspecting identity-service relations for CA SSL certificate.")
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not ca_cert:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
def enable_https(port_maps, namespace, cert, key, ca_cert=None):
'''
For a given set of port mappings, configures apache2 as a local HTTPS
reverse proxy using certificates and keys provided in either
configuration data (preferred) or relation data. Assumes ports are not
in use (the calling charm should ensure that).
port_maps: dict: external to internal port mappings
namespace: str: name of charm
'''
def _write_if_changed(path, new_content):
content = None
if os.path.exists(path):
with open(path, 'r') as f:
content = f.read().strip()
if content != new_content:
with open(path, 'w') as f:
f.write(new_content)
return True
else:
return False
juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
http_restart = False
if cert:
cert = b64decode(cert)
if key:
key = b64decode(key)
if ca_cert:
ca_cert = b64decode(ca_cert)
if not cert or not key:
juju_log('ERROR',
"Expected but could not find SSL certificate data, not "
"configuring HTTPS!")
return False
install('apache2')
if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
'proxy', 'proxy_http']):
http_restart = True
ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
if not os.path.exists(ssl_dir):
os.makedirs(ssl_dir)
if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
http_restart = True
if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
http_restart = True
os.chmod(os.path.join(ssl_dir, 'key'), 0600)
install_ca_cert(ca_cert)
sites_dir = '/etc/apache2/sites-available'
for ext_port, int_port in port_maps.items():
juju_log('INFO',
'Creating apache2 reverse proxy vhost'
' for {}:{}'.format(ext_port,
int_port))
site = "{}_{}".format(namespace, ext_port)
site_path = os.path.join(sites_dir, site)
with open(site_path, 'w') as fsite:
context = {
"ext": ext_port,
"int": int_port,
"namespace": namespace,
"private_address": get_host_ip()
}
fsite.write(render_template(SITE_TEMPLATE,
context))
if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
http_restart = True
if http_restart:
restart('apache2')
return True
def disable_https(port_maps, namespace):
'''
Ensure HTTPS reverse proxying is disabled for the given port mappings.
port_maps: dict: of ext -> int port mappings
namespace: str: name of charm
'''
juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))
if (not os.path.exists('/etc/apache2') or
not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
return
http_restart = False
for ext_port in port_maps.keys():
if os.path.exists(os.path.join(APACHE_SITE_DIR,
"{}_{}".format(namespace,
ext_port))):
juju_log('INFO',
"Disabling HTTPS reverse proxy"
" for {} {}.".format(namespace,
ext_port))
if (RELOAD_CHECK in
subprocess.check_output(['a2dissite',
'{}_{}'.format(namespace,
ext_port)])):
http_restart = True
if http_restart:
restart('apache2')
def setup_https(port_maps, namespace, cert, key, ca_cert=None):
'''
Ensures HTTPS is either enabled or disabled for the given port
mappings.
port_maps: dict: of ext -> int port mappings
namespace: str: name of charm
'''
if not https():
disable_https(port_maps, namespace)
else:
enable_https(port_maps, namespace, cert, key, ca_cert)
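
A minimal usage sketch, assuming this file is importable as apache_utils (the file name is not shown in the diff) and that the hook name, namespace and ports are illustrative:

# Hypothetical config-changed hook wiring up setup_https().
from apache_utils import setup_https, get_cert, get_ca_cert

def config_changed():
    # cert/key arrive base64-encoded from config or the identity-service
    # relation; enable_https() b64decodes them before writing to disk.
    cert, key = get_cert()
    ca_cert = get_ca_cert()
    setup_https(port_maps={443: 8080},   # external -> internal
                namespace='my-charm',    # hypothetical charm name
                cert=cert, key=key, ca_cert=ca_cert)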

@@ -0,0 +1,256 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import commands
import subprocess
import os
import shutil
import utils
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'
CEPH_CONF = """[global]
auth supported = %(auth)s
keyring = %(keyring)s
mon host = %(mon_hosts)s
"""
def execute(cmd):
subprocess.check_call(cmd)
def execute_shell(cmd):
subprocess.check_call(cmd, shell=True)
def install():
ceph_dir = "/etc/ceph"
if not os.path.isdir(ceph_dir):
os.mkdir(ceph_dir)
utils.install('ceph-common')
def rbd_exists(service, pool, rbd_img):
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\
(service, pool))
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
execute(cmd)
def pool_exists(service, name):
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
return name in out
def create_pool(service, name):
cmd = [
'rados',
'--id',
service,
'mkpool',
name
]
execute(cmd)
def keyfile_path(service):
return KEYFILE % service
def keyring_path(service):
return KEYRING % service
def create_keyring(service, key):
keyring = keyring_path(service)
if os.path.exists(keyring):
utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
return
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.%s' % service,
'--add-key=%s' % key
]
execute(cmd)
utils.juju_log('INFO', 'ceph: Created new keyring at %s.' % keyring)
def create_key_file(service, key):
# create a file containing the key
keyfile = keyfile_path(service)
if os.path.exists(keyfile):
utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
return
fd = open(keyfile, 'w')
fd.write(key)
fd.close()
utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
def get_ceph_nodes():
hosts = []
for r_id in utils.relation_ids('ceph'):
for unit in utils.relation_list(r_id):
hosts.append(utils.relation_get('private-address',
unit=unit, rid=r_id))
return hosts
def configure(service, key, auth):
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
mon_hosts = ",".join(map(str, hosts))
keyring = keyring_path(service)
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF % locals())
modprobe_kernel_module('rbd')
def image_mapped(image_name):
(rc, out) = commands.getstatusoutput('rbd showmapped')
return image_name in out
def map_block_storage(service, pool, image):
cmd = [
'rbd',
'map',
'%s/%s' % (pool, image),
'--user',
service,
'--secret',
keyfile_path(service),
]
execute(cmd)
def filesystem_mounted(fs):
return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
def make_filesystem(blk_device, fstype='ext4'):
utils.juju_log('INFO',
'ceph: Formatting block device %s as filesystem %s.' %\
(blk_device, fstype))
cmd = ['mkfs', '-t', fstype, blk_device]
execute(cmd)
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
# mount block device into /mnt
cmd = ['mount', '-t', fstype, blk_device, '/mnt']
execute(cmd)
# copy data to /mnt
try:
copy_files(data_src_dst, '/mnt')
except:
# Ignore copy errors; the mount point may simply have no data yet.
pass
# umount block device
cmd = ['umount', '/mnt']
execute(cmd)
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
execute(cmd)
# ensure original ownership of new mount.
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
execute(cmd)
# TODO: re-use
def modprobe_kernel_module(module):
utils.juju_log('INFO', 'Loading kernel module')
cmd = ['modprobe', module]
execute(cmd)
cmd = 'echo %s >> /etc/modules' % module
execute_shell(cmd)
def copy_files(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
To be called from the current cluster leader.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being remounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if its executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if utils.running(svc):
utils.juju_log('INFO',
'Stopping service %s prior to migrating data.' % svc)
utils.stop(svc)
place_data_on_ceph(service, blk_device, mount_point, fstype)
for svc in system_services:
utils.start(svc)
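
A hedged sketch of how a ceph relation hook might chain these helpers together; the module name ceph_utils, the resource name and all values are illustrative:

# Hypothetical ceph-changed hook: configure the client, then ensure storage.
import utils
import ceph_utils
from cluster_utils import eligible_leader

def ceph_changed():
    ceph_utils.install()
    key = utils.relation_get('key')    # cephx key provided by the ceph charm
    auth = utils.relation_get('auth')
    ceph_utils.configure(service='mysql', key=key, auth=auth)
    if eligible_leader('res_mysql_vip'):  # only the leader touches the RBD
        ceph_utils.ensure_ceph_storage(
            service='mysql', pool='mysql', rbd_img='mysql', sizemb=1024,
            mount_point='/var/lib/mysql', blk_device='/dev/rbd/mysql/mysql',
            fstype='ext4', system_services=['mysql'])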

@@ -0,0 +1,130 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from utils import (
juju_log,
relation_ids,
relation_list,
relation_get,
get_unit_hostname,
config_get
)
import subprocess
import os
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
juju_log('INFO', 'Deferring action to CRM leader.')
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
juju_log('INFO', 'Deferring action to oldest service unit.')
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
or relation data to configure HTTPS.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if (relation_get('https_keystone', rid=r_id, unit=unit) and
relation_get('ssl_cert', rid=r_id, unit=unit) and
relation_get('ssl_key', rid=r_id, unit=unit) and
relation_get('ca_cert', rid=r_id, unit=unit)):
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_haproxy_port(public_port):
'''
Determine correct HAProxy listening port based on the standard public
port and the existence of an HTTPS reverse proxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
i += 1
return public_port - (i * 10)
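
The two helpers step each tier back in units of ten so haproxy and an apache2 HTTPS frontend can sit in front of the API on the same host. A worked example, assuming a standard public port of 8776:

# Values follow directly from the two helpers above.
public_port = 8776
haproxy_port = public_port - 10      # determine_haproxy_port(): https() only
api_port = public_port - (2 * 10)    # determine_api_port(): clustered + https()
assert (haproxy_port, api_port) == (8766, 8756)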

@@ -0,0 +1,55 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
from utils import (
relation_ids,
relation_list,
relation_get,
unit_get,
reload,
render_template
)
import os
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
HAPROXY_DEFAULT = '/etc/default/haproxy'
def configure_haproxy(service_ports):
'''
Configure HAProxy based on the current peers in the service
cluster using the provided port map, e.g.:
"swift": [ 8080, 8070 ]
HAProxy will also be reloaded/started if required.
service_ports: dict: dict of lists of [ frontend, backend ]
'''
cluster_hosts = {}
cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
unit_get('private-address')
for r_id in relation_ids('cluster'):
for unit in relation_list(r_id):
cluster_hosts[unit.replace('/', '-')] = \
relation_get(attribute='private-address',
rid=r_id,
unit=unit)
context = {
'units': cluster_hosts,
'service_ports': service_ports
}
with open(HAPROXY_CONF, 'w') as f:
f.write(render_template(os.path.basename(HAPROXY_CONF),
context))
with open(HAPROXY_DEFAULT, 'w') as f:
f.write('ENABLED=1\n')
reload('haproxy')
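
A short sketch of calling this from a peer relation hook; the module name haproxy_utils and the ports are illustrative, and the charm is expected to ship a haproxy.cfg template under templates/:

# Hypothetical cluster-changed hook: frontend 8080 balances peers' 8070.
from haproxy_utils import configure_haproxy

def cluster_changed():
    configure_haproxy({'swift': [8080, 8070]})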

@@ -0,0 +1,333 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Paul Collins <paul.collins@canonical.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import json
import os
import subprocess
import socket
import sys
def do_hooks(hooks):
hook = os.path.basename(sys.argv[0])
try:
hook_func = hooks[hook]
except KeyError:
juju_log('INFO',
"This charm doesn't know how to handle '{}'.".format(hook))
else:
hook_func()
def install(*pkgs):
cmd = [
'apt-get',
'-y',
'install'
]
for pkg in pkgs:
cmd.append(pkg)
subprocess.check_call(cmd)
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
install('python-jinja2')
import jinja2
try:
import dns.resolver
except ImportError:
install('python-dnspython')
import dns.resolver
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
CLOUD_ARCHIVE_POCKETS = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly'
}
def configure_source():
source = str(config_get('openstack-origin'))
if not source:
return
if source.startswith('ppa:'):
cmd = [
'add-apt-repository',
source
]
subprocess.check_call(cmd)
if source.startswith('cloud:'):
# CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
# cloud:precise-folsom/updates or cloud:precise-folsom/proposed
install('ubuntu-cloud-keyring')
pocket = source.split(':')[1]
pocket = pocket.split('-')[1]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
if source.startswith('deb'):
l = len(source.split('|'))
if l == 2:
(apt_line, key) = source.split('|')
cmd = [
'apt-key',
'adv', '--keyserver', 'keyserver.ubuntu.com',
'--recv-keys', key
]
subprocess.check_call(cmd)
elif l == 1:
apt_line = source
with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
apt.write(apt_line + "\n")
cmd = [
'apt-get',
'update'
]
subprocess.check_call(cmd)
# Protocols
TCP = 'TCP'
UDP = 'UDP'
def expose(port, protocol='TCP'):
cmd = [
'open-port',
'{}/{}'.format(port, protocol)
]
subprocess.check_call(cmd)
def juju_log(severity, message):
cmd = [
'juju-log',
'--log-level', severity,
message
]
subprocess.check_call(cmd)
cache = {}
def cached(func):
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
@cached
def relation_ids(relation):
cmd = [
'relation-ids',
relation
]
result = subprocess.check_output(cmd).split()
if not result:
return None
else:
return result
@cached
def relation_list(rid):
cmd = [
'relation-list',
'-r', rid,
]
result = subprocess.check_output(cmd).split()
if not result:
return None
else:
return result
@cached
def relation_get(attribute, unit=None, rid=None):
cmd = [
'relation-get',
]
if rid:
cmd.append('-r')
cmd.append(rid)
cmd.append(attribute)
if unit:
cmd.append(unit)
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
"""Obtain all relation data as dict by way of JSON"""
cmd = [
'relation-get', '--format=json'
]
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
if remote_unit:
remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
os.environ['JUJU_REMOTE_UNIT'] = remote_unit
j = subprocess.check_output(cmd)
if remote_unit and remote_unit_orig:
os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
d = json.loads(j)
settings = {}
# convert unicode to strings
for k, v in d.iteritems():
settings[str(k)] = str(v)
return settings
def relation_set(**kwargs):
cmd = [
'relation-set'
]
args = []
for k, v in kwargs.items():
if k == 'rid':
if v:
cmd.append('-r')
cmd.append(v)
else:
args.append('{}={}'.format(k, v))
cmd += args
subprocess.check_call(cmd)
@cached
def unit_get(attribute):
cmd = [
'unit-get',
attribute
]
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
else:
return value
@cached
def config_get(attribute):
cmd = [
'config-get',
'--format',
'json',
]
out = subprocess.check_output(cmd).strip() # IGNORE:E1103
cfg = json.loads(out)
try:
return cfg[attribute]
except KeyError:
return None
@cached
def get_unit_hostname():
return socket.gethostname()
@cached
def get_host_ip(hostname=None):
hostname = hostname or unit_get('private-address')
try:
# Test to see if already an IPv4 address
socket.inet_aton(hostname)
return hostname
except socket.error:
answers = dns.resolver.query(hostname, 'A')
if answers:
return answers[0].address
return None
def _svc_control(service, action):
subprocess.check_call(['service', service, action])
def restart(*services):
for service in services:
_svc_control(service, 'restart')
def stop(*services):
for service in services:
_svc_control(service, 'stop')
def start(*services):
for service in services:
_svc_control(service, 'start')
def reload(*services):
for service in services:
try:
_svc_control(service, 'reload')
except subprocess.CalledProcessError:
# Reload failed - either service does not support reload
# or it was not running - restart will fixup most things
_svc_control(service, 'restart')
def running(service):
try:
output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or
"is running" in output):
return True
else:
return False
def is_relation_made(relation, key='private-address'):
for r_id in (relation_ids(relation) or []):
for unit in (relation_list(r_id) or []):
if relation_get(key, rid=r_id, unit=unit):
return True
return False
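
A minimal hook script built on do_hooks(), assuming this module is importable as utils; the hook bodies are illustrative. Juju invokes the script through a symlink named after the hook, which do_hooks() uses for dispatch:

#!/usr/bin/python
# Hypothetical hooks file dispatching via do_hooks().
import sys
import utils

def install_hook():
    utils.configure_source()
    utils.install('apache2')

def config_changed():
    utils.expose(80)

utils.do_hooks({
    'install': install_hook,
    'config-changed': config_changed,
})
sys.exit(0)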

@@ -0,0 +1,181 @@
from charmhelpers.core.hookenv import (
config,
local_unit,
log,
relation_get,
relation_ids,
related_units,
unit_get,
)
class OSContextError(Exception):
pass
def context_complete(ctxt):
_missing = []
for k, v in ctxt.iteritems():
if v is None or v == '':
_missing.append(k)
if _missing:
print 'Missing required data: %s' % ' '.join(_missing)
return False
return True
class OSContextGenerator(object):
interfaces = []
def __call__(self):
raise NotImplementedError
class SharedDBContext(OSContextGenerator):
interfaces = ['shared-db']
def __call__(self):
log('Generating template context for shared-db')
conf = config()
try:
database = conf['database']
username = conf['database-user']
except KeyError as e:
log('Could not generate shared_db context. '
'Missing required charm config options: %s.' % e)
raise OSContextError
ctxt = {}
for rid in relation_ids('shared-db'):
for unit in related_units(rid):
ctxt = {
'database_host': relation_get('db_host', rid=rid,
unit=unit),
'database': database,
'database_user': username,
'database_password': relation_get('password', rid=rid,
unit=unit)
}
if not context_complete(ctxt):
return {}
return ctxt
class IdentityServiceContext(OSContextGenerator):
interfaces = ['identity-service']
def __call__(self):
log('Generating template context for identity-service')
ctxt = {}
for rid in relation_ids('identity-service'):
for unit in related_units(rid):
ctxt = {
'service_port': relation_get('service_port', rid=rid,
unit=unit),
'service_host': relation_get('service_host', rid=rid,
unit=unit),
'auth_host': relation_get('auth_host', rid=rid, unit=unit),
'auth_port': relation_get('auth_port', rid=rid, unit=unit),
'admin_tenant_name': relation_get('service_tenant',
rid=rid, unit=unit),
'admin_user': relation_get('service_username', rid=rid,
unit=unit),
'admin_password': relation_get('service_password', rid=rid,
unit=unit),
# XXX: Hard-coded http.
'service_protocol': 'http',
'auth_protocol': 'http',
}
if not context_complete(ctxt):
return {}
return ctxt
class AMQPContext(OSContextGenerator):
interfaces = ['amqp']
def __call__(self):
log('Generating template context for amqp')
conf = config()
try:
username = conf['rabbit-user']
vhost = conf['rabbit-vhost']
except KeyError as e:
log('Could not generate amqp context. '
'Missing required charm config options: %s.' % e)
raise OSContextError
ctxt = {}
for rid in relation_ids('amqp'):
for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit):
rabbitmq_host = relation_get('vip', rid=rid, unit=unit)
else:
rabbitmq_host = relation_get('private-address',
rid=rid, unit=unit)
ctxt = {
'rabbitmq_host': rabbitmq_host,
'rabbitmq_user': username,
'rabbitmq_password': relation_get('password', rid=rid,
unit=unit),
'rabbitmq_virtual_host': vhost,
}
if not context_complete(ctxt):
return {}
return ctxt
class CephContext(OSContextGenerator):
interfaces = ['ceph']
def __call__(self):
'''This generates context for /etc/ceph/ceph.conf templates'''
log('Generating template context for ceph')
mon_hosts = []
auth = None
for rid in relation_ids('ceph'):
for unit in related_units(rid):
mon_hosts.append(relation_get('private-address', rid=rid,
unit=unit))
auth = relation_get('auth', rid=rid, unit=unit)
ctxt = {
'mon_hosts': ' '.join(mon_hosts),
'auth': auth,
}
if not context_complete(ctxt):
return {}
return ctxt
class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
def __call__(self):
'''
Builds half a context for the haproxy template, which describes
all peers to be included in the cluster. Each charm needs to include
its own context generator that describes the port mapping.
'''
if not relation_ids('cluster'):
return {}
cluster_hosts = {}
l_unit = local_unit().replace('/', '-')
cluster_hosts[l_unit] = unit_get('private-address')
for rid in relation_ids('cluster'):
for unit in related_units(rid):
_unit = unit.replace('/', '-')
addr = relation_get('private-address', rid=rid, unit=unit)
cluster_hosts[_unit] = addr
ctxt = {
'units': cluster_hosts,
}
if len(cluster_hosts.keys()) > 1:
# Enable haproxy when we have enough peers.
log('Ensuring haproxy enabled in /etc/default/haproxy.')
with open('/etc/default/haproxy', 'w') as out:
out.write('ENABLED=1\n')
return ctxt
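
A charm-specific generator only needs to subclass OSContextGenerator and return a dict (an empty dict marks the context incomplete). A hedged sketch with a hypothetical class; the import path follows the charmhelpers.contrib.openstack layout assumed by this file's imports:

# Hypothetical charm-local context generator.
from charmhelpers.contrib.openstack.context import OSContextGenerator

class WorkerConfigContext(OSContextGenerator):
    interfaces = []  # needs no relation data, so it is always complete

    def __call__(self):
        import multiprocessing
        return {'workers': multiprocessing.cpu_count() * 2}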

@@ -0,0 +1,228 @@
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
import apt_pkg as apt
import subprocess
import os
import sys
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
ubuntu_openstack_release = {
'oneiric': 'diablo',
'precise': 'essex',
'quantal': 'folsom',
'raring': 'grizzly',
}
openstack_codenames = {
'2011.2': 'diablo',
'2012.1': 'essex',
'2012.2': 'folsom',
'2013.1': 'grizzly',
'2013.2': 'havana',
}
# The ugly duckling
swift_codenames = {
'1.4.3': 'diablo',
'1.4.8': 'essex',
'1.7.4': 'folsom',
'1.7.6': 'grizzly',
'1.7.7': 'grizzly',
'1.8.0': 'grizzly',
}
def juju_log(msg):
subprocess.check_call(['juju-log', msg])
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg)
sys.exit(1)
def lsb_release():
'''Return /etc/lsb-release in a dict'''
lsb = open('/etc/lsb-release', 'r')
d = {}
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src == 'distro':
try:
rel = ubuntu_openstack_release[ubuntu_rel]
except KeyError:
e = 'Could not derive openstack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in openstack_codenames.iteritems():
if v in src:
return v
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in openstack_codenames.iteritems():
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_codename_package(pkg):
'''Derive OpenStack release codename from an installed package.'''
apt.init()
cache = apt.Cache()
try:
pkg = cache[pkg]
except:
e = 'Could not determine version of installed package: %s' % pkg
error_out(e)
vers = apt.UpstreamVersion(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
vers = vers[:5]
return swift_codenames[vers]
else:
vers = vers[:6]
return openstack_codenames[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg)
if 'swift' in pkg:
vers_map = swift_codenames
else:
vers_map = openstack_codenames
for version, cname in vers_map.iteritems():
if cname == codename:
return version
#e = "Could not determine OpenStack version for package: %s" % pkg
#error_out(e)
def import_key(keyid):
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
def configure_installation_source(rel):
'''Configure apt installation source.'''
if rel == 'distro':
return
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
import_key(key)
elif l == 1:
src = rel
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly'
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
# TODO: Replace key import with cloud archive keyring pkg.
import_key(CLOUD_ARCHIVE_KEY_ID)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path)
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"]
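
A hedged sketch of typical install-hook usage, assuming the module is importable as openstack_utils and the host runs Ubuntu precise; the origin string follows the cloud:<series>-<release> convention handled above:

# Hypothetical install-time source configuration.
from openstack_utils import (
    configure_installation_source,
    get_os_codename_install_source,
)

origin = 'cloud:precise-grizzly'
configure_installation_source(origin)         # writes cloud-archive.list
print get_os_codename_install_source(origin)  # -> 'grizzly'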

@@ -0,0 +1,210 @@
import logging
import os
try:
import jinja2
except ImportError:
pass
logging.basicConfig(level=logging.INFO)
"""
WIP Abstract templating system for the OpenStack charms.
The idea is that an openstack charm can register a number of config files
associated with common context generators. The context generators are
responsible for inspecting charm config/relation data/deployment state
and presenting correct context to the template. Generic context generators
could live somewhere in charmhelpers.contrib.openstack, and each
charm can implement their own specific ones as well.
Ideally a charm would register all its config files somewhere in its namespace,
eg cinder_utils.py:
from charmhelpers.contrib.openstack import templating, context
config_files = {
'/etc/cinder/cinder.conf': [context.shared_db,
context.amqp,
context.ceph],
'/etc/cinder/api-paste.ini': [context.identity_service]
}
configs = templating.OSConfigRenderer(template_dir='templates/')
[configs.register(k, v) for k, v in config_files.iteritems()]
Hooks can then render config files as needed, eg:
def config_changed():
configs.render_all()
def db_changed():
configs.render('/etc/cinder/cinder.conf')
check_call(['cinder-manage', 'db', 'sync'])
This would look very similar for nova/glance/etc.
The template loader built by get_loader() below creates a
jinja2.ChoiceLoader that should help reduce fragmentation of a
charm's templates across OpenStack
releases, so we do not need to maintain many copies of templates or juggle
symlinks. The constructed loader lets the template be loaded from the most
recent OS release-specific template dir or a base template dir.
For example, say cinder has no changes in config structure across any OS
releases, all OS releases share the same templates from the base directory:
templates/api-paste.ini
templates/cinder.conf
Then, say that from Grizzly onward, cinder.conf's format has changed:
templates/api-paste.ini
templates/cinder.conf
templates/grizzly/cinder.conf
Grizzly and beyond will load from templates/grizzly, but any release prior will
load from templates/. If some change in Icehouse breaks config format again:
templates/api-paste.ini
templates/cinder.conf
templates/grizzly/cinder.conf
templates/icehouse/cinder.conf
Icehouse and beyond will load from icehouse/, Grizzly and Havana from
grizzly/, and previous releases from the base templates/.
"""
class OSConfigException(Exception):
pass
def get_loader(templates_dir, os_release):
"""
Create a jinja2.ChoiceLoader containing template dirs up to
and including os_release. If a release's template directory
is missing under templates_dir, it is omitted from the loader.
templates_dir is added to the bottom of the search list as a base
loading dir.
:param templates_dir: str: Base template directory containing release
sub-directories.
:param os_release : str: OpenStack release codename to construct template
loader.
:returns : jinja2.ChoiceLoader constructed with a list of
jinja2.FilesystemLoaders, ordered in descending
order by OpenStack release.
"""
tmpl_dirs = (
('essex', os.path.join(templates_dir, 'essex')),
('folsom', os.path.join(templates_dir, 'folsom')),
('grizzly', os.path.join(templates_dir, 'grizzly')),
('havana', os.path.join(templates_dir, 'havana')),
('icehouse', os.path.join(templates_dir, 'icehouse')),
)
if not os.path.isdir(templates_dir):
logging.error('Templates directory not found @ %s.' % templates_dir)
raise OSConfigException
loaders = [jinja2.FileSystemLoader(templates_dir)]
for rel, tmpl_dir in tmpl_dirs:
if os.path.isdir(tmpl_dir):
loaders.insert(0, jinja2.FileSystemLoader(tmpl_dir))
if rel == os_release:
break
logging.info('Creating choice loader with dirs: %s' %
[l.searchpath for l in loaders])
return jinja2.ChoiceLoader(loaders)
class OSConfigTemplate(object):
def __init__(self, config_file, contexts):
self.config_file = config_file
if hasattr(contexts, '__call__'):
self.contexts = [contexts]
else:
self.contexts = contexts
self._complete_contexts = []
def context(self):
ctxt = {}
for context in self.contexts:
_ctxt = context()
if _ctxt:
ctxt.update(_ctxt)
# track interfaces for every complete context.
[self._complete_contexts.append(interface)
for interface in context.interfaces
if interface not in self._complete_contexts]
return ctxt
def complete_contexts(self):
'''
Return a list of interfaces that have satisfied contexts.
'''
if self._complete_contexts:
return self._complete_contexts
self.context()
return self._complete_contexts
class OSConfigRenderer(object):
def __init__(self, templates_dir, openstack_release):
if not os.path.isdir(templates_dir):
logging.error('Could not locate templates dir %s' % templates_dir)
raise OSConfigException
self.templates_dir = templates_dir
self.openstack_release = openstack_release
self.templates = {}
self._tmpl_env = None
def register(self, config_file, contexts):
self.templates[config_file] = OSConfigTemplate(config_file=config_file,
contexts=contexts)
logging.info('Registered config file: %s' % config_file)
def _get_tmpl_env(self):
if not self._tmpl_env:
loader = get_loader(self.templates_dir, self.openstack_release)
self._tmpl_env = jinja2.Environment(loader=loader)
def render(self, config_file):
if config_file not in self.templates:
logging.error('Config not registered: %s' % config_file)
raise OSConfigException
ctxt = self.templates[config_file].context()
_tmpl = os.path.basename(config_file)
logging.info('Rendering from template: %s' % _tmpl)
self._get_tmpl_env()
_tmpl = self._tmpl_env.get_template(_tmpl)
logging.info('Loaded template from %s' % _tmpl.filename)
return _tmpl.render(ctxt)
def write(self, config_file):
if config_file not in self.templates:
logging.error('Config not registered: %s' % config_file)
raise OSConfigException
with open(config_file, 'wb') as out:
out.write(self.render(config_file))
logging.info('Wrote template %s.' % config_file)
def write_all(self):
[self.write(k) for k in self.templates.iterkeys()]
def complete_contexts(self):
'''
Returns a list of context interfaces that yield a complete context.
'''
interfaces = []
[interfaces.extend(i.complete_contexts())
for i in self.templates.itervalues()]
return interfaces
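
Tying the pieces together, a hedged sketch in the spirit of the module docstring; the import path, template directory and release are illustrative:

# Hypothetical renderer setup for a cinder-like charm.
from charmhelpers.contrib.openstack.templating import OSConfigRenderer

configs = OSConfigRenderer(templates_dir='templates/',
                           openstack_release='grizzly')
configs.register('/etc/cinder/cinder.conf', [])  # contexts omitted here
# get_loader() will search templates/grizzly/, then any earlier release
# directories that exist, then templates/ itself.
configs.write_all()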

@@ -0,0 +1,340 @@
"Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
import os
import json
import yaml
import subprocess
import UserDict
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
MARKER = object()
cache = {}
def cached(func):
''' Cache return values for multiple executions of func + args
For example:
@cached
def unit_get(attribute):
pass
unit_get('test')
will cache the result of unit_get + 'test' for future calls.
'''
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
def flush(key):
''' Flushes any entries from function cache where the
key is found in the function+args '''
flush_list = []
for item in cache:
if key in item:
flush_list.append(item)
for item in flush_list:
del cache[item]
def log(message, level=None):
"Write a message to the juju log"
command = ['juju-log']
if level:
command += ['-l', level]
command += [message]
subprocess.call(command)
class Serializable(UserDict.IterableUserDict):
"Wrapper, an object that can be serialized to yaml or json"
def __init__(self, obj):
# wrap the object
UserDict.IterableUserDict.__init__(self)
self.data = obj
def __getattr__(self, attr):
# See if this object has attribute.
if attr in ("json", "yaml", "data"):
return self.__dict__[attr]
# Check for attribute in wrapped object.
got = getattr(self.data, attr, MARKER)
if got is not MARKER:
return got
# Proxy to the wrapped object via dict interface.
try:
return self.data[attr]
except KeyError:
raise AttributeError(attr)
def __getstate__(self):
# Pickle as a standard dictionary.
return self.data
def __setstate__(self, state):
# Unpickle into our wrapper.
self.data = state
def json(self):
"Serialize the object to json"
return json.dumps(self.data)
def yaml(self):
"Serialize the object to yaml"
return yaml.dump(self.data)
def execution_environment():
"""A convenient bundling of the current execution context"""
context = {}
context['conf'] = config()
context['reltype'] = relation_type()
context['relid'] = relation_id()
context['unit'] = local_unit()
context['rels'] = relations()
context['rel'] = relation_get()
context['env'] = os.environ
return context
def in_relation_hook():
"Determine whether we're running in a relation hook"
return 'JUJU_RELATION' in os.environ
def relation_type():
"The scope for the current relation hook"
return os.environ.get('JUJU_RELATION', None)
def relation_id():
"The relation ID for the current relation hook"
return os.environ.get('JUJU_RELATION_ID', None)
def local_unit():
"Local unit ID"
return os.environ['JUJU_UNIT_NAME']
def remote_unit():
"The remote unit for the current relation hook"
return os.environ['JUJU_REMOTE_UNIT']
def service_name():
"The name of the service group this unit belongs to"
return local_unit().split('/')[0]
@cached
def config(scope=None):
"Juju charm configuration"
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
value = json.loads(subprocess.check_output(config_cmd_line))
except ValueError:
return None
if isinstance(value, dict):
return Serializable(value)
else:
return value
@cached
def relation_get(attribute=None, unit=None, rid=None):
_args = ['relation-get', '--format=json']
if rid:
_args.append('-r')
_args.append(rid)
_args.append(attribute or '-')
if unit:
_args.append(unit)
try:
value = json.loads(subprocess.check_output(_args))
except ValueError:
return None
if isinstance(value, dict):
return Serializable(value)
else:
return value
def relation_set(relation_id=None, relation_settings={}, **kwargs):
relation_cmd_line = ['relation-set']
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
for k, v in (relation_settings.items() + kwargs.items()):
if v is None:
relation_cmd_line.append('{}='.format(k))
else:
relation_cmd_line.append('{}={}'.format(k, v))
subprocess.check_call(relation_cmd_line)
# Flush cache of any relation-gets for local unit
flush(local_unit())
@cached
def relation_ids(reltype=None):
"A list of relation_ids"
reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
return json.loads(subprocess.check_output(relid_cmd_line))
return []
@cached
def related_units(relid=None):
"A list of related units"
relid = relid or relation_id()
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
units_cmd_line.extend(('-r', relid))
return json.loads(subprocess.check_output(units_cmd_line))
@cached
def relation_for_unit(unit=None, rid=None):
"Get the json representation of a unit's relation"
unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid)
for key in relation:
if key.endswith('-list'):
relation[key] = relation[key].split()
relation['__unit__'] = unit
return Serializable(relation)
@cached
def relations_for_id(relid=None):
"Get relations of a specific relation ID"
relation_data = []
relid = relid or relation_id()
for unit in related_units(relid):
unit_data = relation_for_unit(unit, relid)
unit_data['__relid__'] = relid
relation_data.append(unit_data)
return relation_data
@cached
def relations_of_type(reltype=None):
"Get relations of a specific type"
relation_data = []
reltype = reltype or relation_type()
for relid in relation_ids(reltype):
for relation in relations_for_id(relid):
relation['__relid__'] = relid
relation_data.append(relation)
return relation_data
@cached
def relation_types():
"Get a list of relation types supported by this charm"
charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf)
rel_types = []
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
mdf.close()
return rel_types
@cached
def relations():
rels = {}
for reltype in relation_types():
relids = {}
for relid in relation_ids(reltype):
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
for unit in related_units(relid):
reldata = relation_get(unit=unit, rid=relid)
units[unit] = reldata
relids[relid] = units
rels[reltype] = relids
return rels
def open_port(port, protocol="TCP"):
"Open a service network port"
_args = ['open-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def close_port(port, protocol="TCP"):
"Close a service network port"
_args = ['close-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
@cached
def unit_get(attribute):
_args = ['unit-get', '--format=json', attribute]
try:
return json.loads(subprocess.check_output(_args))
except ValueError:
return None
def unit_private_ip():
return unit_get('private-address')
class UnregisteredHookError(Exception):
pass
class Hooks(object):
def __init__(self):
super(Hooks, self).__init__()
self._hooks = {}
def register(self, name, function):
self._hooks[name] = function
def execute(self, args):
hook_name = os.path.basename(args[0])
if hook_name in self._hooks:
self._hooks[hook_name]()
else:
raise UnregisteredHookError(hook_name)
def hook(self, *hook_names):
def wrapper(decorated):
for hook_name in hook_names:
self.register(hook_name, decorated)
else:
self.register(decorated.__name__, decorated)
return decorated
return wrapper
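
A hedged example of the decorator-based dispatch; the hook names and log message are illustrative, while charmhelpers.core.hookenv matches the import path used elsewhere in this commit:

#!/usr/bin/python
import sys
from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

hooks = Hooks()

@hooks.hook('install', 'upgrade-charm')
def install():
    log('running install steps')

if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))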

@@ -0,0 +1,261 @@
"""Tools for working with the host system"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Nick Moffitt <nick.moffitt@canonical.com>
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import apt_pkg
import os
import pwd
import grp
import subprocess
import hashlib
from hookenv import log, execution_environment
def service_start(service_name):
service('start', service_name)
def service_stop(service_name):
service('stop', service_name)
def service_restart(service_name):
service('restart', service_name)
def service_reload(service_name, restart_on_failure=False):
if not service('reload', service_name) and restart_on_failure:
service('restart', service_name)
def service(action, service_name):
cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if system_user or password is None:
cmd.append('--system')
else:
cmd.extend([
'--create-home',
'--shell', shell,
'--password', password,
])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = [
'gpasswd', '-a',
username,
group
]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
def rsync(from_path, to_path, flags='-r', options=None):
"""Replicate the contents of a path"""
context = execution_environment()
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
cmd.extend(options)
cmd.append(from_path.format(**context))
cmd.append(to_path.format(**context))
log(" ".join(cmd))
return subprocess.check_output(cmd).strip()
def symlink(source, destination):
"""Create a symbolic link"""
context = execution_environment()
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source.format(**context),
destination.format(**context)
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0555, force=False):
"""Create a directory"""
context = execution_environment()
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner.format(**context)).pw_uid
gid = grp.getgrnam(group.format(**context)).gr_gid
realpath = os.path.abspath(path)
if os.path.exists(realpath):
if force and not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
else:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs):
"""Create or overwrite a file with the contents of a string"""
context = execution_environment()
context.update(kwargs)
log("Writing file {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner.format(**context)).pw_uid
gid = grp.getgrnam(group.format(**context)).gr_gid
with open(path.format(**context), 'w') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
target.write(fmtstr.format(**context))
def render_template_file(source, destination, **kwargs):
"""Create or overwrite a file using a template"""
log("Rendering template {} for {}".format(source,
destination))
context = execution_environment()
with open(source.format(**context), 'r') as template:
write_file(destination.format(**context), template.read(),
**kwargs)
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
apt_pkg.init()
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
try:
p = cache[package]
p.current_ver or _pkgs.append(package)
except KeyError:
log('Package {} has no installation candidate.'.format(package),
level='WARNING')
_pkgs.append(package)
return _pkgs
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
options = options or []
cmd = ['apt-get', '-y']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def mount(device, mountpoint, options=None, persist=False):
'''Mount a filesystem'''
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
# TODO: update fstab
pass
return True
def umount(mountpoint, persist=False):
'''Unmount a filesystem'''
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
# TODO: update fstab
pass
return True
def mounts():
'''List of all mounted volumes as [[mountpoint,device],[...]]'''
with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split()
for l in f.readlines()]]
return system_mounts
def file_hash(path):
''' Generate a md5 hash of the contents of 'path' or None if not found '''
if os.path.exists(path):
h = hashlib.md5()
with open(path, 'r') as source:
h.update(source.read()) # IGNORE:E1101 - it does have update
return h.hexdigest()
else:
return None
def restart_on_change(restart_map):
''' Restart services based on configuration files changing
This function is used a decorator, for example
@restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
})
def ceph_client_changed():
...
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function.
'''
def wrap(f):
def wrapped_f(*args):
checksums = {}
for path in restart_map:
checksums[path] = file_hash(path)
f(*args)
restarts = []
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
for service_name in list(set(restarts)):
service('restart', service_name)
return wrapped_f
return wrap
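
A common pairing sketch for the package helpers above (the charmhelpers.core.host import path is assumed from this file's position alongside hookenv):

# Install only the packages actually missing; fail the hook if apt does.
from charmhelpers.core.host import apt_install, filter_installed_packages

apt_install(filter_installed_packages(['haproxy', 'apache2']), fatal=True)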