Initial charm

James Page 2013-09-03 17:52:02 +01:00
commit 4a9f194caa
24 changed files with 1690 additions and 0 deletions

14
Makefile Normal file

@@ -0,0 +1,14 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks
@flake8 --exclude hooks/charmhelpers unit_tests
@charm proof
test:
@echo Starting tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
sync:
@charm-helper-sync -c charm-helpers-sync.yaml

6
charm-helpers-sync.yaml Normal file

@@ -0,0 +1,6 @@
branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
- core
- fetch
- contrib.hahelpers

@@ -0,0 +1,58 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
log("Inspecting identity-service relations for SSL certificate.",
level=INFO)
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = None
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not ca_cert:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
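A minimal usage sketch for the SSL helpers above, assuming a Juju hook environment; the import path and file locations are illustrative, since this listing does not show the helper's filename:

# Sketch only: module path and target paths are assumptions.
from charmhelpers.contrib.hahelpers.ssl_utils import (
    get_cert,
    get_ca_cert,
    install_ca_cert,
)


def configure_https():
    cert, key = get_cert()
    if not (cert and key):
        # Neither charm config nor the identity-service relation has
        # provided a certificate yet; retry on a later hook.
        return
    with open('/etc/ssl/certs/service.crt', 'w') as f:    # illustrative
        f.write(cert)
    with open('/etc/ssl/private/service.key', 'w') as f:  # illustrative
        f.write(key)
    install_ca_cert(get_ca_cert())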

@@ -0,0 +1,294 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import commands
import os
import shutil
import time
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
ERROR
)
from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
umount,
)
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'
CEPH_CONF = """[global]
auth supported = %(auth)s
keyring = %(keyring)s
mon host = %(mon_hosts)s
"""
def running(service):
# this local util can be dropped as soon as the following branch lands
# in lp:charm-helpers
# https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
try:
output = check_output(['service', service, 'status'])
except CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def install():
ceph_dir = "/etc/ceph"
if not os.path.isdir(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
(service, pool))
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
check_call(cmd)
def pool_exists(service, name):
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
return name in out
def create_pool(service, name):
cmd = [
'rados',
'--id',
service,
'mkpool',
name
]
check_call(cmd)
def keyfile_path(service):
return KEYFILE % service
def keyring_path(service):
return KEYRING % service
def create_keyring(service, key):
keyring = keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=INFO)
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.%s' % service,
'--add-key=%s' % key
]
check_call(cmd)
log('ceph: Created new keyring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
# create a file containing the key
keyfile = keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
fd = open(keyfile, 'w')
fd.write(key)
fd.close()
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth):
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
mon_hosts = ",".join(map(str, hosts))
keyring = keyring_path(service)
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF % locals())
modprobe_kernel_module('rbd')
def image_mapped(image_name):
(rc, out) = commands.getstatusoutput('rbd showmapped')
return image_name in out
def map_block_storage(service, pool, image):
cmd = [
'rbd',
'map',
'%s/%s' % (pool, image),
'--user',
service,
'--secret',
keyfile_path(service),
]
check_call(cmd)
def filesystem_mounted(fs):
return fs in [f for m, f in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
try:
copy_files(data_src_dst, '/mnt')
except:
pass
# umount block device
umount('/mnt')
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
check_call(cmd)
# TODO: re-use
def modprobe_kernel_module(module):
log('ceph: Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
cmd = 'echo %s >> /etc/modules' % module
check_call(cmd, shell=True)
def copy_files(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
To be called from the current cluster leader.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being remounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool %s.' % pool, level=INFO)
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if it's executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if running(svc):
log('Stopping service %s prior to migrating data.' % svc,
level=INFO)
service_stop(svc)
place_data_on_ceph(service, blk_device, mount_point, fstype)
for svc in system_services:
service_start(svc)
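A sketch of how a charm's ceph relation hook might drive the helpers above; the service, pool, device and paths are illustrative, and per the docstring only the cluster leader should call ensure_ceph_storage:

# Illustrative caller; module path and all names are assumptions.
from charmhelpers.contrib.hahelpers.ceph_utils import (
    install,
    configure,
    ensure_ceph_storage,
)
from charmhelpers.core.hookenv import relation_get


def ceph_changed():
    install()  # ensures /etc/ceph exists and ceph-common is installed
    key = relation_get('key')
    auth = relation_get('auth')
    if not (key and auth):
        return  # the ceph cluster has not published credentials yet
    configure(service='mysql', key=key, auth=auth)
    ensure_ceph_storage(service='mysql', pool='mysql', rbd_img='mysql',
                        sizemb=1024, mount_point='/var/lib/mysql',
                        blk_device='/dev/rbd/mysql/mysql', fstype='ext4',
                        system_services=['mysql'])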

@@ -0,0 +1,181 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
unit_get,
)
class HAIncompleteConfig(Exception):
pass
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
or relation data to configure HTTPS.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if None not in [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]:
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_haproxy_port(public_port):
'''
Determine correct HAProxy listening port based on the public
port and the existence of an HTTPS reverse proxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
i += 1
return public_port - (i * 10)
def get_hacluster_config():
'''
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
conf = {}
for setting in settings:
conf[setting] = config_get(setting)
missing = [s for s, v in conf.iteritems() if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
return conf
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration and hacluster.
:configs : OSTemplateRenderer: A config templating object to inspect for
a complete https context.
:vip_setting: str: Setting in charm config that specifies
VIP address.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
if is_clustered():
addr = config_get(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
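The two port helpers above simply step a well-known public port down by 10 for each proxy tier placed in front of the real listener. A worked example with an illustrative public port:

# Worked example of the port arithmetic (port number is illustrative).
public_port = 8776

# haproxy sits in front of the API server: one tier down.
haproxy_port = public_port - 10        # 8766

# API behind both haproxy (peered/clustered) and an HTTPS frontend.
api_port = public_port - (2 * 10)      # 8756

assert (haproxy_port, api_port) == (8766, 8756)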

@@ -0,0 +1,340 @@
"Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
import os
import json
import yaml
import subprocess
import UserDict
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
MARKER = object()
cache = {}
def cached(func):
''' Cache return values for multiple executions of func + args
For example:
@cached
def unit_get(attribute):
pass
unit_get('test')
will cache the result of unit_get + 'test' for future calls.
'''
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
def flush(key):
''' Flushes any entries from function cache where the
key is found in the function+args '''
flush_list = []
for item in cache:
if key in item:
flush_list.append(item)
for item in flush_list:
del cache[item]
def log(message, level=None):
"Write a message to the juju log"
command = ['juju-log']
if level:
command += ['-l', level]
command += [message]
subprocess.call(command)
class Serializable(UserDict.IterableUserDict):
"Wrapper, an object that can be serialized to yaml or json"
def __init__(self, obj):
# wrap the object
UserDict.IterableUserDict.__init__(self)
self.data = obj
def __getattr__(self, attr):
# See if this object has attribute.
if attr in ("json", "yaml", "data"):
return self.__dict__[attr]
# Check for attribute in wrapped object.
got = getattr(self.data, attr, MARKER)
if got is not MARKER:
return got
# Proxy to the wrapped object via dict interface.
try:
return self.data[attr]
except KeyError:
raise AttributeError(attr)
def __getstate__(self):
# Pickle as a standard dictionary.
return self.data
def __setstate__(self, state):
# Unpickle into our wrapper.
self.data = state
def json(self):
"Serialize the object to json"
return json.dumps(self.data)
def yaml(self):
"Serialize the object to yaml"
return yaml.dump(self.data)
def execution_environment():
"""A convenient bundling of the current execution context"""
context = {}
context['conf'] = config()
if relation_id():
context['reltype'] = relation_type()
context['relid'] = relation_id()
context['rel'] = relation_get()
context['unit'] = local_unit()
context['rels'] = relations()
context['env'] = os.environ
return context
def in_relation_hook():
"Determine whether we're running in a relation hook"
return 'JUJU_RELATION' in os.environ
def relation_type():
"The scope for the current relation hook"
return os.environ.get('JUJU_RELATION', None)
def relation_id():
"The relation ID for the current relation hook"
return os.environ.get('JUJU_RELATION_ID', None)
def local_unit():
"Local unit ID"
return os.environ['JUJU_UNIT_NAME']
def remote_unit():
"The remote unit for the current relation hook"
return os.environ['JUJU_REMOTE_UNIT']
def service_name():
"The name service group this unit belongs to"
return local_unit().split('/')[0]
@cached
def config(scope=None):
"Juju charm configuration"
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
return json.loads(subprocess.check_output(config_cmd_line))
except ValueError:
return None
@cached
def relation_get(attribute=None, unit=None, rid=None):
_args = ['relation-get', '--format=json']
if rid:
_args.append('-r')
_args.append(rid)
_args.append(attribute or '-')
if unit:
_args.append(unit)
try:
return json.loads(subprocess.check_output(_args))
except ValueError:
return None
def relation_set(relation_id=None, relation_settings={}, **kwargs):
relation_cmd_line = ['relation-set']
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
for k, v in (relation_settings.items() + kwargs.items()):
if v is None:
relation_cmd_line.append('{}='.format(k))
else:
relation_cmd_line.append('{}={}'.format(k, v))
subprocess.check_call(relation_cmd_line)
# Flush cache of any relation-gets for local unit
flush(local_unit())
@cached
def relation_ids(reltype=None):
"A list of relation_ids"
reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
return json.loads(subprocess.check_output(relid_cmd_line)) or []
return []
@cached
def related_units(relid=None):
"A list of related units"
relid = relid or relation_id()
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
units_cmd_line.extend(('-r', relid))
return json.loads(subprocess.check_output(units_cmd_line)) or []
@cached
def relation_for_unit(unit=None, rid=None):
"Get the json represenation of a unit's relation"
unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid)
for key in relation:
if key.endswith('-list'):
relation[key] = relation[key].split()
relation['__unit__'] = unit
return relation
@cached
def relations_for_id(relid=None):
"Get relations of a specific relation ID"
relation_data = []
relid = relid or relation_ids()
for unit in related_units(relid):
unit_data = relation_for_unit(unit, relid)
unit_data['__relid__'] = relid
relation_data.append(unit_data)
return relation_data
@cached
def relations_of_type(reltype=None):
"Get relations of a specific type"
relation_data = []
reltype = reltype or relation_type()
for relid in relation_ids(reltype):
for relation in relations_for_id(relid):
relation['__relid__'] = relid
relation_data.append(relation)
return relation_data
@cached
def relation_types():
"Get a list of relation types supported by this charm"
charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf)
rel_types = []
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
mdf.close()
return rel_types
@cached
def relations():
rels = {}
for reltype in relation_types():
relids = {}
for relid in relation_ids(reltype):
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
for unit in related_units(relid):
reldata = relation_get(unit=unit, rid=relid)
units[unit] = reldata
relids[relid] = units
rels[reltype] = relids
return rels
def open_port(port, protocol="TCP"):
"Open a service network port"
_args = ['open-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def close_port(port, protocol="TCP"):
"Close a service network port"
_args = ['close-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
@cached
def unit_get(attribute):
_args = ['unit-get', '--format=json', attribute]
try:
return json.loads(subprocess.check_output(_args))
except ValueError:
return None
def unit_private_ip():
return unit_get('private-address')
class UnregisteredHookError(Exception):
pass
class Hooks(object):
def __init__(self):
super(Hooks, self).__init__()
self._hooks = {}
def register(self, name, function):
self._hooks[name] = function
def execute(self, args):
hook_name = os.path.basename(args[0])
if hook_name in self._hooks:
self._hooks[hook_name]()
else:
raise UnregisteredHookError(hook_name)
def hook(self, *hook_names):
def wrapper(decorated):
for hook_name in hook_names:
self.register(hook_name, decorated)
else:
self.register(decorated.__name__, decorated)
if '_' in decorated.__name__:
self.register(
decorated.__name__.replace('_', '-'), decorated)
return decorated
return wrapper
def charm_dir():
return os.environ.get('CHARM_DIR')
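A minimal dispatcher sketch for the Hooks class above. In a charm, each hook file is a symlink to a single script, so hooks.execute(sys.argv) routes on the basename of the invoked file; the percona hooks later in this commit follow exactly this pattern (hook bodies here are illustrative):

#!/usr/bin/python
import sys
from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

hooks = Hooks()


@hooks.hook('install')
def install():
    log('install hook ran')


@hooks.hook('config-changed', 'upgrade-charm')
def config_changed():
    log('reconfiguration hook ran')


if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))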

@@ -0,0 +1,241 @@
"""Tools for working with the host system"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Nick Moffitt <nick.moffitt@canonical.com>
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import os
import pwd
import grp
import random
import string
import subprocess
import hashlib
from collections import OrderedDict
from hookenv import log
def service_start(service_name):
return service('start', service_name)
def service_stop(service_name):
return service('stop', service_name)
def service_restart(service_name):
return service('restart', service_name)
def service_reload(service_name, restart_on_failure=False):
service_result = service('reload', service_name)
if not service_result and restart_on_failure:
service_result = service('restart', service_name)
return service_result
def service(action, service_name):
cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0
def service_running(service):
try:
output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if system_user or password is None:
cmd.append('--system')
else:
cmd.extend([
'--create-home',
'--shell', shell,
'--password', password,
])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = [
'gpasswd', '-a',
username,
group
]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
def rsync(from_path, to_path, flags='-r', options=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd).strip()
def symlink(source, destination):
"""Create a symbolic link"""
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source,
destination,
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0555, force=False):
"""Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
if os.path.exists(realpath):
if force and not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
else:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
def write_file(path, content, owner='root', group='root', perms=0444):
"""Create or overwrite a file with the contents of a string"""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
with open(path, 'w') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
target.write(content)
def mount(device, mountpoint, options=None, persist=False):
'''Mount a filesystem'''
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
# TODO: update fstab
pass
return True
def umount(mountpoint, persist=False):
'''Unmount a filesystem'''
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
# TODO: update fstab
pass
return True
def mounts():
'''List of all mounted volumes as [[mountpoint,device],[...]]'''
with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split()
for l in f.readlines()]]
return system_mounts
def file_hash(path):
''' Generate a md5 hash of the contents of 'path' or None if not found '''
if os.path.exists(path):
h = hashlib.md5()
with open(path, 'r') as source:
h.update(source.read()) # IGNORE:E1101 - it does have update
return h.hexdigest()
else:
return None
def restart_on_change(restart_map):
''' Restart services based on configuration files changing
This function is used as a decorator, for example:
@restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
})
def ceph_client_changed():
...
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function.
'''
def wrap(f):
def wrapped_f(*args):
checksums = {}
for path in restart_map:
checksums[path] = file_hash(path)
f(*args)
restarts = []
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
for service_name in list(OrderedDict.fromkeys(restarts)):
service('restart', service_name)
return wrapped_f
return wrap
def lsb_release():
'''Return /etc/lsb-release in a dict'''
d = {}
with open('/etc/lsb-release', 'r') as lsb:
for l in lsb:
k, v = l.split('=')
d[k.strip()] = v.strip()
return d
def pwgen(length=None):
'''Generate a random password.'''
if length is None:
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
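A short sketch combining write_file with the restart_on_change decorator above; the service restarts only if the file's md5 hash changes across the wrapped call (path and service name are illustrative):

# Illustrative use of write_file + restart_on_change (names assumed).
from charmhelpers.core.host import restart_on_change, write_file


@restart_on_change({'/etc/myapp/myapp.conf': ['myapp']})
def config_changed():
    write_file('/etc/myapp/myapp.conf', 'listen_port=8080\n')
# Unchanged content: file_hash() matches, so no restart is issued.
# Changed content: 'service myapp restart' runs exactly once.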

@@ -0,0 +1,209 @@
import importlib
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
from urlparse import (
urlparse,
urlunparse,
)
import subprocess
from charmhelpers.core.hookenv import (
config,
log,
)
import apt_pkg
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
apt_pkg.init()
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
try:
p = cache[package]
p.current_ver or _pkgs.append(package)
except KeyError:
log('Package {} has no installation candidate.'.format(package),
level='WARNING')
_pkgs.append(package)
return _pkgs
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
options = options or []
cmd = ['apt-get', '-y']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '-y', 'purge']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Purging {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def add_source(source, key=None):
if ((source.startswith('ppa:') or
source.startswith('http:'))):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(pocket))
elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'import', key])
class SourceConfigError(Exception):
pass
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
"""
Configure multiple sources from charm configuration
Example config:
install_sources:
- "ppa:foo"
- "http://example.com/repo precise main"
install_keys:
- null
- "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted.
"""
sources = safe_load(config(sources_var))
keys = safe_load(config(keys_var))
if isinstance(sources, basestring) and isinstance(keys, basestring):
add_source(sources, keys)
else:
if not len(sources) == len(keys):
msg = 'Install sources and keys lists are different lengths'
raise SourceConfigError(msg)
for src_num in range(len(sources)):
add_source(sources[src_num], keys[src_num])
if update:
apt_update(fatal=True)
# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
)
class UnhandledSource(Exception):
pass
def install_remote(source):
"""
Install a file tree from a remote source
The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this module's submodules
Options supported are submodule-specific"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source)
except UnhandledSource:
pass
if not installed_to:
raise UnhandledSource("No handler found for source {}".format(source))
return installed_to
def install_from_config(config_var_name):
charm_config = config()
source = charm_config[config_var_name]
return install_remote(source)
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
return "Wrong source type"
def install(self, source):
"""Try to download and unpack the source. Return the path to the
unpacked files or raise UnhandledSource."""
raise UnhandledSource("Wrong source type {}".format(source))
def parse_url(self, url):
return urlparse(url)
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
parts[4:] = ['' for i in parts[4:]]
return urlunparse(parts)
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
plugin_list = []
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(importlib.import_module(package), classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
# Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(handler_name))
return plugin_list
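Typical use of the package helpers above from a charm's install hook (the pocket and package names are illustrative):

# Illustrative install flow; pocket/package names are assumptions.
from charmhelpers.fetch import (
    add_source,
    apt_update,
    apt_install,
    filter_installed_packages,
)

add_source('cloud:precise-updates/grizzly')  # writes cloud-archive.list
apt_update(fatal=True)
apt_install(filter_installed_packages(['mysql-server']), fatal=True)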

@@ -0,0 +1,48 @@
import os
import urllib2
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""Handler for archives via generic URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
return "Wrong source type"
if get_archive_handler(self.base_url(source)):
return True
return False
def download(self, source, dest):
# propagate all exceptions
# URLError, OSError, etc
response = urllib2.urlopen(source)
try:
with open(dest, 'w') as dest_file:
dest_file.write(response.read())
except Exception as e:
if os.path.isfile(dest):
os.unlink(dest)
raise e
def install(self, source):
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
try:
self.download(source, dld_file)
except urllib2.URLError as e:
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
return extract(dld_file)
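Charms would not normally instantiate this handler directly: install_remote() in charmhelpers.fetch walks FETCH_HANDLERS and delegates to whichever handler can_handle() the URL. A sketch with an illustrative URL (it assumes charmhelpers.payload.archive, imported above but not part of this commit's sync config, is also available):

# Illustrative: archive URLs get routed to ArchiveUrlFetchHandler.
from charmhelpers.fetch import install_remote

path = install_remote('http://example.com/payload.tar.gz')
# 'path' is the unpacked tree under $CHARM_DIR/fetched/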

@@ -0,0 +1,49 @@
import os
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.core.host import mkdir
try:
from bzrlib.branch import Branch
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-bzrlib")
from bzrlib.branch import Branch
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('bzr+ssh', 'lp'):
return False
else:
return True
def branch(self, source, dest):
url_parts = self.parse_url(source)
# If we use lp:branchname scheme we need to load plugins
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if url_parts.scheme == "lp":
from bzrlib.plugin import load_plugins
load_plugins()
try:
remote_branch = Branch.open(source)
remote_branch.bzrdir.sprout(dest).open_branch()
except Exception as e:
raise e
def install(self, source):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
try:
self.branch(source, dest_dir)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir

@@ -0,0 +1 @@
percona_hooks.py

1
hooks/config-changed Symbolic link

@@ -0,0 +1 @@
percona_hooks.py

1
hooks/install Symbolic link

@@ -0,0 +1 @@
percona_hooks.py

70
hooks/percona_hooks.py Executable file

@@ -0,0 +1,70 @@
#!/usr/bin/python
import sys
import os
from charmhelpers.core.hookenv import (
Hooks, UnregisteredHookError,
log
)
from charmhelpers.core.host import (
restart_on_change
)
from charmhelpers.fetch import (
apt_update,
apt_install,
)
from percona_utils import (
PACKAGES,
MY_CNF,
setup_percona_repo,
render_template,
get_host_ip,
get_cluster_hosts,
configure_sstuser
)
from charmhelpers.contrib.hahelpers.cluster import (
peer_units,
oldest_peer
)
hooks = Hooks()
@hooks.hook('install')
def install():
setup_percona_repo()
apt_update(fatal=True)
apt_install(PACKAGES, fatal=True)
configure_sstuser()
@hooks.hook('cluster-relation-changed')
@hooks.hook('upgrade-charm')
@hooks.hook('config-changed')
@restart_on_change({MY_CNF: ['mysql']})
def cluster_changed():
hosts = get_cluster_hosts()
clustered = False
if len(hosts) > 1:
clustered = True
with open(MY_CNF, 'w') as conf:
conf.write(render_template(os.path.basename(MY_CNF),
{'cluster_name': 'juju_cluster',
'private_address': get_host_ip(),
'clustered': clustered,
'cluster_hosts': ",".join(hosts)}))
# This is horrid but stops the bootstrap node
# restarting itself when new nodes start joining
if clustered and oldest_peer(peer_units()):
sys.exit(0)
def main():
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
if __name__ == '__main__':
main()

89
hooks/percona_utils.py Normal file

@@ -0,0 +1,89 @@
''' General utilities for percona '''
import subprocess
import socket
from charmhelpers.core.host import (
lsb_release
)
from charmhelpers.core.hookenv import (
unit_get,
relation_ids,
related_units,
relation_get,
)
from charmhelpers.fetch import (
apt_install,
filter_installed_packages
)
try:
import jinja2
except ImportError:
apt_install(filter_installed_packages(['python-jinja2']),
fatal=True)
import jinja2
try:
import dns.resolver
except ImportError:
apt_install(filter_installed_packages(['python-dnspython']),
fatal=True)
import dns.resolver
PACKAGES = [
'percona-xtradb-cluster-server-5.5',
'percona-xtradb-cluster-client-5.5'
]
KEY = "keys/repo.percona.com"
REPO = """deb http://repo.percona.com/apt {release} main
deb-src http://repo.percona.com/apt {release} main"""
MY_CNF = "/etc/mysql/my.cnf"
def setup_percona_repo():
with open('/etc/apt/sources.list.d/percona.list', 'w') as sources:
sources.write(REPO.format(release=lsb_release()['DISTRIB_CODENAME']))
subprocess.check_call(['apt-key', 'add', KEY])
TEMPLATES_DIR = 'templates'
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir))
template = templates.get_template(template_name)
return template.render(context)
def get_host_ip(hostname=None):
hostname = hostname or unit_get('private-address')
try:
# Test to see if already an IPv4 address
socket.inet_aton(hostname)
return hostname
except socket.error:
# This may throw an NXDOMAIN exception, in which case
# things are badly broken so just let it kill the hook
answers = dns.resolver.query(hostname, 'A')
if answers:
return answers[0].address
def get_cluster_hosts():
hosts = [get_host_ip()]
for relid in relation_ids('cluster'):
for unit in related_units(relid):
hosts.append(get_host_ip(
relation_get('private-address',
unit, relid))
)
return hosts
SQL_SST_USER_SETUP = """mysql -u root << EOF
CREATE USER 'sstuser'@'localhost' IDENTIFIED BY 's3cretPass';
GRANT RELOAD, LOCK TABLES, REPLICATION CLIENT ON *.* TO 'sstuser'@'localhost';
EOF"""
def configure_sstuser():
subprocess.check_call(SQL_SST_USER_SETUP, shell=True)

1
hooks/upgrade-charm Symbolic link

@@ -0,0 +1 @@
percona_hooks.py

30
keys/repo.percona.com Normal file

@@ -0,0 +1,30 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.11 (GNU/Linux)
mQGiBEsm3aERBACyB1E9ixebIMRGtmD45c6c/wi2IVIa6O3G1f6cyHH4ump6ejOi
AX63hhEs4MUCGO7KnON1hpjuNN7MQZtGTJC0iX97X2Mk+IwB1KmBYN9sS/OqhA5C
itj2RAkug4PFHR9dy21v0flj66KjBS3GpuOadpcrZ/k0g7Zi6t7kDWV0hwCgxCa2
f/ESC2MN3q3j9hfMTBhhDCsD/3+iOxtDAUlPMIH50MdK5yqagdj8V/sxaHJ5u/zw
YQunRlhB9f9QUFfhfnjRn8wjeYasMARDctCde5nbx3Pc+nRIXoB4D1Z1ZxRzR/lb
7S4i8KRr9xhommFnDv/egkx+7X1aFp1f2wN2DQ4ecGF4EAAVHwFz8H4eQgsbLsa6
7DV3BACj1cBwCf8tckWsvFtQfCP4CiBB50Ku49MU2Nfwq7durfIiePF4IIYRDZgg
kHKSfP3oUZBGJx00BujtTobERraaV7lIRIwETZao76MqGt9K1uIqw4NT/jAbi9ce
rFaOmAkaujbcB11HYIyjtkAGq9mXxaVqCC3RPWGr+fqAx/akBLQ2UGVyY29uYSBN
eVNRTCBEZXZlbG9wbWVudCBUZWFtIDxteXNxbC1kZXZAcGVyY29uYS5jb20+iGAE
ExECACAFAksm3aECGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAcTL3NzS79
Kpk/AKCQKSEgwX9r8jR+6tAnCVpzyUFOQwCfX+fw3OAoYeFZB3eu2oT8OBTiVYu5
Ag0ESybdoRAIAKKUV8rbqlB8qwZdWlmrwQqg3o7OpoAJ53/QOIySDmqy5TmNEPLm
lHkwGqEqfbFYoTbOCEEJi2yFLg9UJCSBM/sfPaqb2jGP7fc0nZBgUBnFuA9USX72
O0PzVAF7rCnWaIz76iY+AMI6xKeRy91TxYo/yenF1nRSJ+rExwlPcHgI685GNuFG
chAExMTgbnoPx1ka1Vqbe6iza+FnJq3f4p9luGbZdSParGdlKhGqvVUJ3FLeLTqt
caOn5cN2ZsdakE07GzdSktVtdYPT5BNMKgOAxhXKy11IPLj2Z5C33iVYSXjpTelJ
b2qHvcg9XDMhmYJyE3O4AWFh2no3Jf4ypIcABA0IAJO8ms9ov6bFqFTqA0UW2gWQ
cKFN4Q6NPV6IW0rV61ONLUc0VFXvYDtwsRbUmUYkB/L/R9fHj4lRUDbGEQrLCoE+
/HyYvr2rxP94PT6Bkjk/aiCCPAKZRj5CFUKRpShfDIiow9qxtqv7yVd514Qqmjb4
eEihtcjltGAoS54+6C3lbjrHUQhLwPGqlAh8uZKzfSZq0C06kTxiEqsG6VDDYWy6
L7qaMwOqWdQtdekKiCk8w/FoovsMYED2qlWEt0i52G+0CjoRFx2zNsN3v4dWiIhk
ZSL00Mx+g3NA7pQ1Yo5Vhok034mP8L2fBLhhWaK3LG63jYvd0HLkUFhNG+xjkpeI
SQQYEQIACQUCSybdoQIbDAAKCRAcTL3NzS79KlacAJ9H6emL/8dsoquhE9PNnKCI
eMTmmQCfXRLIoNjJa20VEwJDzR7YVdBEiQI=
=AD5m
-----END PGP PUBLIC KEY BLOCK-----

19
metadata.yaml Normal file

@@ -0,0 +1,19 @@
name: percona-cluster
summary: Percona XtraDB Cluster - Active/Active MySQL
maintainer: James Page <james.page@ubuntu.com>
description: |
Percona XtraDB Cluster provides an active/active MySQL
compatible alternative implemented using the Galera
synchronous replication extensions.
categories:
- databases
provides:
db:
interface: mysql
db-admin:
interface: mysql-root
shared-db:
interface: mysql-shared
peers:
cluster:
interface: percona-cluster

1
revision Normal file

@@ -0,0 +1 @@
10

37
templates/my.cnf Normal file

@@ -0,0 +1,37 @@
[mysqld]
datadir=/var/lib/mysql
user=mysql
# Path to Galera library
wsrep_provider=/usr/lib/libgalera_smm.so
{% if not clustered %}
# Empty gcomm address is being used when cluster is getting bootstrapped
wsrep_cluster_address=gcomm://
{% else %}
# Cluster connection URL contains the IPs of node#1, node#2 and node#3
wsrep_cluster_address=gcomm://{{ cluster_hosts }}
{% endif %}
# In order for Galera to work correctly binlog format should be ROW
binlog_format=ROW
# MyISAM storage engine has only experimental support
default_storage_engine=InnoDB
# This is a recommended tuning variable for performance
innodb_locks_unsafe_for_binlog=1
# This changes how InnoDB autoincrement locks are managed and is a requirement for Galera
innodb_autoinc_lock_mode=2
# Node #1 address
wsrep_node_address={{ private_address }}
# SST method
wsrep_sst_method=xtrabackup
# Cluster name
wsrep_cluster_name={{ cluster_name }}
# Authentication for SST method
wsrep_sst_auth="sstuser:s3cretPass"