Initial move to charm-helpers
parent cbfc8914df
commit 2d0b3fe9b8
@@ -1,7 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>

<pydev_project>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
charm-helpers-sync.yaml | 4 (new file)
@@ -0,0 +1,4 @@
branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
    - core
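Editorial note (not part of the commit): this config is the input for the charm-helpers sync tool shipped in lp:charm-helpers, which copies the listed sub-packages into the charm tree. A rough, illustrative Python sketch of what the file asks for, assuming a local checkout of lp:charm-helpers next to the charm (the path and the copy logic here are illustrative, not the real tool):

# Illustrative sketch only; the real tool lives in lp:charm-helpers.
import os
import shutil

import yaml

with open('charm-helpers-sync.yaml') as f:
    cfg = yaml.safe_load(f)

src_root = os.path.join('charm-helpers', 'charmhelpers')  # assumed checkout path
for module in cfg['include']:
    src = os.path.join(src_root, module)
    dst = os.path.join(cfg['destination'], module)
    # Refresh the vendored copy under hooks/charmhelpers/<module>.
    if os.path.isdir(dst):
        shutil.rmtree(dst)
    shutil.copytree(src, dst)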
@@ -10,7 +10,7 @@
import json
import subprocess
import time
import utils
#import utils
import os
import apt_pkg as apt

@@ -18,6 +18,8 @@ LEADER = 'leader'
PEON = 'peon'
QUORUM = [LEADER, PEON]

PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs']


def is_quorum():
    asok = "/var/run/ceph/ceph-mon.{}.asok".format(utils.get_unit_hostname())
hooks/charmhelpers/__init__.py | 0 (new file)

hooks/charmhelpers/core/__init__.py | 0 (new file)

hooks/charmhelpers/core/hookenv.py | 320 (new file)
@@ -0,0 +1,320 @@
"Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
#  Charm Helpers Developers <juju@lists.ubuntu.com>

import os
import json
import yaml
import subprocess
import UserDict

CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
MARKER = object()

cache = {}


def cached(func):
    ''' Cache return values for multiple executions of func + args

    For example:

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    '''
    def wrapper(*args, **kwargs):
        global cache
        key = str((func, args, kwargs))
        try:
            return cache[key]
        except KeyError:
            res = func(*args, **kwargs)
            cache[key] = res
            return res
    return wrapper


def flush(key):
    ''' Flushes any entries from function cache where the
        key is found in the function+args '''
    flush_list = []
    for item in cache:
        if key in item:
            flush_list.append(item)
    for item in flush_list:
        del cache[item]


def log(message, level=None):
    "Write a message to the juju log"
    command = ['juju-log']
    if level:
        command += ['-l', level]
    command += [message]
    subprocess.call(command)


class Serializable(UserDict.IterableUserDict):
    "Wrapper, an object that can be serialized to yaml or json"

    def __init__(self, obj):
        # wrap the object
        UserDict.IterableUserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        # See if this object has attribute.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Check for attribute in wrapped object.
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Proxy to the wrapped object via dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def json(self):
        "Serialize the object to json"
        return json.dumps(self.data)

    def yaml(self):
        "Serialize the object to yaml"
        return yaml.dump(self.data)


def execution_environment():
    """A convenient bundling of the current execution context"""
    context = {}
    context['conf'] = config()
    context['reltype'] = relation_type()
    context['relid'] = relation_id()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['rel'] = relation_get()
    context['env'] = os.environ
    return context


def in_relation_hook():
    "Determine whether we're running in a relation hook"
    return 'JUJU_RELATION' in os.environ


def relation_type():
    "The scope for the current relation hook"
    return os.environ.get('JUJU_RELATION', None)


def relation_id():
    "The relation ID for the current relation hook"
    return os.environ.get('JUJU_RELATION_ID', None)


def local_unit():
    "Local unit ID"
    return os.environ['JUJU_UNIT_NAME']


def remote_unit():
    "The remote unit for the current relation hook"
    return os.environ['JUJU_REMOTE_UNIT']


@cached
def config(scope=None):
    "Juju charm configuration"
    config_cmd_line = ['config-get']
    if scope is not None:
        config_cmd_line.append(scope)
    config_cmd_line.append('--format=json')
    try:
        return Serializable(json.loads(
            subprocess.check_output(config_cmd_line)
        ))
    except ValueError:
        return None


@cached
def relation_get(attribute=None, unit=None, rid=None):
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return Serializable(json.loads(subprocess.check_output(_args)))
    except ValueError:
        return None


def relation_set(relation_id=None, relation_settings={}, **kwargs):
    relation_cmd_line = ['relation-set']
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    for k, v in relation_settings.items():
        relation_cmd_line.append('{}={}'.format(k, v))
    for k, v in kwargs.items():
        relation_cmd_line.append('{}={}'.format(k, v))
    subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())


@cached
def relation_ids(reltype=None):
    "A list of relation_ids"
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
        return json.loads(subprocess.check_output(relid_cmd_line))
    return []


@cached
def related_units(relid=None):
    "A list of related units"
    relid = relid or relation_id()
    units_cmd_line = ['relation-list', '--format=json']
    if relid is not None:
        units_cmd_line.extend(('-r', relid))
    return json.loads(subprocess.check_output(units_cmd_line))


@cached
def relation_for_unit(unit=None, rid=None):
    "Get the json represenation of a unit's relation"
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    for key in relation:
        if key.endswith('-list'):
            relation[key] = relation[key].split()
    relation['__unit__'] = unit
    return Serializable(relation)


@cached
def relations_for_id(relid=None):
    "Get relations of a specific relation ID"
    relation_data = []
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data


@cached
def relations_of_type(reltype=None):
    "Get relations of a specific type"
    relation_data = []
    reltype = reltype or relation_type()
    for relid in relation_ids(reltype):
        for relation in relations_for_id(relid):
            relation['__relid__'] = relid
            relation_data.append(relation)
    return relation_data


@cached
def relation_types():
    "Get a list of relation types supported by this charm"
    charmdir = os.environ.get('CHARM_DIR', '')
    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
    md = yaml.safe_load(mdf)
    rel_types = []
    for key in ('provides', 'requires', 'peers'):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    mdf.close()
    return rel_types


@cached
def relations():
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                reldata = relation_get(unit=unit, rid=relid)
                units[unit] = reldata
            relids[relid] = units
        rels[reltype] = relids
    return rels


def open_port(port, protocol="TCP"):
    "Open a service network port"
    _args = ['open-port']
    _args.append('{}/{}'.format(port, protocol))
    subprocess.check_call(_args)


def close_port(port, protocol="TCP"):
    "Close a service network port"
    _args = ['close-port']
    _args.append('{}/{}'.format(port, protocol))
    subprocess.check_call(_args)


@cached
def unit_get(attribute):
    _args = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(_args))
    except ValueError:
        return None


def unit_private_ip():
    return unit_get('private-address')


class UnregisteredHookError(Exception):
    pass


class Hooks(object):
    def __init__(self):
        super(Hooks, self).__init__()
        self._hooks = {}

    def register(self, name, function):
        self._hooks[name] = function

    def execute(self, args):
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            self._hooks[hook_name]()
        else:
            raise UnregisteredHookError(hook_name)

    def hook(self, *hook_names):
        def wrapper(decorated):
            for hook_name in hook_names:
                self.register(hook_name, decorated)
            else:
                self.register(decorated.__name__, decorated)
            return decorated
        return wrapper
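Editorial note (not part of the commit): the Hooks class above is a small decorator-based dispatcher; Juju symlinks each hook name to the hook script, and execute() picks the registered function by the basename of argv[0]. hooks.py later in this diff uses it exactly this way; a minimal standalone sketch:

# Minimal sketch (not part of the commit): driving the Hooks dispatcher.
import sys

from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

hooks = Hooks()


@hooks.hook('install')
def install():
    log('Begin install hook.')
    # ... install packages, write configuration ...
    log('End install hook.')


if __name__ == '__main__':
    try:
        # The basename of argv[0] (the hook symlink) selects the function.
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))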
hooks/charmhelpers/core/host.py | 261 (new file)
@@ -0,0 +1,261 @@
"""Tools for working with the host system"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
#  Nick Moffitt <nick.moffitt@canonical.com>
#  Matthew Wedgwood <matthew.wedgwood@canonical.com>

import apt_pkg
import os
import pwd
import grp
import subprocess
import hashlib

from hookenv import log, execution_environment


def service_start(service_name):
    service('start', service_name)


def service_stop(service_name):
    service('stop', service_name)


def service_restart(service_name):
    service('restart', service_name)


def service_reload(service_name, restart_on_failure=False):
    if not service('reload', service_name) and restart_on_failure:
        service('restart', service_name)


def service(action, service_name):
    cmd = ['service', service_name, action]
    return subprocess.call(cmd) == 0


def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user"""
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if system_user or password is None:
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        cmd.append(username)
        subprocess.check_call(cmd)
    user_info = pwd.getpwnam(username)
    return user_info


def add_user_to_group(username, group):
    """Add a user to a group"""
    cmd = [
        'gpasswd', '-a',
        username,
        group
    ]
    log("Adding user {} to group {}".format(username, group))
    subprocess.check_call(cmd)


def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate the contents of a path"""
    context = execution_environment()
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags]
    cmd.extend(options)
    cmd.append(from_path.format(**context))
    cmd.append(to_path.format(**context))
    log(" ".join(cmd))
    return subprocess.check_output(cmd).strip()


def symlink(source, destination):
    """Create a symbolic link"""
    context = execution_environment()
    log("Symlinking {} as {}".format(source, destination))
    cmd = [
        'ln',
        '-sf',
        source.format(**context),
        destination.format(**context)
    ]
    subprocess.check_call(cmd)


def mkdir(path, owner='root', group='root', perms=0555, force=False):
    """Create a directory"""
    context = execution_environment()
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
    uid = pwd.getpwnam(owner.format(**context)).pw_uid
    gid = grp.getgrnam(group.format(**context)).gr_gid
    realpath = os.path.abspath(path)
    if os.path.exists(realpath):
        if force and not os.path.isdir(realpath):
            log("Removing non-directory file {} prior to mkdir()".format(path))
            os.unlink(realpath)
    else:
        os.makedirs(realpath, perms)
    os.chown(realpath, uid, gid)


def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs):
    """Create or overwrite a file with the contents of a string"""
    context = execution_environment()
    context.update(kwargs)
    log("Writing file {} {}:{} {:o}".format(path, owner, group,
                                            perms))
    uid = pwd.getpwnam(owner.format(**context)).pw_uid
    gid = grp.getgrnam(group.format(**context)).gr_gid
    with open(path.format(**context), 'w') as target:
        os.fchown(target.fileno(), uid, gid)
        os.fchmod(target.fileno(), perms)
        target.write(fmtstr.format(**context))


def render_template_file(source, destination, **kwargs):
    """Create or overwrite a file using a template"""
    log("Rendering template {} for {}".format(source,
                                              destination))
    context = execution_environment()
    with open(source.format(**context), 'r') as template:
        write_file(destination.format(**context), template.read(),
                   **kwargs)


def filter_installed_packages(packages):
    """Returns a list of packages that require installation"""
    apt_pkg.init()
    cache = apt_pkg.Cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
            p.current_ver or _pkgs.append(package)
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs


def apt_install(packages, options=None, fatal=False):
    """Install one or more packages"""
    options = options or []
    cmd = ['apt-get', '-y']
    cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)


def apt_update(fatal=False):
    """Update local apt cache"""
    cmd = ['apt-get', 'update']
    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)


def mount(device, mountpoint, options=None, persist=False):
    '''Mount a filesystem'''
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError, e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False
    if persist:
        # TODO: update fstab
        pass
    return True


def umount(mountpoint, persist=False):
    '''Unmount a filesystem'''
    cmd_args = ['umount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError, e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False
    if persist:
        # TODO: update fstab
        pass
    return True


def mounts():
    '''List of all mounted volumes as [[mountpoint,device],[...]]'''
    with open('/proc/mounts') as f:
        # [['/mount/point','/dev/path'],[...]]
        system_mounts = [m[1::-1] for m in [l.strip().split()
                                            for l in f.readlines()]]
    return system_mounts


def file_hash(path):
    ''' Generate a md5 hash of the contents of 'path' or None if not found '''
    if os.path.exists(path):
        h = hashlib.md5()
        with open(path, 'r') as source:
            h.update(source.read())  # IGNORE:E1101 - it does have update
        return h.hexdigest()
    else:
        return None


def restart_on_change(restart_map):
    ''' Restart services based on configuration files changing

    This function is used a decorator, for example

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
        })
        def ceph_client_changed():
            ...

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function.
    '''
    def wrap(f):
        def wrapped_f(*args):
            checksums = {}
            for path in restart_map:
                checksums[path] = file_hash(path)
            f(*args)
            restarts = []
            for path in restart_map:
                if checksums[path] != file_hash(path):
                    restarts += restart_map[path]
            for service_name in list(set(restarts)):
                service('restart', service_name)
        return wrapped_f
    return wrap
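Editorial note (not part of the commit): the pattern hooks.py adopts below pairs filter_installed_packages() with apt_install() so repeated hook runs stay idempotent, and restart_on_change() restarts a service only when a watched file's md5 actually changes. A minimal sketch using the helpers above (the ceph.conf path and the 'ceph-mon-all' service name are taken from this charm; treat them as illustrative):

# Minimal sketch (not part of the commit): idempotent install plus a
# config-driven restart, built from the helpers defined above.
from charmhelpers.core.host import (
    apt_update,
    apt_install,
    filter_installed_packages,
    restart_on_change,
)

PACKAGES = ['ceph', 'gdisk', 'ntp']


def ensure_packages():
    # Only packages without a current version are handed to apt-get,
    # so re-running the hook is cheap and quiet.
    missing = filter_installed_packages(PACKAGES)
    if missing:
        apt_update(fatal=True)
        apt_install(packages=missing, fatal=True)


@restart_on_change({'/etc/ceph/ceph.conf': ['ceph-mon-all']})
def write_ceph_conf():
    # If this function changes the file's hash, every service mapped to
    # that path is restarted after it returns.
    with open('/etc/ceph/ceph.conf', 'w') as conf:
        conf.write('# rendered by the charm\n')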
hooks/hooks.py | 247
@@ -15,7 +15,35 @@ import shutil
import sys

import ceph
import utils
#import utils
from charmhelpers.core.hookenv import (
    log,
    ERROR,
    config,
    relation_ids,
    related_units,
    relation_get,
    relation_set,
    remote_unit,
    Hooks,
    UnregisteredHookError
)
from charmhelpers.core.host import (
    apt_install,
    apt_update,
    filter_installed_packages,
    mkdir
)

from utils import (
    render_template,
    configure_source,
    get_host_ip,
    get_unit_hostname
)


hooks = Hooks()


def install_upstart_scripts():
@@ -25,54 +53,55 @@ def install_upstart_scripts():
        shutil.copy(x, '/etc/init/')


@hooks.hook('install')
def install():
    utils.juju_log('INFO', 'Begin install hook.')
    utils.configure_source()
    utils.install('ceph', 'gdisk', 'ntp', 'btrfs-tools', 'python-ceph', 'xfsprogs')
    log('Begin install hook.')
    configure_source()
    apt_update(fatal=True)
    apt_install(packages=ceph.PACKAGES, fatal=True)
    install_upstart_scripts()
    utils.juju_log('INFO', 'End install hook.')
    log('End install hook.')


def emit_cephconf():
    cephcontext = {
        'auth_supported': utils.config_get('auth-supported'),
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': utils.config_get('fsid'),
        'fsid': config('fsid'),
        'version': ceph.get_ceph_version()
    }

    with open('/etc/ceph/ceph.conf', 'w') as cephconf:
        cephconf.write(utils.render_template('ceph.conf', cephcontext))
        cephconf.write(render_template('ceph.conf', cephcontext))

JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped'


@hooks.hook('config-changed')
def config_changed():
    utils.juju_log('INFO', 'Begin config-changed hook.')
    log('Begin config-changed hook.')

    utils.juju_log('INFO', 'Monitor hosts are ' + repr(get_mon_hosts()))
    log('Monitor hosts are ' + repr(get_mon_hosts()))

    # Pre-flight checks
    if not utils.config_get('fsid'):
        utils.juju_log('CRITICAL', 'No fsid supplied, cannot proceed.')
    if not config('fsid'):
        log('No fsid supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if not utils.config_get('monitor-secret'):
        utils.juju_log('CRITICAL',
                       'No monitor-secret supplied, cannot proceed.')
    if not config('monitor-secret'):
        log('No monitor-secret supplied, cannot proceed.', level=ERROR)
        sys.exit(1)
    if utils.config_get('osd-format') not in ceph.DISK_FORMATS:
        utils.juju_log('CRITICAL',
                       'Invalid OSD disk format configuration specified')
    if config('osd-format') not in ceph.DISK_FORMATS:
        log('Invalid OSD disk format configuration specified', level=ERROR)
        sys.exit(1)

    emit_cephconf()

    e_mountpoint = utils.config_get('ephemeral-unmount')
    e_mountpoint = config('ephemeral-unmount')
    if (e_mountpoint and
        filesystem_mounted(e_mountpoint)):
        subprocess.call(['umount', e_mountpoint])

    osd_journal = utils.config_get('osd-journal')
    osd_journal = config('osd-journal')
    if (osd_journal and
        not os.path.exists(JOURNAL_ZAPPED) and
        os.path.exists(osd_journal)):
@@ -80,31 +109,31 @@ def config_changed():
        with open(JOURNAL_ZAPPED, 'w') as zapped:
            zapped.write('DONE')

    for dev in utils.config_get('osd-devices').split(' '):
    for dev in config('osd-devices').split(' '):
        osdize(dev)

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and
        int(utils.config_get('monitor-count')) == 1):
        int(config('monitor-count')) == 1):
        bootstrap_monitor_cluster()
        ceph.wait_for_bootstrap()

    if ceph.is_bootstrapped():
        ceph.rescan_osd_devices()

    utils.juju_log('INFO', 'End config-changed hook.')
    log('End config-changed hook.')


def get_mon_hosts():
    hosts = []
    hosts.append('{}:6789'.format(utils.get_host_ip()))
    hosts.append('{}:6789'.format(get_host_ip()))

    for relid in utils.relation_ids('mon'):
        for unit in utils.relation_list(relid):
    for relid in relation_ids('mon'):
        for unit in related_units(relid):
            hosts.append(
                '{}:6789'.format(utils.get_host_ip(
                    utils.relation_get('private-address',
                                       unit, relid)))
                '{}:6789'.format(get_host_ip(
                    relation_get('private-address',
                                 unit, relid)))
            )

    hosts.sort()
@@ -112,7 +141,7 @@ def get_mon_hosts():


def update_monfs():
    hostname = utils.get_unit_hostname()
    hostname = get_unit_hostname()
    monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    upstart = '{}/upstart'.format(monfs)
    if (os.path.exists(monfs) and
@@ -124,20 +153,19 @@ def update_monfs():


def bootstrap_monitor_cluster():
    hostname = utils.get_unit_hostname()
    hostname = get_unit_hostname()
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    done = '{}/done'.format(path)
    upstart = '{}/upstart'.format(path)
    secret = utils.config_get('monitor-secret')
    secret = config('monitor-secret')
    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)

    if os.path.exists(done):
        utils.juju_log('INFO',
                       'bootstrap_monitor_cluster: mon already initialized.')
        log('bootstrap_monitor_cluster: mon already initialized.')
    else:
        # Ceph >= 0.61.3 needs this for ceph-mon fs creation
        os.makedirs('/var/run/ceph', mode=0755)
        os.makedirs(path)
        mkdir('/var/run/ceph', perms=0755)
        mkdir(path)
        # end changes for Ceph >= 0.61.3
        try:
            subprocess.check_call(['ceph-authtool', keyring,
@@ -162,7 +190,7 @@ def bootstrap_monitor_cluster():


def reformat_osd():
    if utils.config_get('osd-reformat'):
    if config('osd-reformat'):
        return True
    else:
        return False
@@ -170,31 +198,28 @@ def reformat_osd():

def osdize(dev):
    if not os.path.exists(dev):
        utils.juju_log('INFO',
                       'Path {} does not exist - bailing'.format(dev))
        log('Path {} does not exist - bailing'.format(dev))
        return

    if (ceph.is_osd_disk(dev) and not
        reformat_osd()):
        utils.juju_log('INFO',
                       'Looks like {} is already an OSD, skipping.'
        log('Looks like {} is already an OSD, skipping.'
            .format(dev))
        return

    if device_mounted(dev):
        utils.juju_log('INFO',
                       'Looks like {} is in use, skipping.'.format(dev))
        log('Looks like {} is in use, skipping.'.format(dev))
        return

    cmd = ['ceph-disk-prepare']
    # Later versions of ceph support more options
    if ceph.get_ceph_version() >= "0.48.3":
        osd_format = utils.config_get('osd-format')
        osd_format = config('osd-format')
        if osd_format:
            cmd.append('--fs-type')
            cmd.append(osd_format)
        cmd.append(dev)
        osd_journal = utils.config_get('osd-journal')
        osd_journal = config('osd-journal')
        if (osd_journal and
            os.path.exists(osd_journal)):
            cmd.append(osd_journal)
@@ -213,11 +238,13 @@ def filesystem_mounted(fs):
    return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0


@hooks.hook('mon-relation-departed')
@hooks.hook('mon-relation-joined')
def mon_relation():
    utils.juju_log('INFO', 'Begin mon-relation hook.')
    log('Begin mon-relation hook.')
    emit_cephconf()

    moncount = int(utils.config_get('monitor-count'))
    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        bootstrap_monitor_cluster()
        ceph.wait_for_bootstrap()
@@ -226,127 +253,115 @@ def mon_relation():
        notify_radosgws()
        notify_client()
    else:
        utils.juju_log('INFO',
                       'Not enough mons ({}), punting.'.format(
        log('Not enough mons ({}), punting.'.format(
            len(get_mon_hosts())))

    utils.juju_log('INFO', 'End mon-relation hook.')
    log('End mon-relation hook.')


def notify_osds():
    utils.juju_log('INFO', 'Begin notify_osds.')
    log('Begin notify_osds.')

    for relid in utils.relation_ids('osd'):
        utils.relation_set(fsid=utils.config_get('fsid'),
                           osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
                           auth=utils.config_get('auth-supported'),
                           rid=relid)
    for relid in relation_ids('osd'):
        relation_set(relation_id=relid,
                     fsid=config('fsid'),
                     osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
                     auth=config('auth-supported'))

    utils.juju_log('INFO', 'End notify_osds.')
    log('End notify_osds.')


def notify_radosgws():
    utils.juju_log('INFO', 'Begin notify_radosgws.')
    log('Begin notify_radosgws.')

    for relid in utils.relation_ids('radosgw'):
        utils.relation_set(radosgw_key=ceph.get_radosgw_key(),
                           auth=utils.config_get('auth-supported'),
                           rid=relid)
    for relid in relation_ids('radosgw'):
        relation_set(relation_id=relid,
                     radosgw_key=ceph.get_radosgw_key(),
                     auth=config('auth-supported'))

    utils.juju_log('INFO', 'End notify_radosgws.')
    log('End notify_radosgws.')


def notify_client():
    utils.juju_log('INFO', 'Begin notify_client.')
    log('Begin notify_client.')

    for relid in utils.relation_ids('client'):
        units = utils.relation_list(relid)
    for relid in relation_ids('client'):
        units = related_units(relid)
        if len(units) > 0:
            service_name = units[0].split('/')[0]
            utils.relation_set(key=ceph.get_named_key(service_name),
                               auth=utils.config_get('auth-supported'),
                               rid=relid)
            relation_set(relation_id=relid,
                         key=ceph.get_named_key(service_name),
                         auth=config('auth-supported'))

    utils.juju_log('INFO', 'End notify_client.')
    log('End notify_client.')


@hooks.hook('osd-relation-joined')
def osd_relation():
    utils.juju_log('INFO', 'Begin osd-relation hook.')
    log('Begin osd-relation hook.')

    if ceph.is_quorum():
        utils.juju_log('INFO',
                       'mon cluster in quorum - providing fsid & keys')
        utils.relation_set(fsid=utils.config_get('fsid'),
                           osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
                           auth=utils.config_get('auth-supported'))
        log('mon cluster in quorum - providing fsid & keys')
        relation_set(fsid=config('fsid'),
                     osd_bootstrap_key=ceph.get_osd_bootstrap_key(),
                     auth=config('auth-supported'))
    else:
        utils.juju_log('INFO',
                       'mon cluster not in quorum - deferring fsid provision')
        log('mon cluster not in quorum - deferring fsid provision')

    utils.juju_log('INFO', 'End osd-relation hook.')
    log('End osd-relation hook.')


@hooks.hook('radosgw-relation-joined')
def radosgw_relation():
    utils.juju_log('INFO', 'Begin radosgw-relation hook.')
    log('Begin radosgw-relation hook.')

    utils.install('radosgw')  # Install radosgw for admin tools
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))

    if ceph.is_quorum():
        utils.juju_log('INFO',
                       'mon cluster in quorum - \
                       providing radosgw with keys')
        utils.relation_set(radosgw_key=ceph.get_radosgw_key(),
                           auth=utils.config_get('auth-supported'))
        log('mon cluster in quorum - providing radosgw with keys')
        relation_set(radosgw_key=ceph.get_radosgw_key(),
                     auth=config('auth-supported'))
    else:
        utils.juju_log('INFO',
                       'mon cluster not in quorum - deferring key provision')
        log('mon cluster not in quorum - deferring key provision')

    utils.juju_log('INFO', 'End radosgw-relation hook.')
    log('End radosgw-relation hook.')


@hooks.hook('client-relation-joined')
def client_relation():
    utils.juju_log('INFO', 'Begin client-relation hook.')
    log('Begin client-relation hook.')

    if ceph.is_quorum():
        utils.juju_log('INFO',
                       'mon cluster in quorum - \
                       providing client with keys')
        service_name = os.environ['JUJU_REMOTE_UNIT'].split('/')[0]
        utils.relation_set(key=ceph.get_named_key(service_name),
                           auth=utils.config_get('auth-supported'))
        log('mon cluster in quorum - providing client with keys')
        service_name = remote_unit().split('/')[0]
        relation_set(key=ceph.get_named_key(service_name),
                     auth=config('auth-supported'))
    else:
        utils.juju_log('INFO',
                       'mon cluster not in quorum - deferring key provision')
        log('mon cluster not in quorum - deferring key provision')

    utils.juju_log('INFO', 'End client-relation hook.')
    log('End client-relation hook.')


@hooks.hook('upgrade-charm')
def upgrade_charm():
    utils.juju_log('INFO', 'Begin upgrade-charm hook.')
    log('Begin upgrade-charm hook.')
    emit_cephconf()
    utils.install('xfsprogs')
    apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True)
    install_upstart_scripts()
    update_monfs()
    utils.juju_log('INFO', 'End upgrade-charm hook.')
    log('End upgrade-charm hook.')


@hooks.hook('start')
def start():
    # In case we're being redeployed to the same machines, try
    # to make sure everything is running as soon as possible.
    subprocess.call(['start', 'ceph-mon-all-starter'])
    subprocess.call(['start', 'ceph-mon-all'])
    ceph.rescan_osd_devices()


utils.do_hooks({
    'config-changed': config_changed,
    'install': install,
    'mon-relation-departed': mon_relation,
    'mon-relation-joined': mon_relation,
    'osd-relation-joined': osd_relation,
    'radosgw-relation-joined': radosgw_relation,
    'client-relation-joined': client_relation,
    'start': start,
    'upgrade-charm': upgrade_charm,
})

sys.exit(0)
try:
    hooks.execute(sys.argv)
except UnregisteredHookError as e:
    log('Unknown hook {} - skipping.'.format(e))
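Editorial note (not part of the commit): get_mon_hosts() and the notify_*() helpers above all follow the same relation traversal pattern from hookenv once the old utils wrappers are dropped. A condensed sketch of that pattern (the 'mon' and 'osd' relation names come from this charm; the function names here are illustrative):

# Minimal sketch (not part of the commit): relation traversal with hookenv.
from charmhelpers.core.hookenv import (
    relation_ids,
    related_units,
    relation_get,
    relation_set,
)


def peer_addresses():
    # Walk every 'mon' relation and every unit on it, reading one key.
    addrs = []
    for relid in relation_ids('mon'):
        for unit in related_units(relid):
            addrs.append(relation_get('private-address', unit, relid))
    return addrs


def publish(fsid):
    # Write settings back on each 'osd' relation, addressed by id.
    for relid in relation_ids('osd'):
        relation_set(relation_id=relid, fsid=fsid)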