[yolanda] sync charmhelpers and add syslog support

This commit is contained in:
Edward Hope-Morley 2014-02-20 18:25:58 +00:00
commit af8854f447
22 changed files with 1148 additions and 106 deletions

View File

@ -5,4 +5,5 @@ include:
- fetch
- contrib.openstack
- contrib.hahelpers
- contrib.storage
- payload.execd

View File

@ -26,6 +26,12 @@ options:
description: |
Default role for Horizon operations that will be created in
Keystone upon introduction of an identity-service relation.
use-syslog:
type: boolean
default: False
description: |
By default, all services will log to their corresponding log files.
Setting this to True will force all services to log to syslog instead.
vip:
type: string
description: "Virtual IP to use to front openstack dashboard ha configuration"

View File

@ -126,17 +126,17 @@ def determine_api_port(public_port):
return public_port - (i * 10)
def determine_haproxy_port(public_port):
def determine_apache_port(public_port):
'''
Description: Determine correct proxy listening port based on public IP +
existence of HTTPS reverse proxy.
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
returns: int: the correct listening port for the service
'''
i = 0
if https():
if len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
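
A quick sketch of the resulting port arithmetic (443 is only an example public port):

    determine_apache_port(443)  # -> 443 when the unit has no peers and is not clustered
    determine_apache_port(443)  # -> 433 when peered or clustered; apache shifts
                                #    down 10 so haproxy can bind the public port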

View File

@ -0,0 +1,17 @@
''' Helper for managing alternatives for file conflict resolution '''
import subprocess
import shutil
import os
def install_alternative(name, target, source, priority=50):
''' Install alternative configuration '''
if (os.path.exists(target) and not os.path.islink(target)):
# Move existing file/directory away before installing
shutil.move(target, '{}.bak'.format(target))
cmd = [
'update-alternatives', '--force', '--install',
target, name, source, str(priority)
]
subprocess.check_call(cmd)
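
Usage is a thin wrapper over update-alternatives; a minimal sketch with invented paths (not taken from this commit):

    install_alternative(name='horizon_conf',
                        target='/etc/openstack-dashboard/local_settings.py',
                        source='/var/lib/charm/horizon/local_settings.py')
    # runs: update-alternatives --force --install <target> <name> <source> 50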

View File

@ -23,15 +23,12 @@ from charmhelpers.core.hookenv import (
unit_get,
unit_private_ip,
ERROR,
WARNING,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
determine_haproxy_port,
https,
is_clustered,
peer_units,
)
from charmhelpers.contrib.hahelpers.apache import (
@ -68,6 +65,43 @@ def context_complete(ctxt):
return True
def config_flags_parser(config_flags):
if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in xrange(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this is not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
log("invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
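
A worked example of the comma/equals handling (input invented for illustration):

    config_flags_parser('key1=value1,key2=subval1,subval2')
    # -> {'key1': 'value1', 'key2': 'subval1,subval2'}
    # the trailing comma-separated run stays attached to the last key, which is
    # how OpenStack options whose values are themselves lists survive intact
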
class OSContextGenerator(object):
interfaces = []
@ -182,10 +216,17 @@ class AMQPContext(OSContextGenerator):
# Sufficient information found = break out!
break
# Used for active/active rabbitmq >= grizzly
ctxt['rabbitmq_hosts'] = []
for unit in related_units(rid):
ctxt['rabbitmq_hosts'].append(relation_get('private-address',
rid=rid, unit=unit))
if ('clustered' not in ctxt or relation_get('ha-vip-only') == 'True') and \
len(related_units(rid)) > 1:
if relation_get('ha_queues'):
ctxt['rabbitmq_ha_queues'] = relation_get('ha_queues')
else:
ctxt['rabbitmq_ha_queues'] = False
rabbitmq_hosts = []
for unit in related_units(rid):
rabbitmq_hosts.append(relation_get('private-address',
rid=rid, unit=unit))
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
if not context_complete(ctxt):
return {}
else:
@ -286,6 +327,7 @@ class ImageServiceContext(OSContextGenerator):
class ApacheSSLContext(OSContextGenerator):
"""
Generates a context for an apache vhost configuration that configures
HTTPS reverse proxying for one or many endpoints. Generated context
@ -341,11 +383,9 @@ class ApacheSSLContext(OSContextGenerator):
'private_address': unit_get('private-address'),
'endpoints': []
}
for ext_port in self.external_ports:
if peer_units() or is_clustered():
int_port = determine_haproxy_port(ext_port)
else:
int_port = determine_api_port(ext_port)
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
return ctxt
@ -385,16 +425,33 @@ class NeutronContext(object):
def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
ovs_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'ovs',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config
}
return ovs_ctxt
def nvp_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
nvp_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'nvp',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': config
}
return nvp_ctxt
def __call__(self):
self._ensure_packages()
@ -408,34 +465,40 @@ class NeutronContext(object):
if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt())
elif self.plugin == 'nvp':
ctxt.update(self.nvp_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags
self._save_flag_file()
return ctxt
class OSConfigFlagContext(OSContextGenerator):
'''
Responsible adding user-defined config-flags in charm config to a
to a template context.
'''
"""
Responsible for adding user-defined config-flags in charm config to a
template context.
NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some OpenStack config files support
comma-separated lists as values.
"""
def __call__(self):
config_flags = config('config-flags')
if not config_flags or config_flags in ['None', '']:
if not config_flags:
return {}
config_flags = config_flags.split(',')
flags = {}
for flag in config_flags:
if '=' not in flag:
log('Improperly formatted config-flag, expected k=v '
'got %s' % flag, level=WARNING)
continue
k, v = flag.split('=')
flags[k.strip()] = v
ctxt = {'user_config_flags': flags}
return ctxt
flags = config_flags_parser(config_flags)
return {'user_config_flags': flags}
class SubordinateConfigContext(OSContextGenerator):
"""
Responsible for inspecting relations to subordinates that
may be exporting required config via a json blob.
@ -476,6 +539,7 @@ class SubordinateConfigContext(OSContextGenerator):
}
"""
def __init__(self, service, config_file, interface):
"""
:param service : Service name key to query in any subordinate
@ -520,3 +584,12 @@ class SubordinateConfigContext(OSContextGenerator):
ctxt['sections'] = {}
return ctxt
class SyslogContext(OSContextGenerator):
def __call__(self):
ctxt = {
'use_syslog': config('use-syslog')
}
return ctxt
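
The context is deliberately minimal; a sketch of what it hands to the templates:

    SyslogContext()()  # -> {'use_syslog': True} (or False), consumed by the
                       # {% if use_syslog %} blocks in the templates below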

View File

@ -34,13 +34,23 @@ def quantum_plugins():
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron')],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
@ -60,13 +70,23 @@ def neutron_plugins():
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
['quantum-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron')],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
}
}

View File

@ -13,25 +13,35 @@ from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
ERROR,
INFO
)
from charmhelpers.core.host import (
lsb_release,
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse')
])
@ -57,6 +67,8 @@ SWIFT_CODENAMES = OrderedDict([
('1.9.0', 'havana'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
@ -67,7 +79,7 @@ def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src == 'distro':
if src in ['distro', 'distro-proposed']:
try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
@ -190,7 +202,7 @@ def os_release(package, base='essex'):
def import_key(keyid):
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
@ -202,6 +214,10 @@ def configure_installation_source(rel):
'''Configure apt installation source.'''
if rel == 'distro':
return
elif rel == 'distro-proposed':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
@ -245,6 +261,9 @@ def configure_installation_source(rel):
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
}
try:
@ -299,6 +318,62 @@ def openstack_upgrade_available(package):
return apt.version_compare(available_vers, cur_vers) == 1
def ensure_block_device(block_device):
'''
Confirm block_device exists; create it as a loopback device if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('ensure_block_device(): Missing required input: '
'block_device=%s.' % block_device)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev)
return bdev
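
The three input shapes it accepts, sketched with invented devices:

    ensure_block_device('vdb')           # -> '/dev/vdb'
    ensure_block_device('/dev/vdc')      # -> '/dev/vdc', used verbatim
    ensure_block_device('/srv/img|10G')  # 10G loopback file -> e.g. '/dev/loop0'
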
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
def is_ip(address):
"""
Returns True if address is a valid IP address.
@ -340,7 +415,7 @@ def get_host_ip(hostname):
return ns_query(hostname)
def get_hostname(address):
def get_hostname(address, fqdn=True):
"""
Resolves hostname for given IP, or returns the input
if it is already a hostname.
@ -359,7 +434,11 @@ def get_hostname(address):
if not result:
return None
# strip trailing .
if result.endswith('.'):
return result[:-1]
return result
if fqdn:
# strip trailing .
if result.endswith('.'):
return result[:-1]
else:
return result
else:
return result.split('.')[0]
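
A sketch of the new fqdn switch (address and names invented):

    get_hostname('10.0.0.1')              # -> 'myhost.example.com' (trailing '.' stripped)
    get_hostname('10.0.0.1', fqdn=False)  # -> 'myhost'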

View File

@ -0,0 +1,383 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import os
import shutil
import json
import time
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
WARNING,
ERROR
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
service_running,
umount,
)
from charmhelpers.fetch import (
apt_install,
)
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
CEPH_CONF = """[global]
auth supported = {auth}
keyring = {keyring}
mon host = {mon_hosts}
"""
def install():
''' Basic Ceph client installation '''
ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
''' Check to see if a RADOS block device exists '''
try:
out = check_output(['rbd', 'list', '--id', service,
'--pool', pool])
except CalledProcessError:
return False
else:
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
''' Create a new RADOS block device '''
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
check_call(cmd)
def pool_exists(service, name):
''' Check to see if a RADOS pool already exists '''
try:
out = check_output(['rados', '--id', service, 'lspools'])
except CalledProcessError:
return False
else:
return name in out
def get_osds(service):
'''
Return a list of all Ceph Object Storage Daemons
currently in the cluster
'''
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls', '--format=json']))
else:
return None
def create_pool(service, name, replicas=2):
''' Create a new RADOS pool '''
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 / replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'create',
name, str(pgnum)
]
check_call(cmd)
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'set', name,
'size', str(replicas)
]
check_call(cmd)
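
The placement-group sizing follows the upstream rule of thumb; for example, with 6 OSDs and the default 2 replicas:

    pgnum = len(osds) * 100 / replicas  # 6 * 100 / 2 = 300 placement groups
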
def delete_pool(service, name):
''' Delete a RADOS pool from ceph '''
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'delete',
name, '--yes-i-really-really-mean-it'
]
check_call(cmd)
def _keyfile_path(service):
return KEYFILE.format(service)
def _keyring_path(service):
return KEYRING.format(service)
def create_keyring(service, key):
''' Create a new Ceph keyring containing the given key '''
keyring = _keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
return
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.{}'.format(service),
'--add-key={}'.format(key)
]
check_call(cmd)
log('ceph: Created new keyring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
''' Create a file containing key '''
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
''' Query named relation 'ceph' to determine current nodes '''
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth):
''' Perform basic configuration of Ceph '''
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF.format(auth=auth,
keyring=_keyring_path(service),
mon_hosts=",".join(map(str, hosts))))
modprobe('rbd')
def image_mapped(name):
''' Determine whether a RADOS block device is mapped locally '''
try:
out = check_output(['rbd', 'showmapped'])
except CalledProcessError:
return False
else:
return name in out
def map_block_storage(service, pool, image):
''' Map a RADOS block device for local use '''
cmd = [
'rbd',
'map',
'{}/{}'.format(pool, image),
'--user',
service,
'--secret',
_keyfile_path(service),
]
check_call(cmd)
def filesystem_mounted(fs):
''' Determine whether a filesystem is already mounted '''
return fs in [f for f, m in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
''' Make a new filesystem on the specified block device '''
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst):
''' Migrate data in data_src_dst to blk_device and then remount '''
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
copy_files(data_src_dst, '/mnt')
# umount block device
umount('/mnt')
# Grab user/group ID's from original source
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
# TODO: persist is currently a NO-OP in core.host
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
os.chown(data_src_dst, uid, gid)
# TODO: re-use
def modprobe(module):
''' Load a kernel module and configure for auto-load on reboot '''
log('ceph: Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
modules.write(module)
def copy_files(src, dst, symlinks=False, ignore=None):
''' Copy files from src to dst '''
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
NOTE: This function must only be called from a single service unit for
the same rbd_img, otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being re-mounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool {}.'.format(pool))
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image ({}).'.format(rbd_img))
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if it's executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if service_running(svc):
log('ceph: Stopping service {} prior to migrating data.'
.format(svc))
service_stop(svc)
place_data_on_block_device(blk_device, mount_point)
for svc in system_services:
log('ceph: Starting service {} after migrating data.'
.format(svc))
service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None):
'''
Ensures a ceph keyring is created for a named service
and optionally ensures user and group ownership.
Returns False if no ceph key is available in relation state.
'''
key = None
for rid in relation_ids('ceph'):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
return False
create_keyring(service=service, key=key)
keyring = _keyring_path(service)
if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring])
return True
def ceph_version():
''' Retrieve the local version of ceph '''
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None

View File

@ -0,0 +1,62 @@
import os
import re
from subprocess import (
check_call,
check_output,
)
##################################################
# loopback device helpers.
##################################################
def loopback_devices():
'''
Parse through 'losetup -a' output to determine currently mapped
loopback devices. Output is expected to look like:
/dev/loop0: [0807]:961814 (/tmp/my.img)
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
cmd = ['losetup', '-a']
devs = [d.strip().split(' ') for d in
check_output(cmd).splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
return loopbacks
def create_loopback(file_path):
'''
Create a loopback device for a given backing file.
:returns: str: Full path to new loopback device (eg, /dev/loop0)
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in loopback_devices().iteritems():
if f == file_path:
return d
def ensure_loopback_device(path, size):
'''
Ensure a loopback device exists for a given backing file path and size.
If a loopback device is not already mapped to the file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in loopback_devices().iteritems():
if f == path:
return d
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd)
return create_loopback(path)
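
A typical call, with an invented backing file:

    ensure_loopback_device('/srv/test.img', '5G')  # creates and maps the file if
                                                   # needed; returns e.g. '/dev/loop0'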

View File

@ -0,0 +1,88 @@
from subprocess import (
CalledProcessError,
check_call,
check_output,
Popen,
PIPE,
)
##################################################
# LVM helpers.
##################################################
def deactivate_lvm_volume_group(block_device):
'''
Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume
'''
vg = list_lvm_volume_group(block_device)
if vg:
cmd = ['vgchange', '-an', vg]
check_call(cmd)
def is_lvm_physical_volume(block_device):
'''
Determine whether a block device is initialized as an LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: boolean: True if block device is a PV, False if not.
'''
try:
check_output(['pvdisplay', block_device])
return True
except CalledProcessError:
return False
def remove_lvm_physical_volume(block_device):
'''
Remove LVM PV signatures from a given block device.
:param block_device: str: Full path of block device to scrub.
'''
p = Popen(['pvremove', '-ff', block_device],
stdin=PIPE)
p.communicate(input='y\n')
def list_lvm_volume_group(block_device):
'''
List LVM volume group associated with a given block device.
Assumes block device is a valid LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: str: Name of volume group associated with block device or None
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
if l.strip().startswith('VG Name'):
vg = ' '.join(l.split()).split(' ').pop()
return vg
def create_lvm_physical_volume(block_device):
'''
Initialize a block device as an LVM physical volume.
:param block_device: str: Full path of block device to initialize.
'''
check_call(['pvcreate', block_device])
def create_lvm_volume_group(volume_group, block_device):
'''
Create an LVM volume group backed by a given block device.
Assumes block device has already been initialized as an LVM PV.
:param volume_group: str: Name of volume group to create.
:param block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
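
These helpers back the clean_storage() routine shown earlier in this commit; the teardown order matters (device name invented):

    if is_lvm_physical_volume('/dev/vdb'):
        deactivate_lvm_volume_group('/dev/vdb')  # vgchange -an first...
        remove_lvm_physical_volume('/dev/vdb')   # ...then pvremove -ff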

View File

@ -0,0 +1,25 @@
from os import stat
from stat import S_ISBLK
from subprocess import (
check_call
)
def is_block_device(path):
'''
Confirm device at path is a valid block device node.
:returns: boolean: True if path is a block device, False if not.
'''
return S_ISBLK(stat(path).st_mode)
def zap_disk(block_device):
'''
Clear a block device of its partition table. Relies on sgdisk, which is
installed as part of the 'gdisk' package in Ubuntu.
:param block_device: str: Full path of block device to clean.
'''
check_call(['sgdisk', '--zap-all', '--mbrtogpt', block_device])
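
Together with is_block_device() this gives clean_storage() its non-LVM path (device name invented):

    if is_block_device('/dev/vdb'):
        zap_disk('/dev/vdb')  # sgdisk --zap-all --mbrtogpt /dev/vdb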

View File

@ -8,7 +8,9 @@ import os
import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
CRITICAL = "CRITICAL"
ERROR = "ERROR"
@ -21,7 +23,7 @@ cache = {}
def cached(func):
''' Cache return values for multiple executions of func + args
"""Cache return values for multiple executions of func + args
For example:
@ -32,7 +34,7 @@ def cached(func):
unit_get('test')
will cache the result of unit_get + 'test' for future calls.
'''
"""
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
@ -46,8 +48,8 @@ def cached(func):
def flush(key):
''' Flushes any entries from function cache where the
key is found in the function+args '''
"""Flushes any entries from function cache where the
key is found in the function+args """
flush_list = []
for item in cache:
if key in item:
@ -57,7 +59,7 @@ def flush(key):
def log(message, level=None):
"Write a message to the juju log"
"""Write a message to the juju log"""
command = ['juju-log']
if level:
command += ['-l', level]
@ -66,7 +68,7 @@ def log(message, level=None):
class Serializable(UserDict.IterableUserDict):
"Wrapper, an object that can be serialized to yaml or json"
"""Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj):
# wrap the object
@ -96,11 +98,11 @@ class Serializable(UserDict.IterableUserDict):
self.data = state
def json(self):
"Serialize the object to json"
"""Serialize the object to json"""
return json.dumps(self.data)
def yaml(self):
"Serialize the object to yaml"
"""Serialize the object to yaml"""
return yaml.dump(self.data)
@ -119,38 +121,43 @@ def execution_environment():
def in_relation_hook():
"Determine whether we're running in a relation hook"
"""Determine whether we're running in a relation hook"""
return 'JUJU_RELATION' in os.environ
def relation_type():
"The scope for the current relation hook"
"""The scope for the current relation hook"""
return os.environ.get('JUJU_RELATION', None)
def relation_id():
"The relation ID for the current relation hook"
"""The relation ID for the current relation hook"""
return os.environ.get('JUJU_RELATION_ID', None)
def local_unit():
"Local unit ID"
"""Local unit ID"""
return os.environ['JUJU_UNIT_NAME']
def remote_unit():
"The remote unit for the current relation hook"
"""The remote unit for the current relation hook"""
return os.environ['JUJU_REMOTE_UNIT']
def service_name():
"The name service group this unit belongs to"
"""The name service group this unit belongs to"""
return local_unit().split('/')[0]
def hook_name():
"""The name of the currently executing hook"""
return os.path.basename(sys.argv[0])
@cached
def config(scope=None):
"Juju charm configuration"
"""Juju charm configuration"""
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
@ -163,6 +170,7 @@ def config(scope=None):
@cached
def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
_args = ['relation-get', '--format=json']
if rid:
_args.append('-r')
@ -174,9 +182,14 @@ def relation_get(attribute=None, unit=None, rid=None):
return json.loads(subprocess.check_output(_args))
except ValueError:
return None
except CalledProcessError, e:
if e.returncode == 2:
return None
raise
def relation_set(relation_id=None, relation_settings={}, **kwargs):
"""Set relation information for the current unit"""
relation_cmd_line = ['relation-set']
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
@ -192,7 +205,7 @@ def relation_set(relation_id=None, relation_settings={}, **kwargs):
@cached
def relation_ids(reltype=None):
"A list of relation_ids"
"""A list of relation_ids"""
reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
@ -203,7 +216,7 @@ def relation_ids(reltype=None):
@cached
def related_units(relid=None):
"A list of related units"
"""A list of related units"""
relid = relid or relation_id()
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
@ -213,7 +226,7 @@ def related_units(relid=None):
@cached
def relation_for_unit(unit=None, rid=None):
"Get the json represenation of a unit's relation"
"""Get the json represenation of a unit's relation"""
unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid)
for key in relation:
@ -225,7 +238,7 @@ def relation_for_unit(unit=None, rid=None):
@cached
def relations_for_id(relid=None):
"Get relations of a specific relation ID"
"""Get relations of a specific relation ID"""
relation_data = []
relid = relid or relation_ids()
for unit in related_units(relid):
@ -237,7 +250,7 @@ def relations_for_id(relid=None):
@cached
def relations_of_type(reltype=None):
"Get relations of a specific type"
"""Get relations of a specific type"""
relation_data = []
reltype = reltype or relation_type()
for relid in relation_ids(reltype):
@ -249,7 +262,7 @@ def relations_of_type(reltype=None):
@cached
def relation_types():
"Get a list of relation types supported by this charm"
"""Get a list of relation types supported by this charm"""
charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf)
@ -264,6 +277,7 @@ def relation_types():
@cached
def relations():
"""Get a nested dictionary of relation data for all related units"""
rels = {}
for reltype in relation_types():
relids = {}
@ -277,15 +291,35 @@ def relations():
return rels
@cached
def is_relation_made(relation, keys='private-address'):
'''
Determine whether a relation is established by checking for
presence of key(s). If a list of keys is provided, they
must all be present for the relation to be identified as made.
'''
if isinstance(keys, str):
keys = [keys]
for r_id in relation_ids(relation):
for unit in related_units(r_id):
context = {}
for k in keys:
context[k] = relation_get(k, rid=r_id,
unit=unit)
if None not in context.values():
return True
return False
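
A sketch of how a charm would use the new helper (relation name and keys invented for illustration):

    if is_relation_made('identity-service',
                        keys=['service_host', 'service_port']):
        ...  # some remote unit has set both keys, so the relation is usable
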
def open_port(port, protocol="TCP"):
"Open a service network port"
"""Open a service network port"""
_args = ['open-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def close_port(port, protocol="TCP"):
"Close a service network port"
"""Close a service network port"""
_args = ['close-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
@ -293,6 +327,7 @@ def close_port(port, protocol="TCP"):
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
_args = ['unit-get', '--format=json', attribute]
try:
return json.loads(subprocess.check_output(_args))
@ -301,22 +336,46 @@ def unit_get(attribute):
def unit_private_ip():
"""Get this unit's private IP address"""
return unit_get('private-address')
class UnregisteredHookError(Exception):
"""Raised when an undefined hook is called"""
pass
class Hooks(object):
"""A convenient handler for hook functions.
Example:
hooks = Hooks()
# register a hook, taking its name from the function name
@hooks.hook()
def install():
...
# register a hook, providing a custom hook name
@hooks.hook("config-changed")
def config_changed():
...
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self):
super(Hooks, self).__init__()
self._hooks = {}
def register(self, name, function):
"""Register a hook"""
self._hooks[name] = function
def execute(self, args):
"""Execute a registered hook based on args[0]"""
hook_name = os.path.basename(args[0])
if hook_name in self._hooks:
self._hooks[hook_name]()
@ -324,6 +383,7 @@ class Hooks(object):
raise UnregisteredHookError(hook_name)
def hook(self, *hook_names):
"""Decorator, registering them as hooks"""
def wrapper(decorated):
for hook_name in hook_names:
self.register(hook_name, decorated)
@ -337,4 +397,5 @@ class Hooks(object):
def charm_dir():
"""Return the root directory of the current charm"""
return os.environ.get('CHARM_DIR')

View File

@ -19,18 +19,22 @@ from hookenv import log
def service_start(service_name):
"""Start a system service"""
return service('start', service_name)
def service_stop(service_name):
"""Stop a system service"""
return service('stop', service_name)
def service_restart(service_name):
"""Restart a system service"""
return service('restart', service_name)
def service_reload(service_name, restart_on_failure=False):
"""Reload a system service, optionally falling back to restart if reload fails"""
service_result = service('reload', service_name)
if not service_result and restart_on_failure:
service_result = service('restart', service_name)
@ -38,11 +42,13 @@ def service_reload(service_name, restart_on_failure=False):
def service(action, service_name):
"""Control a system service"""
cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0
def service_running(service):
"""Determine whether a system service is running"""
try:
output = subprocess.check_output(['service', service, 'status'])
except subprocess.CalledProcessError:
@ -55,7 +61,7 @@ def service_running(service):
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user"""
"""Add a user to the system"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
@ -138,7 +144,7 @@ def write_file(path, content, owner='root', group='root', perms=0444):
def mount(device, mountpoint, options=None, persist=False):
'''Mount a filesystem'''
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
@ -155,7 +161,7 @@ def mount(device, mountpoint, options=None, persist=False):
def umount(mountpoint, persist=False):
'''Unmount a filesystem'''
"""Unmount a filesystem"""
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
@ -169,7 +175,7 @@ def umount(mountpoint, persist=False):
def mounts():
'''List of all mounted volumes as [[mountpoint,device],[...]]'''
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split()
@ -178,7 +184,7 @@ def mounts():
def file_hash(path):
''' Generate an md5 hash of the contents of 'path' or None if not found '''
"""Generate an md5 hash of the contents of 'path' or None if not found"""
if os.path.exists(path):
h = hashlib.md5()
with open(path, 'r') as source:
@ -188,8 +194,8 @@ def file_hash(path):
return None
def restart_on_change(restart_map):
''' Restart services based on configuration files changing
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing
This function is used as a decorator, for example
@ -202,7 +208,7 @@ def restart_on_change(restart_map):
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function.
'''
"""
def wrap(f):
def wrapped_f(*args):
checksums = {}
@ -213,14 +219,20 @@ def restart_on_change(restart_map):
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
for service_name in list(OrderedDict.fromkeys(restarts)):
service('restart', service_name)
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return wrapped_f
return wrap
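
Mirroring the docstring's own example, the new stopstart flag swaps the single restart for a full stop/start cycle:

    @restart_on_change({'/etc/ceph/ceph.conf': ['cinder-api', 'cinder-volume']},
                       stopstart=True)
    def ceph_client_changed():
        ...
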
def lsb_release():
'''Return /etc/lsb-release in a dict'''
"""Return /etc/lsb-release in a dict"""
d = {}
with open('/etc/lsb-release', 'r') as lsb:
for l in lsb:
@ -230,7 +242,7 @@ def lsb_release():
def pwgen(length=None):
'''Generate a random password.'''
"""Generate a random password."""
if length is None:
length = random.choice(range(35, 45))
alphanumeric_chars = [
@ -239,3 +251,47 @@ def pwgen(length=None):
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
def list_nics(nic_type):
'''Return a list of nics of given type(s)'''
if isinstance(nic_type, basestring):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
interfaces.append(line.split()[1].replace(":", ""))
return interfaces
def set_nic_mtu(nic, mtu):
'''Set MTU on a network interface'''
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).split('\n')
mtu = ""
for line in ip_output:
words = line.split()
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd)
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
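
A sketch of the new NIC helpers (interface names and values are illustrative):

    list_nics('eth')             # -> ['eth0', 'eth1']
    get_nic_mtu('eth0')          # -> '1500'
    set_nic_mtu('eth0', '9000')  # ip link set eth0 mtu 9000
    get_nic_hwaddr('eth0')       # -> 'fa:16:3e:aa:bb:cc'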

View File

@ -13,6 +13,7 @@ from charmhelpers.core.hookenv import (
log,
)
import apt_pkg
import os
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@ -20,6 +21,40 @@ deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
CLOUD_ARCHIVE_POCKETS = {
# Folsom
'folsom': 'precise-updates/folsom',
'precise-folsom': 'precise-updates/folsom',
'precise-folsom/updates': 'precise-updates/folsom',
'precise-updates/folsom': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'precise-folsom/proposed': 'precise-proposed/folsom',
'precise-proposed/folsom': 'precise-proposed/folsom',
# Grizzly
'grizzly': 'precise-updates/grizzly',
'precise-grizzly': 'precise-updates/grizzly',
'precise-grizzly/updates': 'precise-updates/grizzly',
'precise-updates/grizzly': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'precise-grizzly/proposed': 'precise-proposed/grizzly',
'precise-proposed/grizzly': 'precise-proposed/grizzly',
# Havana
'havana': 'precise-updates/havana',
'precise-havana': 'precise-updates/havana',
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
}
def filter_installed_packages(packages):
@ -40,8 +75,10 @@ def filter_installed_packages(packages):
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
options = options or []
cmd = ['apt-get', '-y']
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
@ -50,10 +87,14 @@ def apt_install(packages, options=None, fatal=False):
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
subprocess.check_call(cmd)
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd)
subprocess.call(cmd, env=env)
def apt_update(fatal=False):
@ -67,7 +108,7 @@ def apt_update(fatal=False):
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '-y', 'purge']
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
cmd.append(packages)
else:
@ -79,22 +120,45 @@ def apt_purge(packages, fatal=False):
subprocess.call(cmd)
def apt_hold(packages, fatal=False):
"""Hold one or more packages"""
cmd = ['apt-mark', 'hold']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def add_source(source, key=None):
if ((source.startswith('ppa:') or
source.startswith('http:'))):
if (source.startswith('ppa:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(pocket))
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'import', key])
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'keyserver.ubuntu.com', '--recv',
key])
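
Examples of source strings the extended add_source() now accepts (the deb line, PPA, and key are invented):

    add_source('cloud:icehouse')  # resolved through CLOUD_ARCHIVE_POCKETS
    add_source('deb http://example.com/ubuntu precise main')
    add_source('ppa:some-team/some-ppa', key='ABCD1234')
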
class SourceConfigError(Exception):
@ -118,8 +182,11 @@ def configure_sources(update=False,
Note that 'null' (a.k.a. None) should not be quoted.
"""
sources = safe_load(config(sources_var))
keys = safe_load(config(keys_var))
if isinstance(sources, basestring) and isinstance(keys, basestring):
keys = config(keys_var)
if keys is not None:
keys = safe_load(keys)
if isinstance(sources, basestring) and (
keys is None or isinstance(keys, basestring)):
add_source(sources, keys)
else:
if not len(sources) == len(keys):
@ -172,7 +239,9 @@ def install_from_config(config_var_name):
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
@ -200,10 +269,13 @@ def plugins(fetch_handlers=None):
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(importlib.import_module(package), classname)
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
# Skip missing plugins so that they can be ommitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(handler_name))
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list

View File

@ -12,6 +12,7 @@ except ImportError:
apt_install("python-bzrlib")
from bzrlib.branch import Branch
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source):
@ -46,4 +47,3 @@ class BzrUrlFetchHandler(BaseFetchHandler):
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir

View File

@ -1,6 +1,7 @@
# vim: set ts=4:et
import horizon_contexts
import charmhelpers.contrib.openstack.templating as templating
import charmhelpers.contrib.openstack.context as context
import subprocess
import os
from collections import OrderedDict
@ -38,15 +39,18 @@ TEMPLATES = 'templates'
CONFIG_FILES = OrderedDict([
(LOCAL_SETTINGS, {
'hook_contexts': [horizon_contexts.HorizonContext(),
horizon_contexts.IdentityServiceContext()],
horizon_contexts.IdentityServiceContext(),
context.SyslogContext()],
'services': ['apache2']
}),
(APACHE_CONF, {
'hook_contexts': [horizon_contexts.HorizonContext()],
'hook_contexts': [horizon_contexts.HorizonContext(),
context.SyslogContext()],
'services': ['apache2'],
}),
(APACHE_24_CONF, {
'hook_contexts': [horizon_contexts.HorizonContext()],
'hook_contexts': [horizon_contexts.HorizonContext(),
context.SyslogContext()],
'services': ['apache2'],
}),
(APACHE_SSL, {

View File

@ -1 +1 @@
31
32

View File

@ -1,8 +1,10 @@
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
{% if use_syslog %}
from logging.handlers import SysLogHandler
{% endif %}
DEBUG = {{ debug }}
TEMPLATE_DEBUG = DEBUG
@ -176,6 +178,12 @@ LOGGING = {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
{% if use_syslog %}
'syslog': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
},
{% endif %}
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
@ -194,27 +202,59 @@ LOGGING = {
'propagate': False,
},
'horizon': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'openstack_dashboard': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'openstack_auth': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'novaclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'keystoneclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'glanceclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'nose.plugins.manager': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
}
}

View File

@ -1,6 +1,9 @@
import os
from django.utils.translation import ugettext_lazy as _
{% if use_syslog %}
from logging.handlers import SysLogHandler
{% endif %}
from openstack_dashboard import exceptions
@ -248,6 +251,12 @@ LOGGING = {
'level': 'INFO',
'class': 'logging.StreamHandler',
},
{% if use_syslog %}
'syslog': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
}
{% endif %}
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
@ -261,35 +270,75 @@ LOGGING = {
'propagate': False,
},
'horizon': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'openstack_dashboard': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'openstack_auth': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'novaclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'cinderclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'keystoneclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'glanceclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'heatclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'nose.plugins.manager': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
}
}

View File

@ -79,42 +79,48 @@ class TestHorizonContexts(CharmTestCase):
self.assertEquals(horizon_contexts.HorizonContext()(),
{'compress_offline': True, 'debug': False,
'default_role': 'Member', 'webroot': '/horizon',
'ubuntu_theme': True, 'secret': 'secret'})
'ubuntu_theme': True,
'secret': 'secret'})
def test_HorizonContext_debug(self):
self.test_config.set('debug', 'yes')
self.assertEquals(horizon_contexts.HorizonContext()(),
{'compress_offline': True, 'debug': True,
'default_role': 'Member', 'webroot': '/horizon',
'ubuntu_theme': True, 'secret': 'secret'})
'ubuntu_theme': True,
'secret': 'secret'})
def test_HorizonContext_theme(self):
self.test_config.set('ubuntu-theme', False)
self.assertEquals(horizon_contexts.HorizonContext()(),
{'compress_offline': True, 'debug': False,
'default_role': 'Member', 'webroot': '/horizon',
'ubuntu_theme': False, 'secret': 'secret'})
'ubuntu_theme': False,
'secret': 'secret'})
def test_HorizonContext_compression(self):
self.test_config.set('offline-compression', 'no')
self.assertEquals(horizon_contexts.HorizonContext()(),
{'compress_offline': False, 'debug': False,
'default_role': 'Member', 'webroot': '/horizon',
'ubuntu_theme': True, 'secret': 'secret'})
'ubuntu_theme': True,
'secret': 'secret'})
def test_HorizonContext_role(self):
self.test_config.set('default-role', 'foo')
self.assertEquals(horizon_contexts.HorizonContext()(),
{'compress_offline': True, 'debug': False,
'default_role': 'foo', 'webroot': '/horizon',
'ubuntu_theme': True, 'secret': 'secret'})
'ubuntu_theme': True,
'secret': 'secret'})
def test_HorizonContext_webroot(self):
self.test_config.set('webroot', '/')
self.assertEquals(horizon_contexts.HorizonContext()(),
{'compress_offline': True, 'debug': False,
'default_role': 'Member', 'webroot': '/',
'ubuntu_theme': True, 'secret': 'secret'})
'ubuntu_theme': True,
'secret': 'secret'})
def test_IdentityServiceContext_not_related(self):
self.relation_ids.return_value = []