Merge yolanda's HA branch into trunk

James Page 2014-03-05 10:13:53 +00:00
commit 83c18d12af
30 changed files with 316 additions and 2061 deletions

View File

@ -2,14 +2,9 @@
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks
# @flake8 --exclude hooks/charmhelpers unit_tests
@flake8 --exclude lib/charmhelpers hooks
@charm proof
# Disabling since we do not yet have unit tests
#test:
# @echo Starting tests...
# @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
sync:
@charm-helper-sync -c charm-helpers.yaml

View File

@ -1,6 +1,8 @@
destination: lib/charmhelpers
branch: lp:charm-helpers
include:
- fetch
- core
- contrib.charmsupport
- contrib.openstack
- contrib.storage

View File

@ -54,6 +54,16 @@ options:
description: |
Default multicast port number that will be used to communicate between
HA Cluster nodes.
ha-vip-only:
type: boolean
default: False
description: |
By default, without pairing with the hacluster charm, rabbitmq will deploy
in active/active/active... HA. When paired with the hacluster charm, it
will deploy as active/passive. Enabling this option keeps rabbit in an
active/active setup when paired with hacluster, but additionally deploys
a VIP that can be used by services that cannot work with multiple AMQPs
(like Glance in pre-Icehouse).
rbd-size:
type: string
default: 5G
@ -83,3 +93,26 @@ options:
description: |
If True, services that support it will log to syslog instead of their normal
log location.
max-cluster-tries:
type: int
default: 3
description: |
Number of times to try clustering with other units before giving up.
If clustering still fails after this many attempts, the charm will
exit with an error.
source:
type: string
description: |
Optional configuration to support use of additional sources such as:
.
- ppa:myteam/ppa
- cloud:precise-proposed/folsom
- http://my.archive.com/ubuntu main
.
The last option should be used in conjunction with the key configuration
option.
key:
type: string
description: |
Key ID to import to the apt keyring to support use with arbitrary source
configuration from outside of Launchpad archives or PPAs.
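# A rough standalone sketch (illustrative values only, not charm code) of the
# decision the hooks make with the new ha-vip-only flag described above: when
# it is enabled alongside the hacluster relation, the native active/active
# cluster is kept and hacluster only manages a VIP.
ha_relation_present = True   # in the charm: utils.is_relation_made('ha')
ha_vip_only = True           # in the charm: utils.config_get('ha-vip-only')

if ha_relation_present and not ha_vip_only:
    print('hacluster drives an active/passive rabbitmq; skip native clustering')
elif ha_relation_present and ha_vip_only:
    print('keep native active/active clustering; hacluster only manages the VIP')
else:
    print('native active/active clustering, no VIP')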

View File

@ -10,7 +10,9 @@
#
import json
import grp
import os
import pwd
import subprocess
import socket
import sys
@ -327,3 +329,55 @@ def is_relation_made(relation, key='private-address'):
if relation_get(key, rid=r_id, unit=unit):
return True
return False
def get_homedir(user):
try:
user = pwd.getpwnam(user)
return user.pw_dir
except KeyError:
log('Could not get homedir for user %s: does the user exist?' % user, ERROR)
raise
def is_newer():
# compare unit indices numerically so that e.g. unit 10 sorts after unit 9
l_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
r_unit_no = int(os.getenv('JUJU_REMOTE_UNIT').split('/')[1])
return (l_unit_no > r_unit_no)
def chown(path, owner='root', group='root', recursive=False):
"""Changes owner of given path, recursively if needed"""
if os.path.exists(path):
juju_log('INFO', 'Changing ownership of path %s to %s:%s' %
(path, owner, group))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if recursive:
for root, dirs, files in os.walk(path):
for dir in dirs:
os.chown(os.path.join(root, dir), uid, gid)
for file in files:
os.chown(os.path.join(root, file), uid, gid)
else:
os.chown(path, uid, gid)
else:
juju_log('ERROR', '%s path does not exist' % path)
def chmod(path, perms, recursive=False):
"""Changes perms of given path, recursively if needed"""
if os.path.exists(path):
juju_log('INFO', 'Changing perms of path %s ' % path)
if recursive:
for root, dirs, files in os.walk(path):
for dir in dirs:
os.chmod(os.path.join(root, dir), perms)
for file in files:
os.chmod(os.path.join(root, file), perms)
else:
os.chmod(path, perms)
else:
juju_log('ERROR', '%s path does not exist' % path)
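# A quick standalone illustration of why is_newer() above compares the unit
# indices numerically rather than as strings: lexicographic comparison
# misorders units once a service grows past ten units.
print('10' > '9')            # False - string comparison
print(int('10') > int('9'))  # True  - numeric comparison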

View File

@ -1,13 +1,9 @@
import os
import pwd
import grp
import re
import sys
import subprocess
import glob
import lib.utils as utils
import lib.unison as unison
import lib.cluster_utils as cluster
import apt_pkg as apt
import _pythonpath
@ -18,25 +14,30 @@ from charmhelpers.contrib.openstack.utils import (
error_out
)
PACKAGES = ['pwgen', 'rabbitmq-server', 'python-amqplib', 'unison']
from charmhelpers.core.hookenv import config, relation_ids, relation_get, relation_set, local_unit
PACKAGES = ['pwgen', 'rabbitmq-server', 'python-amqplib']
RABBITMQ_CTL = '/usr/sbin/rabbitmqctl'
COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
ENV_CONF = '/etc/rabbitmq/rabbitmq-env.conf'
RABBITMQ_CONF = '/etc/rabbitmq/rabbitmq.config'
SSH_USER = 'juju_rabbit'
RABBIT_USER = 'rabbitmq'
LIB_PATH = '/var/lib/rabbitmq/'
def vhost_exists(vhost):
cmd = [RABBITMQ_CTL, 'list_vhosts']
out = subprocess.check_output(cmd)
for line in out.split('\n')[1:]:
if line == vhost:
utils.juju_log('INFO', 'vhost (%s) already exists.' % vhost)
return True
return False
try:
cmd = [RABBITMQ_CTL, 'list_vhosts']
out = subprocess.check_output(cmd)
for line in out.split('\n')[1:]:
if line == vhost:
utils.juju_log('INFO', 'vhost (%s) already exists.' % vhost)
return True
return False
except:
# when there are no vhosts yet, list_vhosts just raises an exception
return False
def create_vhost(vhost):
@ -88,51 +89,65 @@ def service(action):
subprocess.check_call(cmd)
def rabbit_version():
def compare_version(base_version):
apt.init()
cache = apt.Cache()
pkg = cache['rabbitmq-server']
if pkg.current_ver:
return apt.upstream_version(pkg.current_ver.ver_str)
return (apt.version_compare(pkg.current_ver.ver_str, base_version) >= 0)
else:
return None
return False
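# Standalone check (assumes the python-apt bindings, imported above as apt,
# are installed) of the Debian version comparison that compare_version()
# relies on: version_compare() returns >0, 0 or <0 like strcmp.
import apt_pkg
apt_pkg.init()
print(apt_pkg.version_compare('3.0.1-1', '2.7.1-0ubuntu4') >= 0)  # True
print(apt_pkg.version_compare('2.7.1-0ubuntu4', '3.0.1-1') >= 0)  # False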
def cluster_with():
vers = rabbit_version()
if vers >= '3.0.1-1':
utils.juju_log('INFO', 'Clustering with new node')
if compare_version('3.0.1-1'):
cluster_cmd = 'join_cluster'
cmd = [RABBITMQ_CTL, 'set_policy HA \'^(?!amq\.).*\' '
'\'{"ha-mode": "all"}\'']
cmd = [RABBITMQ_CTL, 'set_policy', 'HA', '^(?!amq\.).*', '{"ha-mode": "all"}']
subprocess.check_call(cmd)
else:
cluster_cmd = 'cluster'
out = subprocess.check_output([RABBITMQ_CTL, 'cluster_status'])
utils.juju_log('INFO', 'cluster status is %s' % str(out))
current_host = subprocess.check_output(['hostname']).strip()
# check all peers and try to cluster with them
available_nodes = []
first_hostname = utils.relation_get('host')
available_nodes.append(first_hostname)
# check if node is already clustered
total_nodes = 1
running_nodes = []
m = re.search("\{running_nodes,\[(.*)\]\}", out.strip())
if m is not None:
running_nodes = m.group(1).split(',')
running_nodes = [x.replace("'", '') for x in running_nodes]
total_nodes = len(running_nodes)
for r_id in (utils.relation_ids('cluster') or []):
for unit in (utils.relation_list(r_id) or []):
address = utils.relation_get('private_address',
rid=r_id, unit=unit)
if address is not None:
node = get_hostname(address, fqdn=False)
if current_host != node:
available_nodes.append(node)
if total_nodes > 1:
utils.juju_log('INFO', 'Node is already clustered, skipping')
else:
# check all peers and try to cluster with them
available_nodes = []
num_tries = 0
for r_id in (utils.relation_ids('cluster') or []):
for unit in (utils.relation_list(r_id) or []):
address = utils.relation_get('private-address',
rid=r_id, unit=unit)
if address is not None:
node = get_hostname(address, fqdn=False)
if current_host != node:
available_nodes.append(node)
num_tries += 1
# iterate over all the nodes, join to the first available
for node in available_nodes:
utils.juju_log('INFO',
'Clustering with remote rabbit host (%s).' % node)
for line in out.split('\n'):
if re.search(node, line):
# iterate over all the nodes, join to the first available
if len(available_nodes) == 0:
utils.juju_log('INFO', 'Master node still not ready, retrying')
return False
max_tries = config('max-cluster-tries')
for node in available_nodes:
utils.juju_log('INFO',
'Clustering with remote rabbit host (%s).' % node)
if node in running_nodes:
utils.juju_log('INFO',
'Host already clustered with %s.' % node)
return
return False
try:
cmd = [RABBITMQ_CTL, 'stop_app']
@ -142,14 +157,18 @@ def cluster_with():
cmd = [RABBITMQ_CTL, 'start_app']
subprocess.check_call(cmd)
utils.juju_log('INFO', 'Host clustered with %s.' % node)
return
return True
except:
# continue to the next node
pass
# continue to the next node
num_tries += 1
# error, no nodes available for clustering
utils.juju_log('ERROR', 'No nodes available for clustering')
sys.exit(1)
# error, no nodes available for clustering
utils.juju_log('ERROR', 'No nodes available for clustering, retrying')
if num_tries > max_tries:
utils.juju_log('ERROR', 'Max tries number exhausted, exiting')
sys.exit(1)
return False
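# Standalone illustration (sample output hard-coded, not charm code) of how
# cluster_with() above extracts the running node list from
# `rabbitmqctl cluster_status` with the running_nodes regex.
import re
sample = ("Cluster status of node 'rabbit@host1' ...\n"
          "[{nodes,[{disc,['rabbit@host1','rabbit@host2']}]},\n"
          " {running_nodes,['rabbit@host1','rabbit@host2']}]")
m = re.search(r"\{running_nodes,\[(.*)\]\}", sample.strip())
running_nodes = []
if m:
    running_nodes = [x.replace("'", '') for x in m.group(1).split(',')]
print(running_nodes)  # ['rabbit@host1', 'rabbit@host2']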
def break_cluster():
@ -224,16 +243,14 @@ ssl_cert_file = "/etc/rabbitmq/rabbit-server-cert.pem"
def enable_ssl(ssl_key, ssl_cert, ssl_port):
uid = pwd.getpwnam("root").pw_uid
gid = grp.getgrnam("rabbitmq").gr_gid
with open(ssl_key_file, 'w') as key_file:
key_file.write(ssl_key)
os.chmod(ssl_key_file, 0640)
os.chown(ssl_key_file, uid, gid)
utils.chmod(ssl_key_file, 0640)
utils.chown(ssl_key_file, "root", RABBIT_USER)
with open(ssl_cert_file, 'w') as cert_file:
cert_file.write(ssl_cert)
os.chmod(ssl_cert_file, 0640)
os.chown(ssl_cert_file, uid, gid)
utils.chmod(ssl_cert_file, 0640)
utils.chown(ssl_cert_file, "root", RABBIT_USER)
with open(RABBITMQ_CONF, 'w') as rmq_conf:
rmq_conf.write(utils.render_template(os.path.basename(RABBITMQ_CONF),
{"ssl_port": ssl_port,
@ -272,27 +289,21 @@ def execute(cmd, die=False, echo=False):
rc = p.returncode
if die and rc != 0:
error_out("ERROR: command %s return non-zero.\n" % cmd)
utils.juju_log('INFO', "ERROR: command %s return non-zero.\n" % cmd)
return (stdout, stderr, rc)
def synchronize_service_credentials():
'''
Broadcast service credentials to peers or consume those that have been
broadcasted by peer, depending on hook context.
'''
if not os.path.isdir(LIB_PATH):
return
peers = cluster.peer_units()
if peers and not cluster.oldest_peer(peers):
utils.juju_log('INFO', 'Deferring action to oldest service unit.')
return
def get_clustered_attribute(attribute_name):
cluster_rels = relation_ids('cluster')
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
password = relation_get(attribute=attribute_name, rid=cluster_rid, unit=local_unit())
return password
else:
return None
utils.juju_log('INFO', 'Synchronizing service passwords to all peers.')
try:
unison.sync_to_peers(peer_interface='cluster',
paths=[LIB_PATH], user=SSH_USER,
verbose=True)
except Exception:
# to skip files without perms safely
pass
def set_clustered_attribute(attribute_name, value):
cluster_rels = relation_ids('cluster')
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid, relation_settings={attribute_name: value})
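# Standalone mock (plain dict, not charm code) of the get-or-create pattern
# that the relation hooks below (configure_amqp, update_nrpe_checks) build on
# top of the get/set_clustered_attribute() helpers above: the peer 'cluster'
# relation acts as a shared store for per-service passwords.
relation_store = {}          # stands in for the cluster relation settings

def get_attr(name):
    return relation_store.get(name)

def set_attr(name, value):
    relation_store[name] = value

password = get_attr('nova.passwd')
if not password:
    password = 'generated-by-pwgen'   # the charm uses pwgen(length=64)
    set_attr('nova.passwd', password)
print(get_attr('nova.passwd'))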

View File

@ -12,13 +12,15 @@ import lib.utils as utils
import lib.cluster_utils as cluster
import lib.ceph_utils as ceph
import lib.openstack_common as openstack
import lib.unison as unison
import _pythonpath
_ = _pythonpath
from charmhelpers.fetch import (
add_source,
apt_update)
from charmhelpers.core import hookenv
from charmhelpers.core.host import rsync
from charmhelpers.core.host import rsync, mkdir, pwgen
from charmhelpers.contrib.charmsupport.nrpe import NRPE
@ -28,38 +30,31 @@ RABBIT_DIR = '/var/lib/rabbitmq'
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
def ensure_unison_rabbit_permissions():
rabbit.execute("chmod g+wrx %s" % rabbit.LIB_PATH)
rabbit.execute("chmod g+wrx %s*.passwd" % rabbit.LIB_PATH)
def install():
pre_install_hooks()
add_source(utils.config_get('source'), utils.config_get('key'))
apt_update(fatal=True)
utils.install(*rabbit.PACKAGES)
utils.expose(5672)
# ensure user + permissions for peer relations that
# may be syncing data there via SSH_USER.
unison.ensure_user(user=rabbit.SSH_USER, group=rabbit.RABBIT_USER)
ensure_unison_rabbit_permissions()
utils.chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
utils.chmod(RABBIT_DIR, 0775)
def configure_amqp(username, vhost):
password_file = os.path.join(RABBIT_DIR, '%s.passwd' % username)
if os.path.exists(password_file):
password = open(password_file).read().strip()
else:
cmd = ['pwgen', '64', '1']
password = subprocess.check_output(cmd).strip()
with open(password_file, 'wb') as out:
out.write(password)
# get and update service password
password = rabbit.get_clustered_attribute('%s.passwd' % username)
if not password:
# update password in cluster
password = pwgen(length=64)
rabbit.set_clustered_attribute('%s.passwd' % username, password)
rabbit.create_vhost(vhost)
rabbit.create_user(username, password)
rabbit.grant_permissions(username, vhost)
# update vhost
rabbit.create_vhost(vhost)
rabbit.create_user(username, password)
rabbit.grant_permissions(username, vhost)
return password
def amqp_changed(relation_id=None, remote_unit=None):
if not cluster.eligible_leader('res_rabbitmq_vip'):
msg = 'amqp_changed(): Deferring amqp_changed to eligible_leader.'
@ -96,73 +91,59 @@ def amqp_changed(relation_id=None, remote_unit=None):
configure_amqp(queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = utils.unit_get('private-address')
if cluster.is_clustered():
relation_settings['clustered'] = 'true'
if utils.is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = utils.config_get('vip')
relation_settings['ha-vip-only'] = utils.config_get('ha-vip-only')
if relation_id:
relation_settings['rid'] = relation_id
utils.relation_set(**relation_settings)
# sync new creds to all peers
rabbit.synchronize_service_credentials()
# set if need HA queues or not
relation_settings['ha_queues'] = (rabbit.compare_version('3.0.1-1'))
utils.relation_set(**relation_settings)
def cluster_joined():
unison.ssh_authorized_peers(user=rabbit.SSH_USER,
group='rabbit',
peer_interface='cluster',
ensure_local_user=True)
if utils.is_relation_made('ha'):
if utils.is_relation_made('ha') and \
utils.config_get('ha-vip-only') is False:
utils.juju_log('INFO',
'hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
if l_unit_no > r_unit_no:
if utils.is_newer():
utils.juju_log('INFO', 'cluster_joined: Relation greater.')
return
rabbit.COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
if not os.path.isfile(rabbit.COOKIE_PATH):
utils.juju_log('ERROR', 'erlang cookie missing from %s' %
rabbit.COOKIE_PATH)
return
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
# add parent host to the relation
local_hostname = subprocess.check_output(['hostname']).strip()
utils.relation_set(cookie=cookie, host=local_hostname)
rabbit.set_clustered_attribute('cookie', cookie)
def cluster_changed():
unison.ssh_authorized_peers(user=rabbit.SSH_USER,
group='rabbit',
peer_interface='cluster',
ensure_local_user=True)
rabbit.synchronize_service_credentials()
# sync passwords
rdata = hookenv.relation_get()
echo_data = {}
for attribute, value in rdata.iteritems():
if '.passwd' in attribute or attribute == 'cookie':
echo_data[attribute] = value
if len(echo_data) > 0:
hookenv.relation_set(relation_settings=echo_data)
if utils.is_relation_made('ha'):
utils.juju_log('INFO',
'hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
if l_unit_no < r_unit_no:
utils.juju_log('INFO', 'cluster_joined: Relation lesser.')
return
cookie = utils.relation_get('cookie')
if cookie is None:
if 'cookie' not in echo_data:
utils.juju_log('INFO',
'cluster_changed: cookie not yet set.')
return
# sync cookie
cookie = echo_data['cookie']
if open(rabbit.COOKIE_PATH, 'r').read().strip() == cookie:
utils.juju_log('INFO', 'Cookie already synchronized with peer.')
else:
@ -172,19 +153,28 @@ def cluster_changed():
out.write(cookie)
rabbit.service('start')
# cluster with other nodes
rabbit.cluster_with()
def cluster_departed():
if utils.is_relation_made('ha'):
if utils.is_relation_made('ha') and \
utils.config_get('ha-vip-only') is False:
utils.juju_log('INFO',
'hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
l_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1]
r_unit_no = os.getenv('JUJU_REMOTE_UNIT').split('/')[1]
if l_unit_no < r_unit_no:
# cluster with node
if utils.is_newer():
if rabbit.cluster_with():
# resync nrpe user after clustering
update_nrpe_checks()
def cluster_departed():
if utils.is_relation_made('ha') and \
utils.config_get('ha-vip-only') is False:
utils.juju_log('INFO',
'hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
if not utils.is_newer():
utils.juju_log('INFO', 'cluster_departed: Relation lesser.')
return
rabbit.break_cluster()
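# Standalone sketch (plain dict, not charm code) of the peer "echo" performed
# in cluster_changed() above: any '<service>.passwd' keys and the erlang
# cookie received on the cluster relation are re-broadcast so all peers
# converge on the same credentials.
rdata = {'nova.passwd': 's3cret', 'cookie': 'ERLANGCOOKIE',
         'private-address': '10.0.0.2'}
echo_data = dict((k, v) for k, v in rdata.items()
                 if '.passwd' in k or k == 'cookie')
print(sorted(echo_data))  # ['cookie', 'nova.passwd'] - private-address is not echoed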
@ -197,20 +187,26 @@ def ha_joined():
vip_iface = utils.config_get('vip_iface')
vip_cidr = utils.config_get('vip_cidr')
rbd_name = utils.config_get('rbd-name')
vip_only = utils.config_get('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name]:
vip_cidr, rbd_name] and vip_only is False:
utils.juju_log('ERROR', 'Insufficient configuration data to '
'configure hacluster.')
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
utils.juju_log('ERROR', 'Insufficient configuration data to '
'configure VIP-only hacluster.')
sys.exit(1)
if not utils.is_relation_made('ceph', 'auth'):
if not utils.is_relation_made('ceph', 'auth') and vip_only is False:
utils.juju_log('INFO',
'ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name:
if rabbit.get_node_name() != name and vip_only is False:
utils.juju_log('INFO', 'Stopping rabbitmq-server.')
utils.stop('rabbitmq-server')
rabbit.set_node_name('%s@localhost' % SERVICE_NAME)
@ -221,31 +217,44 @@ def ha_joined():
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
}
relation_settings['groups'] = {
'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
else:
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s" '
'op monitor interval="5s"',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph.keyfile_path(SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s" '
'op monitor interval="5s"',
}
relation_settings['groups'] = {
'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
relation_settings['groups'] = {
'grp_rabbitmq': 'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
for rel_id in utils.relation_ids('ha'):
utils.relation_set(rid=rel_id, **relation_settings)
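# Illustration (example values, not charm code) of the single resource handed
# to hacluster in the ha-vip-only case configured above: only a VIP is
# managed, rabbitmq itself stays active/active.
vip, vip_cidr, vip_iface = '192.168.77.100', '24', 'eth0'
resources = {'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2'}
resource_params = {
    'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                        (vip, vip_cidr, vip_iface),
}
print(resource_params['res_rabbitmq_vip'])
# params ip="192.168.77.100" cidr_netmask="24" nic="eth0"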
@ -264,10 +273,11 @@ def ha_changed():
utils.juju_log('INFO', 'ha_changed(): We are now HA clustered. '
'Advertising our VIP (%s) to all AMQP clients.' %
vip)
# need to re-authenticate all clients since node-name changed.
for rid in utils.relation_ids('amqp'):
for unit in utils.relation_list(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
if utils.config_get('ha-vip-only') is False:
# need to re-authenticate all clients since node-name changed.
for rid in utils.relation_ids('amqp'):
for unit in utils.relation_list(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
def ceph_joined():
@ -322,16 +332,16 @@ def update_nrpe_checks():
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
user = 'naigos'
vhost = 'nagios'
password_file = os.path.join(RABBIT_DIR, '%s.passwd' % user)
if os.path.exists(password_file):
password = open(password_file).read().strip()
else:
cmd = ['pwgen', '64', '1']
password = subprocess.check_output(cmd).strip()
with open(password_file, 'wb') as out:
out.write(password)
# create unique user and vhost for each unit
current_unit = hookenv.local_unit().replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
password = rabbit.get_clustered_attribute('%s.passwd' % user)
if not password:
utils.juju_log('INFO', 'Setting password for nagios unit: %s' % user)
password = pwgen(length=64)
rabbit.set_clustered_attribute('%s.passwd' % user, password)
rabbit.create_vhost(vhost)
rabbit.create_user(user, password)
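# Illustration (not charm code) of the per-unit nagios credentials created in
# update_nrpe_checks() above: each unit gets its own user and vhost, derived
# from the unit name.
current_unit = 'rabbitmq-server/0'.replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
print('%s %s' % (user, vhost))  # nagios-rabbitmq-server-0 nagios-rabbitmq-server-0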
@ -349,6 +359,9 @@ def update_nrpe_checks():
def upgrade_charm():
pre_install_hooks()
add_source(utils.config_get('source'), utils.config_get('key'))
apt_update(fatal=True)
# Ensure older passwd files in /var/lib/juju are moved to
# /var/lib/rabbitmq which will end up replicated if clustered.
for f in [f for f in os.listdir('/var/lib/juju')
@ -356,18 +369,32 @@ def upgrade_charm():
if f.endswith('.passwd'):
s = os.path.join('/var/lib/juju', f)
d = os.path.join('/var/lib/rabbitmq', f)
# propagate to cluster if needed
username = os.path.basename(s)
password = rabbit.get_clustered_attribute(username)
if password is None:
with open(s, 'r') as h:
stored_password = h.read()
if stored_password:
rabbit.set_clustered_attribute(username, stored_password)
utils.juju_log('INFO',
'upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
shutil.move(s, d)
# explicitly rename the buggy 'naigos.passwd' file left by older charm versions
old = os.path.join('/var/lib/rabbitmq', 'naigos.passwd')
if os.path.isfile(old):
new = os.path.join('/var/lib/rabbitmq', 'nagios.passwd')
shutil.move(old, new)
MAN_PLUGIN = 'rabbitmq_management'
def config_changed():
unison.ensure_user(user=rabbit.SSH_USER, group='rabbit')
ensure_unison_rabbit_permissions()
if utils.config_get('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
utils.open_port(55672)
@ -392,7 +419,8 @@ def config_changed():
os.remove(rabbit.RABBITMQ_CONF)
utils.close_port(utils.config_get('ssl_port'))
if cluster.eligible_leader('res_rabbitmq_vip'):
if cluster.eligible_leader('res_rabbitmq_vip') or \
utils.config_get('ha-vip-only') is True:
utils.restart('rabbitmq-server')
update_nrpe_checks()

View File

@ -1,57 +0,0 @@
==========
Commandant
==========
-----------------------------------------------------
Automatic command-line interfaces to Python functions
-----------------------------------------------------
One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments. If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
Command execution and stdio manipulation is the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
Goals
=====
* Single decorator to expose a function as a command.
* now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control. (MW)
* Automatic analysis of function signature through ``inspect.getargspec()``
* Command argument parser built automatically with ``argparse``
* Interactive interpreter loop object made with ``Cmd``
* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
Other Important Features that need writing
------------------------------------------
* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
- Filename arguments are important, as good practice is for functions to accept file objects as parameters.
- choices arguments help to limit bad input before the function is called
* Some automatic behaviour could make for better defaults, once the user can override them.
- We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
- We could automatically support hyphens as alternates for underscores
- Arguments defaulting to sequence types could support the ``append`` action.
-----------------------------------------------------
Implementing subcommands
-----------------------------------------------------
(WIP)
So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommendation would be to place definitions into separate modules near the implementations which they expose.
Some examples::
from charmhelpers.cli import CommandLine
from charmhelpers.payload import execd
from charmhelpers.foo import bar
cli = CommandLine()
cli.subcommand(execd.execd_run)
@cli.subcommand_builder("bar", help="Bar baz qux")
def barcmd_builder(subparser):
subparser.add_argument('argument1', help="yackety")
return bar

View File

@ -1,147 +0,0 @@
import inspect
import itertools
import argparse
import sys
class OutputFormatter(object):
def __init__(self, outfile=sys.stdout):
self.formats = (
"raw",
"json",
"py",
"yaml",
"csv",
"tab",
)
self.outfile = outfile
def add_arguments(self, argument_parser):
formatgroup = argument_parser.add_mutually_exclusive_group()
choices = self.supported_formats
formatgroup.add_argument("--format", metavar='FMT',
help="Select output format for returned data, "
"where FMT is one of: {}".format(choices),
choices=choices, default='raw')
for fmt in self.formats:
fmtfunc = getattr(self, fmt)
formatgroup.add_argument("-{}".format(fmt[0]),
"--{}".format(fmt), action='store_const',
const=fmt, dest='format',
help=fmtfunc.__doc__)
@property
def supported_formats(self):
return self.formats
def raw(self, output):
"""Output data as raw string (default)"""
self.outfile.write(str(output))
def py(self, output):
"""Output data as a nicely-formatted python data structure"""
import pprint
pprint.pprint(output, stream=self.outfile)
def json(self, output):
"""Output data in JSON format"""
import json
json.dump(output, self.outfile)
def yaml(self, output):
"""Output data in YAML format"""
import yaml
yaml.safe_dump(output, self.outfile)
def csv(self, output):
"""Output data as excel-compatible CSV"""
import csv
csvwriter = csv.writer(self.outfile)
csvwriter.writerows(output)
def tab(self, output):
"""Output data in excel-compatible tab-delimited format"""
import csv
csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
csvwriter.writerows(output)
def format_output(self, output, fmt='raw'):
fmtfunc = getattr(self, fmt)
fmtfunc(output)
class CommandLine(object):
argument_parser = None
subparsers = None
formatter = None
def __init__(self):
if not self.argument_parser:
self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
if not self.formatter:
self.formatter = OutputFormatter()
self.formatter.add_arguments(self.argument_parser)
if not self.subparsers:
self.subparsers = self.argument_parser.add_subparsers(help='Commands')
def subcommand(self, command_name=None):
"""
Decorate a function as a subcommand. Use its arguments as the
command-line arguments"""
def wrapper(decorated):
cmd_name = command_name or decorated.__name__
subparser = self.subparsers.add_parser(cmd_name,
description=decorated.__doc__)
for args, kwargs in describe_arguments(decorated):
subparser.add_argument(*args, **kwargs)
subparser.set_defaults(func=decorated)
return decorated
return wrapper
def subcommand_builder(self, command_name, description=None):
"""
Decorate a function that builds a subcommand. Builders should accept a
single argument (the subparser instance) and return the function to be
run as the command."""
def wrapper(decorated):
subparser = self.subparsers.add_parser(command_name)
func = decorated(subparser)
subparser.set_defaults(func=func)
subparser.description = description or func.__doc__
return wrapper
def run(self):
"Run cli, processing arguments and executing subcommands."
arguments = self.argument_parser.parse_args()
argspec = inspect.getargspec(arguments.func)
vargs = []
kwargs = {}
if argspec.varargs:
vargs = getattr(arguments, argspec.varargs)
for arg in argspec.args:
kwargs[arg] = getattr(arguments, arg)
self.formatter.format_output(arguments.func(*vargs, **kwargs), arguments.format)
cmdline = CommandLine()
def describe_arguments(func):
"""
Analyze a function's signature and return a data structure suitable for
passing in as arguments to an argparse parser's add_argument() method."""
argspec = inspect.getargspec(func)
# we should probably raise an exception somewhere if func includes **kwargs
if argspec.defaults:
positional_args = argspec.args[:-len(argspec.defaults)]
keyword_names = argspec.args[-len(argspec.defaults):]
for arg, default in itertools.izip(keyword_names, argspec.defaults):
yield ('--{}'.format(arg),), {'default': default}
else:
positional_args = argspec.args
for arg in positional_args:
yield (arg,), {}
if argspec.varargs:
yield (argspec.varargs,), {'nargs': '*'}

View File

@ -1,2 +0,0 @@
from . import CommandLine
import host

View File

@ -1,15 +0,0 @@
from . import cmdline
from charmhelpers.core import host
@cmdline.subcommand()
def mounts():
"List mounts"
return host.mounts()
@cmdline.subcommand_builder('service', description="Control system services")
def service(subparser):
subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
subparser.add_argument("service_name", help="Name of the service to control")
return host.service

View File

@ -1,165 +0,0 @@
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
"""Charm Helpers ansible - declare the state of your machines.
This helper enables you to declare your machine state, rather than
program it procedurally (and have to test each change to your procedures).
Your install hook can be as simple as:
{{{
import charmhelpers.contrib.ansible
def install():
charmhelpers.contrib.ansible.install_ansible_support()
charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
}}}
and won't need to change (nor will its tests) when you change the machine
state.
All of your juju config and relation-data are available as template
variables within your playbooks and templates. An install playbook looks
something like:
{{{
---
- hosts: localhost
user: root
tasks:
- name: Add private repositories.
template:
src: ../templates/private-repositories.list.jinja2
dest: /etc/apt/sources.list.d/private.list
- name: Update the cache.
apt: update_cache=yes
- name: Install dependencies.
apt: pkg={{ item }}
with_items:
- python-mimeparse
- python-webob
- sunburnt
- name: Setup groups.
group: name={{ item.name }} gid={{ item.gid }}
with_items:
- { name: 'deploy_user', gid: 1800 }
- { name: 'service_user', gid: 1500 }
...
}}}
Read more online about playbooks[1] and standard ansible modules[2].
[1] http://www.ansibleworks.com/docs/playbooks.html
[2] http://www.ansibleworks.com/docs/modules.html
"""
import os
import subprocess
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
import charmhelpers.fetch
charm_dir = os.environ.get('CHARM_DIR', '')
ansible_hosts_path = '/etc/ansible/hosts'
# Ansible will automatically include any vars in the following
# file in its inventory when run locally.
ansible_vars_path = '/etc/ansible/host_vars/localhost'
def install_ansible_support(from_ppa=True):
"""Installs the ansible package.
By default it is installed from the PPA [1] linked from
the ansible website [2].
[1] https://launchpad.net/~rquillo/+archive/ansible
[2] http://www.ansibleworks.com/docs/gettingstarted.html#ubuntu-and-debian
If from_ppa is false, you must ensure that the package is available
from a configured repository.
"""
if from_ppa:
charmhelpers.fetch.add_source('ppa:rquillo/ansible')
charmhelpers.fetch.apt_update(fatal=True)
charmhelpers.fetch.apt_install('ansible')
with open(ansible_hosts_path, 'w+') as hosts_file:
hosts_file.write('localhost ansible_connection=local')
def apply_playbook(playbook, tags=None):
tags = tags or []
tags = ",".join(tags)
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
ansible_vars_path, namespace_separator='__',
allow_hyphens_in_keys=False)
call = [
'ansible-playbook',
'-c',
'local',
playbook,
]
if tags:
call.extend(['--tags', '{}'.format(tags)])
subprocess.check_call(call)
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
"""Run a playbook with the hook-name as the tag.
This helper builds on the standard hookenv.Hooks helper,
but additionally runs the playbook with the hook-name specified
using --tags (ie. running all the tasks tagged with the hook-name).
Example:
hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
# All the tasks within my_machine_state.yaml tagged with 'install'
# will be run automatically after do_custom_work()
@hooks.hook()
def install():
do_custom_work()
# For most of your hooks, you won't need to do anything other
# than run the tagged tasks for the hook:
@hooks.hook('config-changed', 'start', 'stop')
def just_use_playbook():
pass
# As a convenience, you can avoid the above noop function by specifying
# the hooks which are handled by ansible-only and they'll be registered
# for you:
# hooks = AnsibleHooks(
# 'playbooks/my_machine_state.yaml',
# default_hooks=['config-changed', 'start', 'stop'])
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self, playbook_path, default_hooks=None):
"""Register any hooks handled by ansible."""
super(AnsibleHooks, self).__init__()
self.playbook_path = playbook_path
default_hooks = default_hooks or []
noop = lambda *args, **kwargs: None
for hook in default_hooks:
self.register(hook, noop)
def execute(self, args):
"""Execute the hook followed by the playbook using the hook as tag."""
super(AnsibleHooks, self).execute(args)
hook_name = os.path.basename(args[0])
charmhelpers.contrib.ansible.apply_playbook(
self.playbook_path, tags=[hook_name])

View File

@ -1,4 +0,0 @@
Source lp:charm-tools/trunk
charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py
charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py

View File

@ -1,184 +0,0 @@
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import warnings
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning)
"""Helper functions for writing Juju charms in Python."""
__metaclass__ = type
__all__ = [
#'get_config', # core.hookenv.config()
#'log', # core.hookenv.log()
#'log_entry', # core.hookenv.log()
#'log_exit', # core.hookenv.log()
#'relation_get', # core.hookenv.relation_get()
#'relation_set', # core.hookenv.relation_set()
#'relation_ids', # core.hookenv.relation_ids()
#'relation_list', # core.hookenv.relation_units()
#'config_get', # core.hookenv.config()
#'unit_get', # core.hookenv.unit_get()
#'open_port', # core.hookenv.open_port()
#'close_port', # core.hookenv.close_port()
#'service_control', # core.host.service()
'unit_info', # client-side, NOT IMPLEMENTED
'wait_for_machine', # client-side, NOT IMPLEMENTED
'wait_for_page_contents', # client-side, NOT IMPLEMENTED
'wait_for_relation', # client-side, NOT IMPLEMENTED
'wait_for_unit', # client-side, NOT IMPLEMENTED
]
import operator
from shelltoolbox import (
command,
)
import tempfile
import time
import urllib2
import yaml
SLEEP_AMOUNT = 0.1
# We create a juju_status Command here because it makes testing much,
# much easier.
juju_status = lambda: command('juju')('status')
# re-implemented as charmhelpers.fetch.configure_sources()
#def configure_source(update=False):
# source = config_get('source')
# if ((source.startswith('ppa:') or
# source.startswith('cloud:') or
# source.startswith('http:'))):
# run('add-apt-repository', source)
# if source.startswith("http:"):
# run('apt-key', 'import', config_get('key'))
# if update:
# run('apt-get', 'update')
# DEPRECATED: client-side only
def make_charm_config_file(charm_config):
charm_config_file = tempfile.NamedTemporaryFile()
charm_config_file.write(yaml.dump(charm_config))
charm_config_file.flush()
# The NamedTemporaryFile instance is returned instead of just the name
# because we want to take advantage of garbage collection-triggered
# deletion of the temp file when it goes out of scope in the caller.
return charm_config_file
# DEPRECATED: client-side only
def unit_info(service_name, item_name, data=None, unit=None):
if data is None:
data = yaml.safe_load(juju_status())
service = data['services'].get(service_name)
if service is None:
# XXX 2012-02-08 gmb:
# This allows us to cope with the race condition that we
# have between deploying a service and having it come up in
# `juju status`. We could probably do with cleaning it up so
# that it fails a bit more noisily after a while.
return ''
units = service['units']
if unit is not None:
item = units[unit][item_name]
else:
# It might seem odd to sort the units here, but we do it to
# ensure that when no unit is specified, the first unit for the
# service (or at least the one with the lowest number) is the
# one whose data gets returned.
sorted_unit_names = sorted(units.keys())
item = units[sorted_unit_names[0]][item_name]
return item
# DEPRECATED: client-side only
def get_machine_data():
return yaml.safe_load(juju_status())['machines']
# DEPRECATED: client-side only
def wait_for_machine(num_machines=1, timeout=300):
"""Wait `timeout` seconds for `num_machines` machines to come up.
This wait_for... function can be called by other wait_for functions
whose timeouts might be too short in situations where only a bare
Juju setup has been bootstrapped.
:return: A tuple of (num_machines, time_taken). This is used for
testing.
"""
# You may think this is a hack, and you'd be right. The easiest way
# to tell what environment we're working in (LXC vs EC2) is to check
# the dns-name of the first machine. If it's localhost we're in LXC
# and we can just return here.
if get_machine_data()[0]['dns-name'] == 'localhost':
return 1, 0
start_time = time.time()
while True:
# Drop the first machine, since it's the Zookeeper and that's
# not a machine that we need to wait for. This will only work
# for EC2 environments, which is why we return early above if
# we're in LXC.
machine_data = get_machine_data()
non_zookeeper_machines = [
machine_data[key] for key in machine_data.keys()[1:]]
if len(non_zookeeper_machines) >= num_machines:
all_machines_running = True
for machine in non_zookeeper_machines:
if machine.get('instance-state') != 'running':
all_machines_running = False
break
if all_machines_running:
break
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for service to start')
time.sleep(SLEEP_AMOUNT)
return num_machines, time.time() - start_time
# DEPRECATED: client-side only
def wait_for_unit(service_name, timeout=480):
"""Wait `timeout` seconds for a given service name to come up."""
wait_for_machine(num_machines=1)
start_time = time.time()
while True:
state = unit_info(service_name, 'agent-state')
if 'error' in state or state == 'started':
break
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for service to start')
time.sleep(SLEEP_AMOUNT)
if state != 'started':
raise RuntimeError('unit did not start, agent-state: ' + state)
# DEPRECATED: client-side only
def wait_for_relation(service_name, relation_name, timeout=120):
"""Wait `timeout` seconds for a given relation to come up."""
start_time = time.time()
while True:
relation = unit_info(service_name, 'relations').get(relation_name)
if relation is not None and relation['state'] == 'up':
break
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for relation to be up')
time.sleep(SLEEP_AMOUNT)
# DEPRECATED: client-side only
def wait_for_page_contents(url, contents, timeout=120, validate=None):
if validate is None:
validate = operator.contains
start_time = time.time()
while True:
try:
stream = urllib2.urlopen(url)
except (urllib2.HTTPError, urllib2.URLError):
pass
else:
page = stream.read()
if validate(page, contents):
return page
if time.time() - start_time >= timeout:
raise RuntimeError('timeout waiting for contents of ' + url)
time.sleep(SLEEP_AMOUNT)

View File

@ -1,58 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert():
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
log("Inspecting identity-service relations for SSL certificate.",
level=INFO)
cert = key = None
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = None
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not ca_cert:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def install_ca_cert(ca_cert):
if ca_cert:
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
'w') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])

View File

@ -1,183 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
unit_get,
)
class HAIncompleteConfig(Exception):
pass
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
def peer_units():
peers = []
for r_id in (relation_ids('cluster') or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def oldest_peer(peers):
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
if is_clustered():
if not is_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
def https():
'''
Determines whether enough data has been provided in configuration
or relation data to configure HTTPS
.
returns: boolean
'''
if config_get('use-https') == "yes":
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ssl_cert', rid=r_id, unit=unit),
relation_get('ssl_key', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]
# NOTE: works around (LP: #1203241)
if (None not in rel_state) and ('' not in rel_state):
return True
return False
def determine_api_port(public_port):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_haproxy_port(public_port):
'''
Description: Determine correct proxy listening port based on public IP +
existence of HTTPS reverse proxy.
public_port: int: standard public port for given service
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
i += 1
return public_port - (i * 10)
def get_hacluster_config():
'''
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
conf = {}
for setting in settings:
conf[setting] = config_get(setting)
missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
return conf
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration and hacluster.
:configs : OSTemplateRenderer: A config tempating object to inspect for
a complete https context.
:vip_setting: str: Setting in charm config that specifies
VIP address.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
if is_clustered():
addr = config_get(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)

View File

@ -1,4 +0,0 @@
Source: lp:charms/juju-gui
juju-gui/hooks/utils.py -> charm-helpers/charmhelpers/contrib/jujugui/utils.py
juju-gui/tests/test_utils.py -> charm-helpers/tests/contrib/jujugui/test_utils.py

View File

@ -1,602 +0,0 @@
"""Juju GUI charm utilities."""
__all__ = [
'AGENT',
'APACHE',
'API_PORT',
'CURRENT_DIR',
'HAPROXY',
'IMPROV',
'JUJU_DIR',
'JUJU_GUI_DIR',
'JUJU_GUI_SITE',
'JUJU_PEM',
'WEB_PORT',
'bzr_checkout',
'chain',
'cmd_log',
'fetch_api',
'fetch_gui',
'find_missing_packages',
'first_path_in_dir',
'get_api_address',
'get_npm_cache_archive_url',
'get_release_file_url',
'get_staging_dependencies',
'get_zookeeper_address',
'legacy_juju',
'log_hook',
'merge',
'parse_source',
'prime_npm_cache',
'render_to_file',
'save_or_create_certificates',
'setup_apache',
'setup_gui',
'start_agent',
'start_gui',
'start_improv',
'write_apache_config',
]
from contextlib import contextmanager
import errno
import json
import os
import logging
import shutil
from subprocess import CalledProcessError
import tempfile
from urlparse import urlparse
import apt
import tempita
from launchpadlib.launchpad import Launchpad
from shelltoolbox import (
Serializer,
apt_get_install,
command,
environ,
install_extra_repositories,
run,
script_name,
search_file,
su,
)
from charmhelpers.core.host import (
service_start,
)
from charmhelpers.core.hookenv import (
log,
config,
unit_get,
)
AGENT = 'juju-api-agent'
APACHE = 'apache2'
IMPROV = 'juju-api-improv'
HAPROXY = 'haproxy'
API_PORT = 8080
WEB_PORT = 8000
CURRENT_DIR = os.getcwd()
JUJU_DIR = os.path.join(CURRENT_DIR, 'juju')
JUJU_GUI_DIR = os.path.join(CURRENT_DIR, 'juju-gui')
JUJU_GUI_SITE = '/etc/apache2/sites-available/juju-gui'
JUJU_GUI_PORTS = '/etc/apache2/ports.conf'
JUJU_PEM = 'juju.includes-private-key.pem'
BUILD_REPOSITORIES = ('ppa:chris-lea/node.js-legacy',)
DEB_BUILD_DEPENDENCIES = (
'bzr', 'imagemagick', 'make', 'nodejs', 'npm',
)
DEB_STAGE_DEPENDENCIES = (
'zookeeper',
)
# Store the configuration from on invocation to the next.
config_json = Serializer('/tmp/config.json')
# Bazaar checkout command.
bzr_checkout = command('bzr', 'co', '--lightweight')
# Whether or not the charm is deployed using juju-core.
# If juju-core has been used to deploy the charm, an agent.conf file must
# be present in the charm parent directory.
legacy_juju = lambda: not os.path.exists(
os.path.join(CURRENT_DIR, '..', 'agent.conf'))
def _get_build_dependencies():
"""Install deb dependencies for building."""
log('Installing build dependencies.')
cmd_log(install_extra_repositories(*BUILD_REPOSITORIES))
cmd_log(apt_get_install(*DEB_BUILD_DEPENDENCIES))
def get_api_address(unit_dir):
"""Return the Juju API address stored in the uniter agent.conf file."""
import yaml # python-yaml is only installed if juju-core is used.
# XXX 2013-03-27 frankban bug=1161443:
# currently the uniter agent.conf file does not include the API
# address. For now retrieve it from the machine agent file.
base_dir = os.path.abspath(os.path.join(unit_dir, '..'))
for dirname in os.listdir(base_dir):
if dirname.startswith('machine-'):
agent_conf = os.path.join(base_dir, dirname, 'agent.conf')
break
else:
raise IOError('Juju agent configuration file not found.')
contents = yaml.load(open(agent_conf))
return contents['apiinfo']['addrs'][0]
def get_staging_dependencies():
"""Install deb dependencies for the stage (improv) environment."""
log('Installing stage dependencies.')
cmd_log(apt_get_install(*DEB_STAGE_DEPENDENCIES))
def first_path_in_dir(directory):
"""Return the full path of the first file/dir in *directory*."""
return os.path.join(directory, os.listdir(directory)[0])
def _get_by_attr(collection, attr, value):
"""Return the first item in collection having attr == value.
Return None if the item is not found.
"""
for item in collection:
if getattr(item, attr) == value:
return item
def get_release_file_url(project, series_name, release_version):
"""Return the URL of the release file hosted in Launchpad.
The returned URL points to a release file for the given project, series
name and release version.
The argument *project* is a project object as returned by launchpadlib.
The arguments *series_name* and *release_version* are strings. If
*release_version* is None, the URL of the latest release will be returned.
"""
series = _get_by_attr(project.series, 'name', series_name)
if series is None:
raise ValueError('%r: series not found' % series_name)
# Releases are returned by Launchpad in reverse date order.
releases = list(series.releases)
if not releases:
raise ValueError('%r: series does not contain releases' % series_name)
if release_version is not None:
release = _get_by_attr(releases, 'version', release_version)
if release is None:
raise ValueError('%r: release not found' % release_version)
releases = [release]
for release in releases:
for file_ in release.files:
if str(file_).endswith('.tgz'):
return file_.file_link
raise ValueError('%r: file not found' % release_version)
def get_zookeeper_address(agent_file_path):
"""Retrieve the Zookeeper address contained in the given *agent_file_path*.
The *agent_file_path* is a path to a file containing a line similar to the
following::
env JUJU_ZOOKEEPER="address"
"""
line = search_file('JUJU_ZOOKEEPER', agent_file_path).strip()
return line.split('=')[1].strip('"')
@contextmanager
def log_hook():
"""Log when a hook starts and stops its execution.
Also log to stdout possible CalledProcessError exceptions raised executing
the hook.
"""
script = script_name()
log(">>> Entering {}".format(script))
try:
yield
except CalledProcessError as err:
log('Exception caught:')
log(err.output)
raise
finally:
log("<<< Exiting {}".format(script))
def parse_source(source):
"""Parse the ``juju-gui-source`` option.
Return a tuple of two elements representing info on how to deploy Juju GUI.
Examples:
- ('stable', None): latest stable release;
- ('stable', '0.1.0'): stable release v0.1.0;
- ('trunk', None): latest trunk release;
- ('trunk', '0.1.0+build.1'): trunk release v0.1.0 bzr revision 1;
- ('branch', 'lp:juju-gui'): release is made from a branch;
- ('url', 'http://example.com/gui'): release from a downloaded file.
"""
if source.startswith('url:'):
source = source[4:]
# Support file paths, including relative paths.
if urlparse(source).scheme == '':
if not source.startswith('/'):
source = os.path.join(os.path.abspath(CURRENT_DIR), source)
source = "file://%s" % source
return 'url', source
if source in ('stable', 'trunk'):
return source, None
if source.startswith('lp:') or source.startswith('http://'):
return 'branch', source
if 'build' in source:
return 'trunk', source
return 'stable', source
def render_to_file(template_name, context, destination):
"""Render the given *template_name* into *destination* using *context*.
The tempita template language is used to render contents
(see http://pythonpaste.org/tempita/).
The argument *template_name* is the name or path of the template file:
it may be either a path relative to ``../config`` or an absolute path.
The argument *destination* is a file path.
The argument *context* is a dict-like object.
"""
template_path = os.path.abspath(template_name)
template = tempita.Template.from_filename(template_path)
with open(destination, 'w') as stream:
stream.write(template.substitute(context))
results_log = None
def _setupLogging():
global results_log
if results_log is not None:
return
cfg = config()
logging.basicConfig(
filename=cfg['command-log-file'],
level=logging.INFO,
format="%(asctime)s: %(name)s@%(levelname)s %(message)s")
results_log = logging.getLogger('juju-gui')
def cmd_log(results):
global results_log
if not results:
return
if results_log is None:
_setupLogging()
# Since 'results' may be multi-line output, start it on a separate line
# from the logger timestamp, etc.
results_log.info('\n' + results)
def start_improv(staging_env, ssl_cert_path,
config_path='/etc/init/juju-api-improv.conf'):
"""Start a simulated juju environment using ``improv.py``."""
log('Setting up staging start up script.')
context = {
'juju_dir': JUJU_DIR,
'keys': ssl_cert_path,
'port': API_PORT,
'staging_env': staging_env,
}
render_to_file('config/juju-api-improv.conf.template', context, config_path)
log('Starting the staging backend.')
with su('root'):
service_start(IMPROV)
def start_agent(
ssl_cert_path, config_path='/etc/init/juju-api-agent.conf',
read_only=False):
"""Start the Juju agent and connect to the current environment."""
# Retrieve the Zookeeper address from the start up script.
unit_dir = os.path.realpath(os.path.join(CURRENT_DIR, '..'))
agent_file = '/etc/init/juju-{0}.conf'.format(os.path.basename(unit_dir))
zookeeper = get_zookeeper_address(agent_file)
log('Setting up API agent start up script.')
context = {
'juju_dir': JUJU_DIR,
'keys': ssl_cert_path,
'port': API_PORT,
'zookeeper': zookeeper,
'read_only': read_only
}
render_to_file('config/juju-api-agent.conf.template', context, config_path)
log('Starting API agent.')
with su('root'):
service_start(AGENT)
def start_gui(
console_enabled, login_help, readonly, in_staging, ssl_cert_path,
charmworld_url, serve_tests, haproxy_path='/etc/haproxy/haproxy.cfg',
config_js_path=None, secure=True, sandbox=False):
"""Set up and start the Juju GUI server."""
with su('root'):
run('chown', '-R', 'ubuntu:', JUJU_GUI_DIR)
# XXX 2013-02-05 frankban bug=1116320:
# External insecure resources are still loaded when testing in the
# debug environment. For now, switch to the production environment if
# the charm is configured to serve tests.
if in_staging and not serve_tests:
build_dirname = 'build-debug'
else:
build_dirname = 'build-prod'
build_dir = os.path.join(JUJU_GUI_DIR, build_dirname)
log('Generating the Juju GUI configuration file.')
is_legacy_juju = legacy_juju()
user, password = None, None
if (is_legacy_juju and in_staging) or sandbox:
user, password = 'admin', 'admin'
else:
user, password = None, None
api_backend = 'python' if is_legacy_juju else 'go'
if secure:
protocol = 'wss'
else:
log('Running in insecure mode! Port 80 will serve unencrypted.')
protocol = 'ws'
context = {
'raw_protocol': protocol,
'address': unit_get('public-address'),
'console_enabled': json.dumps(console_enabled),
'login_help': json.dumps(login_help),
'password': json.dumps(password),
'api_backend': json.dumps(api_backend),
'readonly': json.dumps(readonly),
'user': json.dumps(user),
'protocol': json.dumps(protocol),
'sandbox': json.dumps(sandbox),
'charmworld_url': json.dumps(charmworld_url),
}
if config_js_path is None:
config_js_path = os.path.join(
build_dir, 'juju-ui', 'assets', 'config.js')
render_to_file('config/config.js.template', context, config_js_path)
write_apache_config(build_dir, serve_tests)
log('Generating haproxy configuration file.')
if is_legacy_juju:
# The PyJuju API agent is listening on localhost.
api_address = '127.0.0.1:{0}'.format(API_PORT)
else:
# Retrieve the juju-core API server address.
api_address = get_api_address(os.path.join(CURRENT_DIR, '..'))
context = {
'api_address': api_address,
'api_pem': JUJU_PEM,
'legacy_juju': is_legacy_juju,
'ssl_cert_path': ssl_cert_path,
# In PyJuju environments, use the same certificate for both HTTPS and
# WebSocket connections. In juju-core the system already has the proper
# certificate installed.
'web_pem': JUJU_PEM,
'web_port': WEB_PORT,
'secure': secure
}
render_to_file('config/haproxy.cfg.template', context, haproxy_path)
log('Starting Juju GUI.')
def write_apache_config(build_dir, serve_tests=False):
log('Generating the apache site configuration file.')
context = {
'port': WEB_PORT,
'serve_tests': serve_tests,
'server_root': build_dir,
'tests_root': os.path.join(JUJU_GUI_DIR, 'test', ''),
}
render_to_file('config/apache-ports.template', context, JUJU_GUI_PORTS)
render_to_file('config/apache-site.template', context, JUJU_GUI_SITE)
def get_npm_cache_archive_url(Launchpad=Launchpad):
"""Figure out the URL of the most recent NPM cache archive on Launchpad."""
launchpad = Launchpad.login_anonymously('Juju GUI charm', 'production')
project = launchpad.projects['juju-gui']
# Find the URL of the most recently created NPM cache archive.
npm_cache_url = get_release_file_url(project, 'npm-cache', None)
return npm_cache_url
def prime_npm_cache(npm_cache_url):
"""Download NPM cache archive and prime the NPM cache with it."""
# Download the cache archive and then uncompress it into the NPM cache.
npm_cache_archive = os.path.join(CURRENT_DIR, 'npm-cache.tgz')
cmd_log(run('curl', '-L', '-o', npm_cache_archive, npm_cache_url))
npm_cache_dir = os.path.expanduser('~/.npm')
    # Create the NPM cache directory if it does not already exist.
    try:
        os.mkdir(npm_cache_dir)
    except OSError as e:
        # Ignore the error if the directory already exists.
        if e.errno != errno.EEXIST:
            raise
uncompress = command('tar', '-x', '-z', '-C', npm_cache_dir, '-f')
cmd_log(uncompress(npm_cache_archive))
def fetch_gui(juju_gui_source, logpath):
"""Retrieve the Juju GUI release/branch."""
# Retrieve a Juju GUI release.
origin, version_or_branch = parse_source(juju_gui_source)
if origin == 'branch':
# Make sure we have the dependencies necessary for us to actually make
# a build.
_get_build_dependencies()
# Create a release starting from a branch.
juju_gui_source_dir = os.path.join(CURRENT_DIR, 'juju-gui-source')
log('Retrieving Juju GUI source checkout from %s.' % version_or_branch)
cmd_log(run('rm', '-rf', juju_gui_source_dir))
cmd_log(bzr_checkout(version_or_branch, juju_gui_source_dir))
log('Preparing a Juju GUI release.')
logdir = os.path.dirname(logpath)
fd, name = tempfile.mkstemp(prefix='make-distfile-', dir=logdir)
log('Output from "make distfile" sent to %s' % name)
with environ(NO_BZR='1'):
run('make', '-C', juju_gui_source_dir, 'distfile',
stdout=fd, stderr=fd)
release_tarball = first_path_in_dir(
os.path.join(juju_gui_source_dir, 'releases'))
else:
log('Retrieving Juju GUI release.')
if origin == 'url':
file_url = version_or_branch
else:
# Retrieve a release from Launchpad.
launchpad = Launchpad.login_anonymously(
'Juju GUI charm', 'production')
project = launchpad.projects['juju-gui']
file_url = get_release_file_url(project, origin, version_or_branch)
log('Downloading release file from %s.' % file_url)
release_tarball = os.path.join(CURRENT_DIR, 'release.tgz')
cmd_log(run('curl', '-L', '-o', release_tarball, file_url))
return release_tarball
def fetch_api(juju_api_branch):
"""Retrieve the Juju branch."""
# Retrieve Juju API source checkout.
log('Retrieving Juju API source checkout.')
cmd_log(run('rm', '-rf', JUJU_DIR))
cmd_log(bzr_checkout(juju_api_branch, JUJU_DIR))
def setup_gui(release_tarball):
"""Set up Juju GUI."""
# Uncompress the release tarball.
log('Installing Juju GUI.')
release_dir = os.path.join(CURRENT_DIR, 'release')
cmd_log(run('rm', '-rf', release_dir))
os.mkdir(release_dir)
uncompress = command('tar', '-x', '-z', '-C', release_dir, '-f')
cmd_log(uncompress(release_tarball))
# Link the Juju GUI dir to the contents of the release tarball.
cmd_log(run('ln', '-sf', first_path_in_dir(release_dir), JUJU_GUI_DIR))
def setup_apache():
"""Set up apache."""
log('Setting up apache.')
if not os.path.exists(JUJU_GUI_SITE):
cmd_log(run('touch', JUJU_GUI_SITE))
cmd_log(run('chown', 'ubuntu:', JUJU_GUI_SITE))
cmd_log(
run('ln', '-s', JUJU_GUI_SITE,
'/etc/apache2/sites-enabled/juju-gui'))
if not os.path.exists(JUJU_GUI_PORTS):
cmd_log(run('touch', JUJU_GUI_PORTS))
cmd_log(run('chown', 'ubuntu:', JUJU_GUI_PORTS))
with su('root'):
run('a2dissite', 'default')
run('a2ensite', 'juju-gui')
def save_or_create_certificates(
ssl_cert_path, ssl_cert_contents, ssl_key_contents):
"""Generate the SSL certificates.
If both *ssl_cert_contents* and *ssl_key_contents* are provided, use them
as certificates; otherwise, generate them.
Also create a pem file, suitable for use in the haproxy configuration,
concatenating the key and the certificate files.
"""
crt_path = os.path.join(ssl_cert_path, 'juju.crt')
key_path = os.path.join(ssl_cert_path, 'juju.key')
if not os.path.exists(ssl_cert_path):
os.makedirs(ssl_cert_path)
if ssl_cert_contents and ssl_key_contents:
# Save the provided certificates.
with open(crt_path, 'w') as cert_file:
cert_file.write(ssl_cert_contents)
with open(key_path, 'w') as key_file:
key_file.write(ssl_key_contents)
else:
# Generate certificates.
# See http://superuser.com/questions/226192/openssl-without-prompt
cmd_log(run(
'openssl', 'req', '-new', '-newkey', 'rsa:4096',
'-days', '365', '-nodes', '-x509', '-subj',
# These are arbitrary test values for the certificate.
'/C=GB/ST=Juju/L=GUI/O=Ubuntu/CN=juju.ubuntu.com',
'-keyout', key_path, '-out', crt_path))
# Generate the pem file.
pem_path = os.path.join(ssl_cert_path, JUJU_PEM)
if os.path.exists(pem_path):
os.remove(pem_path)
with open(pem_path, 'w') as pem_file:
shutil.copyfileobj(open(key_path), pem_file)
shutil.copyfileobj(open(crt_path), pem_file)
def find_missing_packages(*packages):
"""Given a list of packages, return the packages which are not installed.
"""
cache = apt.Cache()
missing = set()
for pkg_name in packages:
try:
pkg = cache[pkg_name]
except KeyError:
missing.add(pkg_name)
continue
if pkg.is_installed:
continue
missing.add(pkg_name)
return missing
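# Hypothetical usage sketch: install only the packages that are actually
# missing (the package names and apt-get invocation are assumptions).
def _example_install_missing(*packages):
    missing = find_missing_packages(*packages)
    if missing:
        cmd_log(run('apt-get', 'install', '-y', *sorted(missing)))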
## Backend support decorators
def chain(name):
"""Helper method to compose a set of mixin objects into a callable.
Each method is called in the context of its mixin instance, and its
argument is the Backend instance.
"""
# Chain method calls through all implementing mixins.
def method(self):
for mixin in self.mixins:
a_callable = getattr(type(mixin), name, None)
if a_callable:
a_callable(mixin, self)
method.__name__ = name
return method
def merge(name):
"""Helper to merge a property from a set of strategy objects
into a unified set.
"""
# Return merged property from every providing mixin as a set.
@property
def method(self):
result = set()
for mixin in self.mixins:
segment = getattr(type(mixin), name, None)
if segment and isinstance(segment, (list, tuple, set)):
result |= set(segment)
return result
return method
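# Illustrative sketch (class names are assumptions): how chain() and merge()
# compose mixin behaviour on a backend-style object that holds a `mixins` list.
class _ExampleMixin(object):
    debs = ('curl', 'openssl')

    def install(self, backend):
        pass  # each mixin contributes its own install step


class _ExampleBackend(object):
    def __init__(self, mixins):
        self.mixins = mixins

    install = chain('install')   # calls install() on every mixin in order
    debs = merge('debs')         # union of every mixin's `debs` collection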

View File

@ -1,75 +0,0 @@
''' Helpers for interacting with OpenvSwitch '''
import subprocess
import os
from charmhelpers.core.hookenv import (
log, WARNING
)
from charmhelpers.core.host import (
service
)
def add_bridge(name):
''' Add the named bridge to openvswitch '''
log('Creating bridge {}'.format(name))
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
def del_bridge(name):
''' Delete the named bridge from openvswitch '''
log('Deleting bridge {}'.format(name))
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
def add_bridge_port(name, port):
''' Add a port to the named openvswitch bridge '''
log('Adding port {} to bridge {}'.format(port, name))
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
name, port])
subprocess.check_call(["ip", "link", "set", port, "up"])
def del_bridge_port(name, port):
''' Delete a port from the named openvswitch bridge '''
log('Deleting port {} from bridge {}'.format(port, name))
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
name, port])
subprocess.check_call(["ip", "link", "set", port, "down"])
def set_manager(manager):
''' Set the controller for the local openvswitch '''
log('Setting manager for local ovs to {}'.format(manager))
subprocess.check_call(['ovs-vsctl', 'set-manager',
'ssl:{}'.format(manager)])
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
def get_certificate():
''' Read openvswitch certificate from disk '''
if os.path.exists(CERT_PATH):
log('Reading ovs certificate from {}'.format(CERT_PATH))
with open(CERT_PATH, 'r') as cert:
full_cert = cert.read()
begin_marker = "-----BEGIN CERTIFICATE-----"
end_marker = "-----END CERTIFICATE-----"
begin_index = full_cert.find(begin_marker)
end_index = full_cert.rfind(end_marker)
if end_index == -1 or begin_index == -1:
raise RuntimeError("Certificate does not contain valid begin"
" and end markers.")
full_cert = full_cert[begin_index:(end_index + len(end_marker))]
return full_cert
else:
log('Certificate not found', level=WARNING)
return None
def full_restart():
''' Full restart and reload of openvswitch '''
if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
service('start', 'openvswitch-force-reload-kmod')
else:
service('force-reload-kmod', 'openvswitch-switch')

View File

@ -1,102 +0,0 @@
"""Charm Helpers saltstack - declare the state of your machines.
This helper enables you to declare your machine state, rather than
program it procedurally (and have to test each change to your procedures).
Your install hook can be as simple as:
{{{
from charmhelpers.contrib.saltstack import (
install_salt_support,
update_machine_state,
)
def install():
install_salt_support()
update_machine_state('machine_states/dependencies.yaml')
update_machine_state('machine_states/installed.yaml')
}}}
and won't need to change (nor will its tests) when you change the machine
state.
It uses a Python package called salt-minion, which allows various formats for
specifying resources, such as:
{{{
/srv/{{ basedir }}:
file.directory:
- group: ubunet
- user: ubunet
- require:
- user: ubunet
- recurse:
- user
- group
ubunet:
group.present:
- gid: 1500
user.present:
- uid: 1500
- gid: 1500
- createhome: False
- require:
- group: ubunet
}}}
The docs for all the different state definitions are at:
http://docs.saltstack.com/ref/states/all/
TODO:
* Add test helpers which will ensure that machine state definitions
    are functionally (but not necessarily logically) correct (i.e. getting
    salt to parse all state defs).
* Add a link to a public bootstrap charm example / blogpost.
* Find a way to obviate the need to use the grains['charm_dir'] syntax
in templates.
"""
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
import subprocess
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
import charmhelpers.fetch
salt_grains_path = '/etc/salt/grains'
def install_salt_support(from_ppa=True):
"""Installs the salt-minion helper for machine state.
By default the salt-minion package is installed from
the saltstack PPA. If from_ppa is False you must ensure
that the salt-minion package is available in the apt cache.
"""
if from_ppa:
subprocess.check_call([
'/usr/bin/add-apt-repository',
'--yes',
'ppa:saltstack/salt',
])
subprocess.check_call(['/usr/bin/apt-get', 'update'])
    # Install salt-common rather than salt-minion, since installing
    # salt-minion would also start the salt-minion daemon.
    charmhelpers.fetch.apt_install('salt-common')
def update_machine_state(state_path):
"""Update the machine state using the provided state declaration."""
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
salt_grains_path)
subprocess.check_call([
'salt-call',
'--local',
'state.template',
state_path,
])

View File

@ -1,78 +0,0 @@
import subprocess
from charmhelpers.core import hookenv
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
"""Generate selfsigned SSL keypair
You must provide one of the 3 optional arguments:
config, subject or cn
If more than one is provided the leftmost will be used
Arguments:
keyfile -- (required) full path to the keyfile to be created
certfile -- (required) full path to the certfile to be created
keysize -- (optional) SSL key length
config -- (optional) openssl configuration file
subject -- (optional) dictionary with SSL subject variables
    cn -- (optional) certificate common name
    Required keys in subject dict:
    cn -- Common name (e.g. FQDN)
    Optional keys in subject dict:
country -- Country Name (2 letter code)
state -- State or Province Name (full name)
locality -- Locality Name (eg, city)
organization -- Organization Name (eg, company)
organizational_unit -- Organizational Unit Name (eg, section)
email -- Email Address
"""
cmd = []
if config:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-config", config]
elif subject:
ssl_subject = ""
if "country" in subject:
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
if "state" in subject:
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
if "locality" in subject:
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
if "organization" in subject:
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
if "organizational_unit" in subject:
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
if "cn" in subject:
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
else:
hookenv.log("When using \"subject\" argument you must "
"provide \"cn\" field at very least")
return False
if "email" in subject:
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", ssl_subject]
elif cn:
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
"-keyout", keyfile,
"-out", certfile, "-subj", "/CN={}".format(cn)]
if not cmd:
hookenv.log("No config, subject or cn provided,"
"unable to generate self signed SSL certificates")
return False
try:
subprocess.check_call(cmd)
return True
except Exception as e:
print "Execution of openssl command failed:\n{}".format(e)
return False
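# Hypothetical usage sketch; the paths and subject values are assumptions
# chosen only to illustrate the call signature of generate_selfsigned().
def _example_selfsigned():
    subject = {'cn': 'example.internal', 'country': 'GB', 'locality': 'London'}
    return generate_selfsigned('/etc/ssl/private/example.key',
                               '/etc/ssl/certs/example.crt',
                               keysize="2048", subject=subject)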

View File

@ -1,73 +0,0 @@
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
"""A helper to create a yaml cache of config with namespaced relation data."""
import os
import yaml
import charmhelpers.core.hookenv
charm_dir = os.environ.get('CHARM_DIR', '')
def juju_state_to_yaml(yaml_path, namespace_separator=':',
allow_hyphens_in_keys=True):
"""Update the juju config and state in a yaml file.
This includes any current relation-get data, and the charm
directory.
This function was created for the ansible and saltstack
support, as those libraries can use a yaml file to supply
context to templates, but it may be useful generally to
create and update an on-disk cache of all the config, including
previous relation data.
By default, hyphens are allowed in keys as this is supported
by yaml, but for tools like ansible, hyphens are not valid [1].
[1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
"""
config = charmhelpers.core.hookenv.config()
# Add the charm_dir which we will need to refer to charm
# file resources etc.
config['charm_dir'] = charm_dir
config['local_unit'] = charmhelpers.core.hookenv.local_unit()
# Add any relation data prefixed with the relation type.
relation_type = charmhelpers.core.hookenv.relation_type()
if relation_type is not None:
relation_data = charmhelpers.core.hookenv.relation_get()
relation_data = dict(
("{relation_type}{namespace_separator}{key}".format(
relation_type=relation_type.replace('-', '_'),
key=key,
namespace_separator=namespace_separator), val)
for key, val in relation_data.items())
config.update(relation_data)
    # Don't use non-standard tags for unicode, as they will not work
    # when salt uses yaml.safe_load.
    yaml.add_representer(
        unicode,
        lambda dumper, value: dumper.represent_scalar(
            u'tag:yaml.org,2002:str', value))
yaml_dir = os.path.dirname(yaml_path)
if not os.path.exists(yaml_dir):
os.makedirs(yaml_dir)
if os.path.exists(yaml_path):
with open(yaml_path, "r") as existing_vars_file:
existing_vars = yaml.load(existing_vars_file.read())
else:
existing_vars = {}
if not allow_hyphens_in_keys:
config = dict(
(key.replace('-', '_'), val) for key, val in config.items())
existing_vars.update(config)
with open(yaml_path, "w+") as fp:
fp.write(yaml.dump(existing_vars))
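# Hypothetical usage sketch: dump the config plus namespaced relation data to
# a vars file that a tool such as ansible could consume (path is an assumption).
def _example_write_vars_cache():
    juju_state_to_yaml('/etc/ansible/host_vars/localhost',
                       namespace_separator='__',
                       allow_hyphens_in_keys=False)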

View File

@ -1,13 +0,0 @@
'''
Templating using standard Python str.format() method.
'''
from charmhelpers.core import hookenv
def render(template, extra={}, **kwargs):
"""Return the template rendered using Python's str.format()."""
context = hookenv.execution_environment()
context.update(extra)
context.update(kwargs)
return template.format(**context)

View File

@ -136,7 +136,7 @@ def apt_hold(packages, fatal=False):
def add_source(source, key=None):
if (source.startswith('ppa:') or
source.startswith('http:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
@ -156,7 +156,9 @@ def add_source(source, key=None):
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'import', key])
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'keyserver.ubuntu.com', '--recv',
key])
class SourceConfigError(Exception):

View File

@ -1 +0,0 @@
"Tools for working with files injected into a charm just before deployment."

View File

@ -1,57 +0,0 @@
import os
import tarfile
import zipfile
from charmhelpers.core import (
host,
hookenv,
)
class ArchiveError(Exception):
pass
def get_archive_handler(archive_name):
if os.path.isfile(archive_name):
if tarfile.is_tarfile(archive_name):
return extract_tarfile
elif zipfile.is_zipfile(archive_name):
return extract_zipfile
else:
# look at the file name
        for ext in ('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz2', '.tbz'):
if archive_name.endswith(ext):
return extract_tarfile
for ext in ('.zip', '.jar'):
if archive_name.endswith(ext):
return extract_zipfile
def archive_dest_default(archive_name):
archive_file = os.path.basename(archive_name)
return os.path.join(hookenv.charm_dir(), "archives", archive_file)
def extract(archive_name, destpath=None):
handler = get_archive_handler(archive_name)
if handler:
if not destpath:
destpath = archive_dest_default(archive_name)
if not os.path.isdir(destpath):
host.mkdir(destpath)
handler(archive_name, destpath)
return destpath
else:
raise ArchiveError("No handler for archive")
def extract_tarfile(archive_name, destpath):
"Unpack a tar archive, optionally compressed"
archive = tarfile.open(archive_name)
archive.extractall(destpath)
def extract_zipfile(archive_name, destpath):
"Unpack a zip file"
archive = zipfile.ZipFile(archive_name)
archive.extractall(destpath)
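# Hypothetical usage sketch: unpack a previously fetched archive into the
# default destination under the charm dir (the file name is an assumption).
def _example_unpack(archive_name='/tmp/release.tgz'):
    destpath = extract(archive_name)   # handler chosen by archive type
    hookenv.log('Unpacked {} to {}'.format(archive_name, destpath))
    return destpath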

View File

@ -1,50 +0,0 @@
#!/usr/bin/env python
import os
import sys
import subprocess
from charmhelpers.core import hookenv
def default_execd_dir():
return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
def execd_module_paths(execd_dir=None):
"""Generate a list of full paths to modules within execd_dir."""
if not execd_dir:
execd_dir = default_execd_dir()
if not os.path.exists(execd_dir):
return
for subpath in os.listdir(execd_dir):
module = os.path.join(execd_dir, subpath)
if os.path.isdir(module):
yield module
def execd_submodule_paths(command, execd_dir=None):
"""Generate a list of full paths to the specified command within exec_dir.
"""
for module_path in execd_module_paths(execd_dir):
path = os.path.join(module_path, command)
if os.access(path, os.X_OK) and os.path.isfile(path):
yield path
def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
"""Run command for each module within execd_dir which defines it."""
for submodule_path in execd_submodule_paths(command, execd_dir):
try:
subprocess.check_call(submodule_path, shell=True, stderr=stderr)
except subprocess.CalledProcessError as e:
hookenv.log("Error ({}) running {}. Output: {}".format(
e.returncode, e.cmd, e.output))
if die_on_error:
sys.exit(e.returncode)
def execd_preinstall(execd_dir=None):
"""Run charm-pre-install for each module within execd_dir."""
execd_run('charm-pre-install', execd_dir=execd_dir)
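# Hypothetical layout sketch: executables placed under $CHARM_DIR/exec.d, for
# example exec.d/00-custom/charm-pre-install, are picked up by
# execd_preinstall() and run before the charm's own install steps. A typical
# install hook would simply start with:
def _example_install_hook():
    execd_preinstall()   # run any customisation pre-install scripts first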