[hopem,r=wolsen]

Refactor and clean up the hacluster charm.
This makes the code format and layout more consistent with
the rest of the openstack charms.
Billy Olsen 2015-05-01 05:43:29 -07:00
commit 21c535bd6c
29 changed files with 1726 additions and 715 deletions


@ -1,2 +1,3 @@
revision
bin
.coverage


@ -2,17 +2,33 @@
PYTHON := /usr/bin/env python
lint:
@flake8 --exclude hooks/charmhelpers hooks unit_tests
@flake8 --exclude hooks/charmhelpers hooks unit_tests tests
@charm proof
unit_test:
@echo Starting tests...
@echo Starting unit tests...
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
test:
@echo Starting Amulet tests...
ifndef OS_CHARMS_AMULET_VIP
@echo "WARNING: HA tests require OS_CHARMS_AMULET_VIP set to usable vip address"
endif
# coreycb note: The -v should only be temporary until Amulet sends
# raise_status() messages to stderr:
# https://bugs.launchpad.net/amulet/+bug/1320357
@juju test -v -p AMULET_HTTP_PROXY,OS_CHARMS_AMULET_VIP --timeout 900 \
00-setup 15-basic-trusty-icehouse
bin/charm_helpers_sync.py:
@mkdir -p bin
@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
> bin/charm_helpers_sync.py
sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
publish: lint unit_test
bzr push lp:charms/hacluster
bzr push lp:charms/trusty/hacluster

TODO

@ -1,27 +0,0 @@
HA Cluster (pacemaker/corosync) Charm
======================================
* Peer-relations
- make sure node was added to the cluster
- make sure node has been removed from the cluster (when deleting unit)
* One thing that can be done is to:
1. ha-relation-joined puts node in standby.
2. ha-relation-joined makes HA configuration
3. on hanode-relation-joined (2 or more nodes)
- services are stopped from upstart/lsb
- nodes are put in online mode
- services are loaded by cluster
- this way it is not in HA until we have a second node.
* Needs to communicate the VIP to the top service
* TODO: Fix disabling of upstart jobs
- sudo sh -c "echo 'manual' > /etc/init/SERVICE.override"
update-rc.d -f pacemaker remove
update-rc.d pacemaker start 50 1 2 3 4 5 . stop 01 0 6 .
TODO: Problem seems to be that peer-relation gets executed before the subordinate relation.
In that case, peer relation would have to put nodes in standby and then the subordinate relation
will have to put the nodes online and configure the services. Or probably not use it at all.
Hanode-relation puts node in standby.
ha-relation counts nodes in hanode-relation and if >2 then we online them and setup cluster.
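
For context, the staged flow these notes describe maps onto the pcmk helpers this charm ships in hooks/pcmk.py. A minimal sketch, assuming pcmk.standby() and pcmk.online() as defined in that module (stage_node() and the peer-count check are illustrative, not charm code):

# Illustrative only: join the cluster in standby first (step 1), then go
# online once two or more peers exist (step 3), per the notes above.
import pcmk

def stage_node(node, peer_count):
    pcmk.standby(node)
    if peer_count >= 2:
        pcmk.online(node)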

charm-helpers-tests.yaml Normal file

@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet


@ -1,7 +1,23 @@
options:
debug:
type: boolean
default: False
description: Enable debug logging
prefer-ipv6:
type: boolean
default: False
description: |
If True enables IPv6 support. The charm will expect network interfaces
to be configured with an IPv6 address. If set to False (default) IPv4
is expected.
.
NOTE: these charms do not currently support IPv6 privacy extension. In
order for this charm to function correctly, the privacy extension must be
disabled and a non-temporary address must be configured/available on
your network interface.
corosync_mcastaddr:
default: 226.94.1.1
type: string
default: 226.94.1.1
description: |
Multicast IP address to use for exchanging messages over the network.
If multiple clusters are on the same bindnetaddr network, this value
@ -34,9 +50,9 @@ options:
type: string
default: 'False'
description: |
Enable resource fencing (aka STONITH) for every node in the cluster.
This requires that MAAS credentials be provided and that each node's power
parameters are properly configured in its inventory.
Enable resource fencing (aka STONITH) for every node in the cluster.
This requires that MAAS credentials be provided and that each node's power
parameters are properly configured in its inventory.
maas_url:
type: string
default:
@ -59,16 +75,16 @@ options:
type: string
default:
description: |
One or more IPs, separated by space, that will be used as a safety check
for avoiding split brain situations. Nodes in the cluster will ping these
IPs periodically. Nodes that cannot ping monitor_host will not run shared
resources (VIP, shared disk...).
One or more IPs, separated by space, that will be used as a safety check
for avoiding split brain situations. Nodes in the cluster will ping these
IPs periodically. Nodes that cannot ping monitor_host will not run shared
resources (VIP, shared disk...).
monitor_interval:
type: string
default: 5s
description: |
Time period between checks of resource health. It consists of a number
and a time factor, e.g. 5s = 5 seconds. 2m = 2 minutes.
Time period between checks of resource health. It consists of a number
and a time factor, e.g. 5s = 5 seconds. 2m = 2 minutes.
netmtu:
type: int
default:
@ -76,40 +92,26 @@ options:
Specifies the corosync.conf network mtu. If unset, the default
corosync.conf value is used (currently 1500). See 'man corosync.conf' for
detailed information on this config option.
prefer-ipv6:
type: boolean
default: False
description: |
If True enables IPv6 support. The charm will expect network interfaces
to be configured with an IPv6 address. If set to False (default) IPv4
is expected.
.
NOTE: these charms do not currently support IPv6 privacy extension. In
order for this charm to function correctly, the privacy extension must be
disabled and a non-temporary address must be configured/available on
your network interface.
corosync_transport:
type: string
default: "multicast"
description: |
Two supported modes are multicast (udp) or unicast (udpu)
debug:
default: False
type: boolean
description: Enable debug logging
nagios_context:
default: "juju"
type: string
description: |
Used by the nrpe-external-master subordinate charm.
A string that will be prepended to instance name to set the host name
in nagios. So for instance the hostname would be something like:
juju-postgresql-0
If you're running multiple environments with the same services in them
this allows you to differentiate between them.
Used by the nrpe-external-master subordinate charm.
A string that will be prepended to instance name to set the host name
in nagios. So for instance the hostname would be something like:
.
juju-postgresql-0
.
If you're running multiple environments with the same services in them
this allows you to differentiate between them.
nagios_servicegroups:
default: ""
type: string
description: |
A comma-separated list of nagios servicegroups.
If left empty, the nagios_context will be used as the servicegroup
A comma-separated list of nagios servicegroups.
If left empty, the nagios_context will be used as the servicegroup
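
The monitor_interval format described above is a number plus a time factor. A small illustrative parser, assuming only s/m/h suffixes (the charm itself passes the raw string through to crm):

# Hypothetical helper: convert '5s'/'2m' style intervals to seconds.
def interval_seconds(value):
    factors = {'s': 1, 'm': 60, 'h': 3600}
    return int(value[:-1]) * factors[value[-1]]

assert interval_seconds('5s') == 5
assert interval_seconds('2m') == 120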


@ -1,109 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Paul Collins <paul.collins@canonical.com>
#
import os
import subprocess
import socket
import fcntl
import struct
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.network import ip as utils
try:
import netifaces
except ImportError:
apt_install('python-netifaces')
import netifaces
try:
from netaddr import IPNetwork
except ImportError:
apt_install('python-netaddr', fatal=True)
from netaddr import IPNetwork
def disable_upstart_services(*services):
for service in services:
with open("/etc/init/{}.override".format(service), "w") as override:
override.write("manual")
def enable_upstart_services(*services):
for service in services:
path = '/etc/init/{}.override'.format(service)
if os.path.exists(path):
os.remove(path)
def disable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'remove'])
def enable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'defaults'])
def get_iface_ipaddr(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8919, # SIOCGIFADDR
struct.pack('256s', iface[:15])
)[20:24])
def get_iface_netmask(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x891b, # SIOCGIFNETMASK
struct.pack('256s', iface[:15])
)[20:24])
def get_netmask_cidr(netmask):
netmask = netmask.split('.')
binary_str = ''
for octet in netmask:
binary_str += bin(int(octet))[2:].zfill(8)
return str(len(binary_str.rstrip('0')))
def get_network_address(iface):
if iface:
iface = str(iface)
network = "{}/{}".format(get_iface_ipaddr(iface),
get_netmask_cidr(get_iface_netmask(iface)))
ip = IPNetwork(network)
return str(ip.network)
else:
return None
def get_ipv6_network_address(iface):
# Behave in same way as ipv4 get_network_address() above if iface is None.
if not iface:
return None
try:
ipv6_addr = utils.get_ipv6_addr(iface=iface)[0]
all_addrs = netifaces.ifaddresses(iface)
for addr in all_addrs[netifaces.AF_INET6]:
if ipv6_addr == addr['addr']:
network = "{}/{}".format(addr['addr'], addr['netmask'])
return str(IPNetwork(network).network)
except ValueError:
raise Exception("Invalid interface '%s'" % iface)
raise Exception("No valid network found for interface '%s'" % iface)


@ -1,46 +1,31 @@
#!/usr/bin/python
#
# Copyright 2012 Canonical Ltd.
# Copyright 2015 Canonical Ltd.
#
# Authors:
# Andres Rodriguez <andres.rodriguez@canonical.com>
#
import ast
import shutil
import sys
import os
import sys
import glob
from base64 import b64decode
import maas as MAAS
import pcmk
import hacluster
import socket
from charmhelpers.core.hookenv import (
log,
relation_get,
DEBUG,
INFO,
related_units,
relation_ids,
relation_set,
unit_get,
config,
Hooks, UnregisteredHookError,
local_unit,
unit_private_ip,
Hooks,
UnregisteredHookError,
)
from charmhelpers.core.host import (
service_start,
service_stop,
service_restart,
service_running,
write_file,
mkdir,
file_hash,
lsb_release
)
from charmhelpers.fetch import (
@ -50,17 +35,29 @@ from charmhelpers.fetch import (
)
from charmhelpers.contrib.hahelpers.cluster import (
peer_ips,
peer_units,
oldest_peer
)
from charmhelpers.contrib.openstack.utils import get_host_ip
from utils import (
get_corosync_conf,
assert_charm_supports_ipv6,
get_cluster_nodes,
parse_data,
configure_corosync,
configure_stonith,
configure_monitor_host,
configure_cluster_global,
enable_lsb_services,
disable_lsb_services,
disable_upstart_services,
)
from charmhelpers.contrib.charmsupport import nrpe
hooks = Hooks()
PACKAGES = ['corosync', 'pacemaker', 'python-netaddr', 'ipmitool']
COROSYNC_CONF = '/etc/corosync/corosync.conf'
COROSYNC_DEFAULT = '/etc/default/corosync'
COROSYNC_AUTHKEY = '/etc/corosync/authkey'
@ -74,6 +71,7 @@ COROSYNC_CONF_FILES = [
PACKAGES = ['corosync', 'pacemaker', 'python-netaddr', 'ipmitool',
'libnagios-plugin-perl']
SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast']
DEPRECATED_TRANSPORT_VALUES = {"multicast": "udp", "unicast": "udpu"}
@hooks.hook()
@ -88,12 +86,10 @@ def install():
if not os.path.isfile('/usr/lib/ocf/resource.d/ceph/rbd'):
shutil.copy('ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd')
_deprecated_transport_values = {"multicast": "udp", "unicast": "udpu"}
def get_transport():
transport = config('corosync_transport')
val = _deprecated_transport_values.get(transport, transport)
val = DEPRECATED_TRANSPORT_VALUES.get(transport, transport)
if val not in ['udp', 'udpu']:
msg = ("Unsupported corosync_transport type '%s' - supported "
"types are: %s" % (transport, ', '.join(SUPPORTED_TRANSPORTS)))
@ -101,100 +97,6 @@ def get_transport():
return val
def get_corosync_id(unit_name):
# Corosync nodeid 0 is reserved so increase all the nodeids to avoid it
off_set = 1000
return off_set + int(unit_name.split('/')[1])
def get_ha_nodes():
ha_units = peer_ips(peer_relation='hanode')
ha_units[local_unit()] = unit_private_ip()
ha_nodes = {}
for unit in ha_units:
corosync_id = get_corosync_id(unit)
ha_nodes[corosync_id] = get_host_ip(ha_units[unit])
return ha_nodes
def get_corosync_conf():
if config('prefer-ipv6'):
ip_version = 'ipv6'
bindnetaddr = hacluster.get_ipv6_network_address
else:
ip_version = 'ipv4'
bindnetaddr = hacluster.get_network_address
# NOTE(jamespage) use local charm configuration over any provided by
# principle charm
conf = {
'corosync_bindnetaddr':
bindnetaddr(config('corosync_bindiface')),
'corosync_mcastport': config('corosync_mcastport'),
'corosync_mcastaddr': config('corosync_mcastaddr'),
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': get_transport(),
'debug': config('debug'),
}
if None not in conf.itervalues():
return conf
conf = {}
if config('netmtu'):
conf['netmtu'] = config('netmtu')
for relid in relation_ids('ha'):
for unit in related_units(relid):
bindiface = relation_get('corosync_bindiface',
unit, relid)
conf = {
'corosync_bindnetaddr': bindnetaddr(bindiface),
'corosync_mcastport': relation_get('corosync_mcastport',
unit, relid),
'corosync_mcastaddr': config('corosync_mcastaddr'),
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': get_transport(),
'debug': config('debug'),
}
if config('prefer-ipv6'):
conf['nodeid'] = get_corosync_id(local_unit())
if None not in conf.itervalues():
return conf
missing = [k for k, v in conf.iteritems() if v is None]
log('Missing required configuration: %s' % missing)
return None
def emit_corosync_conf():
corosync_conf_context = get_corosync_conf()
if corosync_conf_context:
write_file(path=COROSYNC_CONF,
content=render_template('corosync.conf',
corosync_conf_context))
return True
else:
return False
def emit_base_conf():
corosync_default_context = {'corosync_enabled': 'yes'}
write_file(path=COROSYNC_DEFAULT,
content=render_template('corosync',
corosync_default_context))
corosync_key = config('corosync_key')
if corosync_key:
write_file(path=COROSYNC_AUTHKEY,
content=b64decode(corosync_key),
perms=0o400)
return True
else:
return False
@hooks.hook()
def config_changed():
if config('prefer-ipv6'):
@ -202,10 +104,9 @@ def config_changed():
corosync_key = config('corosync_key')
if not corosync_key:
log('CRITICAL',
'No Corosync key supplied, cannot proceed')
sys.exit(1)
hacluster.enable_lsb_services('pacemaker')
raise Exception('No Corosync key supplied, cannot proceed')
enable_lsb_services('pacemaker')
if configure_corosync():
pcmk.wait_for_pcmk()
@ -223,122 +124,31 @@ def upgrade_charm():
update_nrpe_config()
def restart_corosync():
if service_running("pacemaker"):
service_stop("pacemaker")
service_restart("corosync")
service_start("pacemaker")
def restart_corosync_on_change():
'''Simple decorator to restart corosync if any of its config changes'''
def wrap(f):
def wrapped_f(*args):
checksums = {}
for path in COROSYNC_CONF_FILES:
checksums[path] = file_hash(path)
return_data = f(*args)
# NOTE: this assumes that this call is always done around
# configure_corosync, which returns true if configuration
# files were actually generated
if return_data:
for path in COROSYNC_CONF_FILES:
if checksums[path] != file_hash(path):
restart_corosync()
break
return return_data
return wrapped_f
return wrap
@restart_corosync_on_change()
def configure_corosync():
log('Configuring and (maybe) restarting corosync')
return emit_base_conf() and emit_corosync_conf()
def configure_monitor_host():
'''Configure extra monitor host for better network failure detection'''
log('Checking monitor host configuration')
monitor_host = config('monitor_host')
if monitor_host:
if not pcmk.crm_opt_exists('ping'):
log('Implementing monitor host'
' configuration (host: %s)' % monitor_host)
monitor_interval = config('monitor_interval')
cmd = 'crm -w -F configure primitive ping' \
' ocf:pacemaker:ping params host_list="%s"' \
' multiplier="100" op monitor interval="%s"' %\
(monitor_host, monitor_interval)
pcmk.commit(cmd)
cmd = 'crm -w -F configure clone cl_ping ping' \
' meta interleave="true"'
pcmk.commit(cmd)
else:
log('Reconfiguring monitor host'
' configuration (host: %s)' % monitor_host)
cmd = 'crm -w -F resource param ping set host_list="%s"' %\
monitor_host
else:
if pcmk.crm_opt_exists('ping'):
log('Disabling monitor host configuration')
pcmk.commit('crm -w -F resource stop ping')
pcmk.commit('crm -w -F configure delete ping')
def configure_cluster_global():
'''Configure global cluster options'''
log('Applying global cluster configuration')
if int(config('cluster_count')) >= 3:
# NOTE(jamespage) if 3 or more nodes, then quorum can be
# managed effectively, so stop if quorum lost
log('Configuring no-quorum-policy to stop')
cmd = "crm configure property no-quorum-policy=stop"
else:
# NOTE(jamespage) if less than 3 nodes, quorum not possible
# so ignore
log('Configuring no-quorum-policy to ignore')
cmd = "crm configure property no-quorum-policy=ignore"
pcmk.commit(cmd)
cmd = 'crm configure rsc_defaults $id="rsc-options"' \
' resource-stickiness="100"'
pcmk.commit(cmd)
def parse_data(relid, unit, key):
'''Simple helper to ast parse relation data'''
data = relation_get(key, unit, relid)
if data:
return ast.literal_eval(data)
else:
return {}
@hooks.hook('ha-relation-joined',
'ha-relation-changed',
'hanode-relation-joined',
'hanode-relation-changed')
def configure_principle_cluster_resources():
def ha_relation_changed():
# Check that we are related to a principle and that
# it has already provided the required corosync configuration
if not get_corosync_conf():
log('Unable to configure corosync right now, deferring configuration')
log('Unable to configure corosync right now, deferring configuration',
level=INFO)
return
if relation_ids('hanode'):
log('Ready to form cluster - informing peers', level=DEBUG)
relation_set(relation_id=relation_ids('hanode')[0], ready=True)
else:
if relation_ids('hanode'):
log('Ready to form cluster - informing peers')
relation_set(relation_id=relation_ids('hanode')[0],
ready=True)
else:
log('Ready to form cluster, but not related to peers just yet')
return
log('Ready to form cluster, but not related to peers just yet',
level=INFO)
return
# Check that there's enough nodes in order to perform the
# configuration of the HA cluster
if (len(get_cluster_nodes()) <
int(config('cluster_count'))):
log('Not enough nodes in cluster, deferring configuration')
if len(get_cluster_nodes()) < int(config('cluster_count')):
log('Not enough nodes in cluster, deferring configuration',
level=INFO)
return
relids = relation_ids('ha')
@ -347,11 +157,13 @@ def configure_principle_cluster_resources():
relid = relids[0]
units = related_units(relid)
if len(units) < 1:
log('No principle unit found, deferring configuration')
log('No principle unit found, deferring configuration',
level=INFO)
return
unit = units[0]
log('Parsing cluster configuration'
' using rid: {}, unit: {}'.format(relid, unit))
log('Parsing cluster configuration using rid: %s, unit: %s' %
(relid, unit), level=DEBUG)
resources = parse_data(relid, unit, 'resources')
delete_resources = parse_data(relid, unit, 'delete_resources')
resource_params = parse_data(relid, unit, 'resource_params')
@ -363,7 +175,7 @@ def configure_principle_cluster_resources():
locations = parse_data(relid, unit, 'locations')
init_services = parse_data(relid, unit, 'init_services')
else:
log('Related to {} ha services'.format(len(relids)))
log('Related to %s ha services' % (len(relids)), level=DEBUG)
return
if True in [ra.startswith('ocf:openstack')
@ -384,27 +196,26 @@ def configure_principle_cluster_resources():
# Only configure the cluster resources
# from the oldest peer unit.
if oldest_peer(peer_units()):
log('Deleting Resources')
log(delete_resources)
log('Deleting Resources: %s' % (delete_resources), level=DEBUG)
for res_name in delete_resources:
if pcmk.crm_opt_exists(res_name):
log('Stopping and deleting resource %s' % res_name)
log('Stopping and deleting resource %s' % res_name,
level=DEBUG)
if pcmk.crm_res_running(res_name):
pcmk.commit('crm -w -F resource stop %s' % res_name)
pcmk.commit('crm -w -F configure delete %s' % res_name)
log('Configuring Resources')
log(resources)
log('Configuring Resources: %s' % (resources), level=DEBUG)
for res_name, res_type in resources.iteritems():
# disable the service we are going to put in HA
if res_type.split(':')[0] == "lsb":
hacluster.disable_lsb_services(res_type.split(':')[1])
disable_lsb_services(res_type.split(':')[1])
if service_running(res_type.split(':')[1]):
service_stop(res_type.split(':')[1])
elif (len(init_services) != 0 and
res_name in init_services and
init_services[res_name]):
hacluster.disable_upstart_services(init_services[res_name])
disable_upstart_services(init_services[res_name])
if service_running(init_services[res_name]):
service_stop(init_services[res_name])
# Put the services in HA, if not already done so
@ -414,69 +225,62 @@ def configure_principle_cluster_resources():
cmd = 'crm -w -F configure primitive %s %s' % (res_name,
res_type)
else:
cmd = 'crm -w -F configure primitive %s %s %s' % \
(res_name,
res_type,
resource_params[res_name])
cmd = ('crm -w -F configure primitive %s %s %s' %
(res_name, res_type, resource_params[res_name]))
pcmk.commit(cmd)
log('%s' % cmd)
log('%s' % cmd, level=DEBUG)
if config('monitor_host'):
cmd = 'crm -F configure location Ping-%s %s rule' \
' -inf: pingd lte 0' % (res_name, res_name)
cmd = ('crm -F configure location Ping-%s %s rule '
'-inf: pingd lte 0' % (res_name, res_name))
pcmk.commit(cmd)
log('Configuring Groups')
log(groups)
log('Configuring Groups: %s' % (groups), level=DEBUG)
for grp_name, grp_params in groups.iteritems():
if not pcmk.crm_opt_exists(grp_name):
cmd = 'crm -w -F configure group %s %s' % (grp_name,
grp_params)
cmd = ('crm -w -F configure group %s %s' %
(grp_name, grp_params))
pcmk.commit(cmd)
log('%s' % cmd)
log('%s' % cmd, level=DEBUG)
log('Configuring Master/Slave (ms)')
log(ms)
log('Configuring Master/Slave (ms): %s' % (ms), level=DEBUG)
for ms_name, ms_params in ms.iteritems():
if not pcmk.crm_opt_exists(ms_name):
cmd = 'crm -w -F configure ms %s %s' % (ms_name, ms_params)
pcmk.commit(cmd)
log('%s' % cmd)
log('%s' % cmd, level=DEBUG)
log('Configuring Orders')
log(orders)
log('Configuring Orders: %s' % (orders), level=DEBUG)
for ord_name, ord_params in orders.iteritems():
if not pcmk.crm_opt_exists(ord_name):
cmd = 'crm -w -F configure order %s %s' % (ord_name,
ord_params)
pcmk.commit(cmd)
log('%s' % cmd)
log('%s' % cmd, level=DEBUG)
log('Configuring Colocations')
log(colocations)
log('Configuring Colocations: %s' % colocations, level=DEBUG)
for col_name, col_params in colocations.iteritems():
if not pcmk.crm_opt_exists(col_name):
cmd = 'crm -w -F configure colocation %s %s' % (col_name,
col_params)
pcmk.commit(cmd)
log('%s' % cmd)
log('%s' % cmd, level=DEBUG)
log('Configuring Clones')
log(clones)
log('Configuring Clones: %s' % clones, level=DEBUG)
for cln_name, cln_params in clones.iteritems():
if not pcmk.crm_opt_exists(cln_name):
cmd = 'crm -w -F configure clone %s %s' % (cln_name,
cln_params)
pcmk.commit(cmd)
log('%s' % cmd)
log('%s' % cmd, level=DEBUG)
log('Configuring Locations')
log(locations)
log('Configuring Locations: %s' % locations, level=DEBUG)
for loc_name, loc_params in locations.iteritems():
if not pcmk.crm_opt_exists(loc_name):
cmd = 'crm -w -F configure location %s %s' % (loc_name,
loc_params)
pcmk.commit(cmd)
log('%s' % cmd)
log('%s' % cmd, level=DEBUG)
for res_name, res_type in resources.iteritems():
if len(init_services) != 0 and res_name in init_services:
@ -504,88 +308,7 @@ def configure_principle_cluster_resources():
pcmk.commit(cmd)
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id,
clustered="yes")
def configure_stonith():
if config('stonith_enabled') not in ['true', 'True', True]:
log('Disabling STONITH')
cmd = "crm configure property stonith-enabled=false"
pcmk.commit(cmd)
else:
log('Enabling STONITH for all nodes in cluster.')
# configure stonith resources for all nodes in cluster.
# note: this is totally provider dependent and requires
# access to the MAAS API endpoint, using endpoint and credentials
# set in config.
url = config('maas_url')
creds = config('maas_credentials')
if None in [url, creds]:
log('maas_url and maas_credentials must be set'
' in config to enable STONITH.')
sys.exit(1)
maas = MAAS.MAASHelper(url, creds)
nodes = maas.list_nodes()
if not nodes:
log('Could not obtain node inventory from '
'MAAS @ %s.' % url)
sys.exit(1)
cluster_nodes = pcmk.list_nodes()
for node in cluster_nodes:
rsc, constraint = pcmk.maas_stonith_primitive(nodes, node)
if not rsc:
log('Failed to determine STONITH primitive for node'
' %s' % node)
sys.exit(1)
rsc_name = str(rsc).split(' ')[1]
if not pcmk.is_resource_present(rsc_name):
log('Creating new STONITH primitive %s.' %
rsc_name)
cmd = 'crm -F configure %s' % rsc
pcmk.commit(cmd)
if constraint:
cmd = 'crm -F configure %s' % constraint
pcmk.commit(cmd)
else:
log('STONITH primitive already exists '
'for node.')
cmd = "crm configure property stonith-enabled=true"
pcmk.commit(cmd)
def get_cluster_nodes():
hosts = []
hosts.append(unit_get('private-address'))
for relid in relation_ids('hanode'):
for unit in related_units(relid):
if relation_get('ready',
rid=relid,
unit=unit):
hosts.append(relation_get('private-address',
unit, relid))
hosts.sort()
return hosts
TEMPLATES_DIR = 'templates'
try:
import jinja2
except ImportError:
apt_install('python-jinja2', fatal=True)
import jinja2
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
relation_set(relation_id=rel_id, clustered="yes")
@hooks.hook()
@ -595,13 +318,6 @@ def stop():
apt_purge(['corosync', 'pacemaker'], fatal=True)
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
@hooks.hook('nrpe-external-master-relation-joined',
'nrpe-external-master-relation-changed')
def update_nrpe_config():
@ -659,4 +375,4 @@ if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
log('Unknown hook {} - skipping.'.format(e), level=DEBUG)
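
Two helpers touched in this diff are simple enough to sanity-check standalone: get_corosync_id() offsets unit numbers because corosync reserves nodeid 0, and get_transport() maps the deprecated multicast/unicast values onto udp/udpu. A sketch that mirrors (but is not) the charm code:

DEPRECATED_TRANSPORT_VALUES = {"multicast": "udp", "unicast": "udpu"}
SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast']

def get_corosync_id(unit_name):
    # corosync nodeid 0 is reserved, so offset all unit numbers by 1000
    return 1000 + int(unit_name.split('/')[1])

def normalize_transport(transport):
    val = DEPRECATED_TRANSPORT_VALUES.get(transport, transport)
    if val not in ('udp', 'udpu'):
        raise ValueError("Unsupported corosync_transport type '%s' - "
                         "supported types are: %s"
                         % (transport, ', '.join(SUPPORTED_TRANSPORTS)))
    return val

assert get_corosync_id('hacluster/3') == 1003
assert normalize_transport('multicast') == 'udp'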


@ -4,7 +4,10 @@ import json
import subprocess
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import log, ERROR
from charmhelpers.core.hookenv import (
log,
ERROR,
)
MAAS_STABLE_PPA = 'ppa:maas-maintainers/stable '
MAAS_PROFILE_NAME = 'maas-juju-hacluster'
@ -18,10 +21,10 @@ class MAASHelper(object):
self.install_maas_cli()
def install_maas_cli(self):
'''
Ensure maas-cli is installed. Fallback to MAAS stable PPA when
needed.
'''
"""Ensure maas-cli is installed
Fallback to MAAS stable PPA when needed.
"""
apt.init()
cache = apt.Cache()
@ -59,5 +62,6 @@ class MAASHelper(object):
except subprocess.CalledProcessError:
log('Could not get node inventory from MAAS.', ERROR)
return False
self.logout()
return json.loads(out)


@ -2,7 +2,10 @@ import commands
import subprocess
import socket
from charmhelpers.core.hookenv import log, ERROR
from charmhelpers.core.hookenv import (
log,
ERROR
)
def wait_for_pcmk():
@ -21,6 +24,7 @@ def is_resource_present(resource):
status = commands.getstatusoutput("crm resource status %s" % resource)[0]
if status != 0:
return False
return True
@ -29,6 +33,7 @@ def standby(node=None):
cmd = "crm -F node standby"
else:
cmd = "crm -F node standby %s" % node
commit(cmd)
@ -37,6 +42,7 @@ def online(node=None):
cmd = "crm -F node online"
else:
cmd = "crm -F node online %s" % node
commit(cmd)
@ -44,15 +50,16 @@ def crm_opt_exists(opt_name):
output = commands.getstatusoutput("crm configure show")[1]
if opt_name in output:
return True
return False
def crm_res_running(opt_name):
(c, output) = commands.getstatusoutput("crm resource status %s" % opt_name)
(_, output) = commands.getstatusoutput("crm resource status %s" % opt_name)
if output.startswith("resource %s is running" % opt_name):
return True
else:
return False
return False
def list_nodes():
@ -62,20 +69,21 @@ def list_nodes():
for line in str(out).split('\n'):
if line != '':
nodes.append(line.split(':')[0])
return nodes
def _maas_ipmi_stonith_resource(node, power_params):
rsc_name = 'res_stonith_%s' % node
rsc = 'primitive %s stonith:external/ipmi' % rsc_name
rsc += ' params hostname=%s ipaddr=%s userid=%s passwd=%s interface=lan' %\
(node, power_params['power_address'],
power_params['power_user'], power_params['power_pass'])
rsc = ('primitive %s stonith:external/ipmi params hostname=%s ipaddr=%s '
'userid=%s passwd=%s interface=lan' %
(rsc_name, node, power_params['power_address'],
power_params['power_user'], power_params['power_pass']))
# ensure ipmi stonith agents are not running on the nodes that
# they manage.
constraint = 'location const_loc_stonith_avoid_%s %s -inf: %s' %\
(node, rsc_name, node)
constraint = ('location const_loc_stonith_avoid_%s %s -inf: %s' %
(node, rsc_name, node))
return rsc, constraint
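
The pcmk module wraps the crm CLI, and the hooks pair crm_opt_exists() with commit() so each resource is only configured once. A minimal sketch of that pattern, assuming the pcmk module above (ensure_primitive() and the resource name are invented for illustration):

import pcmk

def ensure_primitive(name, spec):
    # only configure the primitive if crm does not already know about it
    if not pcmk.crm_opt_exists(name):
        pcmk.commit('crm -w -F configure primitive %s %s' % (name, spec))

# e.g.:
# ensure_primitive('res_ks_vip', 'ocf:heartbeat:IPaddr2 params ip="10.0.0.10"')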

hooks/utils.py Normal file

@ -0,0 +1,427 @@
#!/usr/bin/python
import ast
import pcmk
import maas
import os
import subprocess
import socket
import fcntl
import struct
from base64 import b64decode
from charmhelpers.core.hookenv import (
local_unit,
log,
DEBUG,
INFO,
relation_get,
related_units,
relation_ids,
config,
unit_private_ip,
unit_get,
)
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.core.host import (
service_start,
service_stop,
service_restart,
service_running,
write_file,
file_hash,
lsb_release
)
from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.contrib.hahelpers.cluster import (
peer_ips,
)
from charmhelpers.contrib.network import ip as utils
try:
import netifaces
except ImportError:
apt_install('python-netifaces')
import netifaces
try:
from netaddr import IPNetwork
except ImportError:
apt_install('python-netaddr', fatal=True)
from netaddr import IPNetwork
try:
import jinja2
except ImportError:
apt_install('python-jinja2', fatal=True)
import jinja2
TEMPLATES_DIR = 'templates'
COROSYNC_CONF = '/etc/corosync/corosync.conf'
COROSYNC_DEFAULT = '/etc/default/corosync'
COROSYNC_AUTHKEY = '/etc/corosync/authkey'
COROSYNC_CONF_FILES = [
COROSYNC_DEFAULT,
COROSYNC_AUTHKEY,
COROSYNC_CONF
]
SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast']
def disable_upstart_services(*services):
for service in services:
with open("/etc/init/{}.override".format(service), "w") as override:
override.write("manual")
def enable_upstart_services(*services):
for service in services:
path = '/etc/init/{}.override'.format(service)
if os.path.exists(path):
os.remove(path)
def disable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'remove'])
def enable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'defaults'])
def get_iface_ipaddr(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8919, # SIOCGIFADDR
struct.pack('256s', iface[:15])
)[20:24])
def get_iface_netmask(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x891b, # SIOCGIFNETMASK
struct.pack('256s', iface[:15])
)[20:24])
def get_netmask_cidr(netmask):
netmask = netmask.split('.')
binary_str = ''
for octet in netmask:
binary_str += bin(int(octet))[2:].zfill(8)
return str(len(binary_str.rstrip('0')))
def get_network_address(iface):
if iface:
iface = str(iface)
network = "{}/{}".format(get_iface_ipaddr(iface),
get_netmask_cidr(get_iface_netmask(iface)))
ip = IPNetwork(network)
return str(ip.network)
else:
return None
def get_ipv6_network_address(iface):
# Behave in same way as ipv4 get_network_address() above if iface is None.
if not iface:
return None
try:
ipv6_addr = utils.get_ipv6_addr(iface=iface)[0]
all_addrs = netifaces.ifaddresses(iface)
for addr in all_addrs[netifaces.AF_INET6]:
if ipv6_addr == addr['addr']:
network = "{}/{}".format(addr['addr'], addr['netmask'])
return str(IPNetwork(network).network)
except ValueError:
raise Exception("Invalid interface '%s'" % iface)
raise Exception("No valid network found for interface '%s'" % iface)
def get_corosync_id(unit_name):
# Corosync nodeid 0 is reserved so increase all the nodeids to avoid it
off_set = 1000
return off_set + int(unit_name.split('/')[1])
def get_ha_nodes():
ha_units = peer_ips(peer_relation='hanode')
ha_units[local_unit()] = unit_private_ip()
ha_nodes = {}
for unit in ha_units:
corosync_id = get_corosync_id(unit)
ha_nodes[corosync_id] = get_host_ip(ha_units[unit])
return ha_nodes
def get_corosync_conf():
if config('prefer-ipv6'):
ip_version = 'ipv6'
bindnetaddr = get_ipv6_network_address
else:
ip_version = 'ipv4'
bindnetaddr = get_network_address
# NOTE(jamespage) use local charm configuration over any provided by
# principle charm
conf = {
'corosync_bindnetaddr':
bindnetaddr(config('corosync_bindiface')),
'corosync_mcastport': config('corosync_mcastport'),
'corosync_mcastaddr': config('corosync_mcastaddr'),
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': get_transport(),
'debug': config('debug'),
}
if None not in conf.itervalues():
return conf
conf = {}
if config('netmtu'):
conf['netmtu'] = config('netmtu')
for relid in relation_ids('ha'):
for unit in related_units(relid):
bindiface = relation_get('corosync_bindiface',
unit, relid)
conf = {
'corosync_bindnetaddr': bindnetaddr(bindiface),
'corosync_mcastport': relation_get('corosync_mcastport',
unit, relid),
'corosync_mcastaddr': config('corosync_mcastaddr'),
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': get_transport(),
'debug': config('debug'),
}
if config('prefer-ipv6'):
conf['nodeid'] = get_corosync_id(local_unit())
if None not in conf.itervalues():
return conf
missing = [k for k, v in conf.iteritems() if v is None]
log('Missing required configuration: %s' % missing)
return None
def emit_corosync_conf():
corosync_conf_context = get_corosync_conf()
if corosync_conf_context:
write_file(path=COROSYNC_CONF,
content=render_template('corosync.conf',
corosync_conf_context))
return True
return False
def emit_base_conf():
corosync_default_context = {'corosync_enabled': 'yes'}
write_file(path=COROSYNC_DEFAULT,
content=render_template('corosync',
corosync_default_context))
corosync_key = config('corosync_key')
if corosync_key:
write_file(path=COROSYNC_AUTHKEY,
content=b64decode(corosync_key),
perms=0o400)
return True
return False
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
def get_transport():
transport = config('corosync_transport')
_deprecated_transport_values = {"multicast": "udp", "unicast": "udpu"}
val = _deprecated_transport_values.get(transport, transport)
if val not in ['udp', 'udpu']:
msg = ("Unsupported corosync_transport type '%s' - supported "
"types are: %s" % (transport, ', '.join(SUPPORTED_TRANSPORTS)))
raise ValueError(msg)
return val
def get_cluster_nodes():
hosts = []
hosts.append(unit_get('private-address'))
for relid in relation_ids('hanode'):
for unit in related_units(relid):
if relation_get('ready', rid=relid, unit=unit):
hosts.append(relation_get('private-address', unit, relid))
hosts.sort()
return hosts
def parse_data(relid, unit, key):
"""Simple helper to ast parse relation data"""
data = relation_get(key, unit, relid)
if data:
return ast.literal_eval(data)
return {}
def configure_stonith():
if config('stonith_enabled') not in ['true', 'True', True]:
log('Disabling STONITH', level=INFO)
cmd = "crm configure property stonith-enabled=false"
pcmk.commit(cmd)
else:
log('Enabling STONITH for all nodes in cluster.', level=INFO)
# configure stonith resources for all nodes in cluster.
# note: this is totally provider dependent and requires
# access to the MAAS API endpoint, using endpoint and credentials
# set in config.
url = config('maas_url')
creds = config('maas_credentials')
if None in [url, creds]:
raise Exception('maas_url and maas_credentials must be set '
'in config to enable STONITH.')
nodes = maas.MAASHelper(url, creds).list_nodes()
if not nodes:
raise Exception('Could not obtain node inventory from '
'MAAS @ %s.' % url)
cluster_nodes = pcmk.list_nodes()
for node in cluster_nodes:
rsc, constraint = pcmk.maas_stonith_primitive(nodes, node)
if not rsc:
raise Exception('Failed to determine STONITH primitive for '
'node %s' % node)
rsc_name = str(rsc).split(' ')[1]
if not pcmk.is_resource_present(rsc_name):
log('Creating new STONITH primitive %s.' % rsc_name,
level=DEBUG)
cmd = 'crm -F configure %s' % rsc
pcmk.commit(cmd)
if constraint:
cmd = 'crm -F configure %s' % constraint
pcmk.commit(cmd)
else:
log('STONITH primitive already exists for node.', level=DEBUG)
pcmk.commit("crm configure property stonith-enabled=true")
def configure_monitor_host():
"""Configure extra monitor host for better network failure detection"""
log('Checking monitor host configuration', level=DEBUG)
monitor_host = config('monitor_host')
if monitor_host:
if not pcmk.crm_opt_exists('ping'):
log('Implementing monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
monitor_interval = config('monitor_interval')
cmd = ('crm -w -F configure primitive ping '
'ocf:pacemaker:ping params host_list="%s" '
'multiplier="100" op monitor interval="%s" ' %
(monitor_host, monitor_interval))
pcmk.commit(cmd)
cmd = ('crm -w -F configure clone cl_ping ping '
'meta interleave="true"')
pcmk.commit(cmd)
else:
log('Reconfiguring monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
cmd = ('crm -w -F resource param ping set host_list="%s"' %
monitor_host)
else:
if pcmk.crm_opt_exists('ping'):
log('Disabling monitor host configuration', level=DEBUG)
pcmk.commit('crm -w -F resource stop ping')
pcmk.commit('crm -w -F configure delete ping')
def configure_cluster_global():
"""Configure global cluster options"""
log('Applying global cluster configuration', level=DEBUG)
if int(config('cluster_count')) >= 3:
# NOTE(jamespage) if 3 or more nodes, then quorum can be
# managed effectively, so stop if quorum lost
log('Configuring no-quorum-policy to stop', level=DEBUG)
cmd = "crm configure property no-quorum-policy=stop"
else:
# NOTE(jamespage) if less than 3 nodes, quorum not possible
# so ignore
log('Configuring no-quorum-policy to ignore', level=DEBUG)
cmd = "crm configure property no-quorum-policy=ignore"
pcmk.commit(cmd)
cmd = ('crm configure rsc_defaults $id="rsc-options" '
'resource-stickiness="100"')
pcmk.commit(cmd)
def restart_corosync_on_change():
"""Simple decorator to restart corosync if any of its config changes"""
def wrap(f):
def wrapped_f(*args, **kwargs):
checksums = {}
for path in COROSYNC_CONF_FILES:
checksums[path] = file_hash(path)
return_data = f(*args, **kwargs)
# NOTE: this assumes that this call is always done around
# configure_corosync, which returns true if configuration
# files were actually generated
if return_data:
for path in COROSYNC_CONF_FILES:
if checksums[path] != file_hash(path):
restart_corosync()
break
return return_data
return wrapped_f
return wrap
@restart_corosync_on_change()
def configure_corosync():
log('Configuring and (maybe) restarting corosync', level=DEBUG)
return emit_base_conf() and emit_corosync_conf()
def restart_corosync():
if service_running("pacemaker"):
service_stop("pacemaker")
service_restart("corosync")
service_start("pacemaker")
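
restart_corosync_on_change() above follows the usual hash-before/hash-after pattern. A self-contained sketch of the same idea, using hashlib in place of charmhelpers' file_hash() (paths and the restart callback are placeholders):

import hashlib
import os

def _hash(path):
    # mirror file_hash(): None for missing files, a digest otherwise
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

def restart_on_change(paths, restart):
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            before = dict((p, _hash(p)) for p in paths)
            result = f(*args, **kwargs)
            # restart only when f() reports it wrote configuration and a
            # watched file actually changed on disk
            if result and any(before[p] != _hash(p) for p in paths):
                restart()
            return result
        return wrapped_f
    return wrap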


@ -1 +0,0 @@
68


@ -2,5 +2,5 @@
verbosity=2
with-coverage=1
cover-erase=1
cover-package=hooks
cover-package=hooks,utils,pcmk,maas


@ -1,13 +1,6 @@
#!/bin/bash
set -ex
# Check if amulet is installed before adding repository and updating apt-get.
dpkg -s amulet
if [ $? -ne 0 ]; then
sudo add-apt-repository -y ppa:juju/stable
sudo apt-get update
sudo apt-get install -y amulet
fi
#!/bin/bash -eux
# Install amulet packages
sudo apt-get update --yes
# Install any additional python packages or software here.
sudo apt-get install --yes python-amulet python-keystoneclient || true


@ -1,33 +0,0 @@
#!/usr/bin/env python3
# This amulet test deploys the bundles.yaml file in this directory.
import os
import unittest
import yaml
import amulet
seconds_to_wait = 600
class BundleTest(unittest.TestCase):
""" Create a class for testing the charm in the unit test framework. """
@classmethod
def setUpClass(cls):
""" Set up an amulet deployment using the bundle. """
d = amulet.Deployment()
bundle_path = os.path.join(os.path.dirname(__file__), 'bundles.yaml')
with open(bundle_path, 'r') as bundle_file:
contents = yaml.safe_load(bundle_file)
d.load(contents)
d.setup(seconds_to_wait)
d.sentry.wait(seconds_to_wait)
cls.d = d
def test_deployed(self):
""" Test to see if the bundle deployed successfully. """
self.assertTrue(self.d.deployed)
if __name__ == '__main__':
unittest.main()

tests/15-basic-trusty-icehouse Executable file

@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic hacluster deployment on trusty-icehouse."""
from basic_deployment import HAClusterBasicDeployment
if __name__ == '__main__':
deployment = HAClusterBasicDeployment(series='trusty')
deployment.run_tests()

tests/basic_deployment.py Executable file

@ -0,0 +1,109 @@
#!/usr/bin/env python
import os
import amulet
import keystoneclient.v2_0 as keystone_client
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG, # flake8: noqa
ERROR
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
seconds_to_wait = 600
class HAClusterBasicDeployment(OpenStackAmuletDeployment):
def __init__(self, series=None, openstack=None, source=None, stable=False):
"""Deploy the entire test environment."""
super(HAClusterBasicDeployment, self).__init__(series, openstack,
source, stable)
env_var = 'OS_CHARMS_AMULET_VIP'
self._vip = os.getenv(env_var, None)
if not self._vip:
amulet.raise_status(amulet.SKIP, msg="No vip provided with '%s' - "
"skipping tests" % (env_var))
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
self._initialize_tests()
def _add_services(self):
this_service = {'name': 'hacluster'}
other_services = [{'name': 'mysql'}, {'name': 'keystone', 'units': 3}]
super(HAClusterBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
relations = {'keystone:shared-db': 'mysql:shared-db',
'hacluster:ha': 'keystone:ha'}
super(HAClusterBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
keystone_config = {'admin-password': 'openstack',
'admin-token': 'ubuntutesting',
'vip': self._vip}
mysql_config = {'dataset-size': '50%'}
configs = {'keystone': keystone_config,
'mysql': mysql_config}
super(HAClusterBasicDeployment, self)._configure_services(configs)
def _authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant, service_ip=None):
"""Authenticates admin user with the keystone admin endpoint.
This should be factored into:
charmhelpers.contrib.openstack.amulet.utils.OpenStackAmuletUtils
"""
if not service_ip:
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
self.mysql_sentry = self.d.sentry.unit['mysql/0']
self.keystone_sentry = self.d.sentry.unit['keystone/0']
# NOTE: the hacluster unit id may not correspond with its parent unit
# id.
self.hacluster_sentry = self.d.sentry.unit['hacluster/0']
# Authenticate keystone admin
self.keystone = self._authenticate_keystone_admin(self.keystone_sentry,
user='admin',
password='openstack',
tenant='admin',
service_ip=self._vip)
# Create a demo tenant/role/user
self.demo_tenant = 'demoTenant'
self.demo_role = 'demoRole'
self.demo_user = 'demoUser'
if not u.tenant_exists(self.keystone, self.demo_tenant):
tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
description='demo tenant',
enabled=True)
self.keystone.roles.create(name=self.demo_role)
self.keystone.users.create(name=self.demo_user, password='password',
tenant_id=tenant.id,
email='demo@demo.com')
# Authenticate keystone demo
self.keystone_demo = u.authenticate_keystone_user(self.keystone,
user=self.demo_user,
password='password',
tenant=self.demo_tenant)


@ -1,15 +0,0 @@
hacluster-mysql:
series: trusty
services:
hacluster:
charm: hacluster
num_units: 0
mysql:
charm: cs:trusty/mysql
num_units: 2
options:
"dataset-size": 128M
vip: 192.168.21.1
relations:
- - "mysql:ha"
- "hacluster:ha"


@ -0,0 +1,38 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa


@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.


@ -0,0 +1,93 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import os
import six
class AmuletDeployment(object):
"""Amulet deployment.
This class provides generic Amulet deployment and test runner
methods.
"""
def __init__(self, series=None):
"""Initialize the deployment environment."""
self.series = None
if series:
self.series = series
self.d = amulet.Deployment(series=self.series)
else:
self.d = amulet.Deployment()
def _add_services(self, this_service, other_services):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're testing and other_services are the other services that
are being used in the local amulet tests.
"""
if this_service['name'] != os.path.basename(os.getcwd()):
s = this_service['name']
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'])
for svc in other_services:
if 'location' in svc:
branch_location = svc['location']
elif self.series:
branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
else:
branch_location = None
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'])
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in six.iteritems(relations):
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup(timeout=900)
self.d.sentry.wait(timeout=900)
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
except Exception:
raise
def run_tests(self):
"""Run all of the methods that are prefixed with 'test_'."""
for test in dir(self):
if test.startswith('test_'):
getattr(self, test)()
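
run_tests() above discovers tests by scanning dir(self) for the 'test_' prefix. A tiny standalone illustration of that discovery pattern (class and method names invented):

class Example(object):
    def test_first(self):
        print('test_first ran')

    def run_tests(self):
        for name in dir(self):
            if name.startswith('test_'):
                getattr(self, name)()

Example().run_tests()  # prints 'test_first ran'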


@ -0,0 +1,314 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import io
import logging
import re
import sys
import time
import six
class AmuletUtils(object):
"""Amulet utilities.
This class provides common utility functions that are used by Amulet
tests.
"""
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def valid_ip(self, ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
else:
return False
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
if p.match(url):
return True
else:
return False
def validate_services(self, commands):
"""Validate services.
Verify the specified services are running on the corresponding
service units.
"""
for k, v in six.iteritems(commands):
for cmd in v:
output, code = k.run(cmd)
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
config = ConfigParser.ConfigParser()
config.readfp(io.StringIO(file_contents))
return config
def validate_config_data(self, sentry_unit, config_file, section,
expected):
"""Validate config file data.
Verify that the specified section of the config file contains
the expected option key:value pairs.
"""
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
return "section [{}] does not exist".format(section)
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
if config.get(section, k) != expected[k]:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, config.get(section, k), k, expected[k])
return None
def _validate_dict_data(self, expected, actual):
"""Validate dictionary data.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
for k, v in six.iteritems(expected):
if k in actual:
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
if v != actual[k]:
return "{}:{}".format(k, actual[k])
elif not v(actual[k]):
return "{}:{}".format(k, actual[k])
else:
return "key '{}' does not exist".format(k)
return None
def validate_relation_data(self, sentry_unit, relation, expected):
"""Validate actual relation data based on expected relation data."""
actual = sentry_unit.relation(relation[0], relation[1])
self.log.debug('actual: {}'.format(repr(actual)))
return self._validate_dict_data(expected, actual)
def _validate_list_data(self, expected, actual):
"""Compare expected list vs actual list data."""
for e in expected:
if e not in actual:
return "expected item {} not found in actual list".format(e)
return None
def not_null(self, string):
if string is not None:
return True
else:
return False
def _get_file_mtime(self, sentry_unit, filename):
"""Get last modification time of file."""
return sentry_unit.file_stat(filename)['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
"""Get process' start time.
Determine start time of the process based on the last modification
time of the /proc/pid directory. If pgrep_full is True, the process
name is matched against the full command line.
"""
if pgrep_full:
cmd = 'pgrep -o -f {}'.format(service)
else:
cmd = 'pgrep -o {}'.format(service)
cmd = cmd + ' | grep -v pgrep || exit 0'
cmd_out = sentry_unit.run(cmd)
self.log.debug('CMDout: ' + str(cmd_out))
if cmd_out[0]:
self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
proc_dir = '/proc/{}'.format(cmd_out[0].strip())
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=False, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
return True
else:
return False
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=False, sleep_time=20,
retry_count=2):
"""Check if service was been started after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
Returns:
bool: True if service is found and its start time is newer than mtime,
False if service is older than mtime or if service was
not found.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
while retry_count > 0 and not proc_start_time:
self.log.debug('No pid file found for service %s, will retry %i '
'more times' % (service, retry_count))
time.sleep(30)
proc_start_time = self._get_proc_start_time(sentry_unit, service,
pgrep_full)
retry_count = retry_count - 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did '
'not start')
return False
if proc_start_time >= mtime:
self.log.debug('proc start time is newer than provided mtime '
'(%s >= %s)' % (proc_start_time, mtime))
return True
else:
self.log.warn('proc start time (%s) is older than provided mtime '
'(%s), service did not restart' % (proc_start_time,
mtime))
return False
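This pairs naturally with get_sentry_time (defined further down); a sketch mirroring the docstring example later in this file, names illustrative:
    mtime = u.get_sentry_time(self.cinder_sentry)  # capture before the change
    self.d.configure('cinder', {'debug': 'True'})
    assert u.service_restarted_since(self.cinder_sentry, mtime, 'cinder-api')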
def config_updated_since(self, sentry_unit, filename, mtime,
sleep_time=20):
"""Check if file was modified after a given time.
Args:
sentry_unit (sentry): The sentry unit to check the file mtime on
filename (string): The file to check mtime of
mtime (float): The epoch time to check against
sleep_time (int): Seconds to sleep before looking for process
Returns:
bool: True if file was modified more recently than mtime, False if
file was modified before mtime.
"""
self.log.debug('Checking %s updated since %s' % (filename, mtime))
time.sleep(sleep_time)
file_mtime = self._get_file_mtime(sentry_unit, filename)
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s)' % (file_mtime, mtime))
return True
else:
self.log.warn('File mtime %s is older than provided mtime %s'
% (file_mtime, mtime))
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=False,
sleep_time=20, retry_count=2):
"""Check service and file were updated after mtime
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
filename (string): The file to check mtime of
pgrep_full (boolean): Use full command line search mode with pgrep
sleep_time (int): Seconds to sleep before looking for process
retry_count (int): If service is not found, how many times to retry
Typical Usage:
u = OpenStackAmuletUtils(ERROR)
...
mtime = u.get_sentry_time(self.cinder_sentry)
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
if not u.validate_service_config_changed(self.cinder_sentry,
mtime,
'cinder-api',
'/etc/cinder/cinder.conf'):
amulet.raise_status(amulet.FAIL, msg='update failed')
Returns:
bool: True if both service and file were updated/restarted after
mtime, False if service is older than mtime or if service was
not found or if filename was modified before mtime.
"""
self.log.debug('Checking %s restarted since %s' % (service, mtime))
time.sleep(sleep_time)
service_restart = self.service_restarted_since(sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=0,
retry_count=retry_count)
config_update = self.config_updated_since(sentry_unit, filename, mtime,
sleep_time=0)
return service_restart and config_update
def get_sentry_time(self, sentry_unit):
"""Return current epoch time on a sentry"""
cmd = "date +'%s'"
return float(sentry_unit.run(cmd)[0])
def relation_error(self, name, data):
return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,111 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
if self.stable:
for svc in other_services:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
for svc in other_services:
if svc['name'] in base_charms:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
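To make the mapping concrete, with stable=False and current_next='trusty' a hypothetical input resolves as:
    other_services = [{'name': 'mysql'}, {'name': 'keystone'}]
    # mysql    -> 'lp:charms/mysql'                                  (base charm)
    # keystone -> 'lp:~openstack-charmers/charms/trusty/keystone/next'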
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
# OpenStack subordinate charms do not expose an origin option as that
# is controlled by the principal charm
ignore = ['neutron-openvswitch']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + ignore:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in ignore:
config = {'source': self.source}
self.d.configure(svc['name'], config)
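A sketch of how a test typically wires this up (charm names illustrative):
    this_service = {'name': 'hacluster'}
    other_services = [{'name': 'mysql'}, {'name': 'keystone'}]
    self._add_services(this_service, other_services)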
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse) = range(6)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
return releases[(self.series, self.openstack)]
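Because the values come from range(), the release attributes order naturally and can gate version-specific assertions, e.g. (a sketch):
    if self._get_openstack_release() >= self.precise_icehouse:
        pass  # icehouse-or-newer expectations go here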

View File

@ -0,0 +1,294 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import time
import urllib
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate tenant data.
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate role data.
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate flavor data.
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
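A sketch of the usual authentication chain in a test; the credentials are illustrative, not defaults this library provides:
    keystone = u.authenticate_keystone_admin(self.keystone_sentry, user='admin',
                                             password='openstack', tenant='admin')
    glance = u.authenticate_glance_admin(keystone)
    nova = u.authenticate_nova_user(keystone, user='demo',
                                    password='password', tenant='demo')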
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(local_path):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
count = 1
status = image.status
while status != 'active' and count < 10:
time.sleep(3)
image = glance.images.get(image.id)
status = image.status
self.log.debug('image status: {}'.format(status))
count += 1
if status != 'active':
self.log.error('image creation timed out')
return None
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
num_before = len(list(glance.images.list()))
glance.images.delete(image)
count = 1
num_after = len(list(glance.images.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(glance.images.list()))
self.log.debug('number of images: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('image deletion timed out')
return False
return True
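Taken together with create_cirros_image above, a hedged lifecycle sketch (image name illustrative):
    image = u.create_cirros_image(glance, 'cirros-test')
    if image and not u.delete_image(glance, image):
        amulet.raise_status(amulet.FAIL, msg='image cleanup failed')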
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status != 'ACTIVE':
self.log.error('instance creation timed out')
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
count = 1
num_after = len(list(nova.servers.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(nova.servers.list()))
self.log.debug('number of instances: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('instance deletion timed out')
return False
return True
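And the matching instance lifecycle, a sketch with illustrative names (assumes a 'cirros-test' image and the stock m1.tiny flavor exist):
    instance = u.create_instance(nova, 'cirros-test', 'amulet-smoke', 'm1.tiny')
    if instance:
        u.delete_instance(nova, instance)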

View File

@ -1,123 +1,42 @@
from __future__ import print_function
import mock
import os
import re
import shutil
import tempfile
import unittest
with mock.patch('charmhelpers.core.hookenv.config'):
import hooks as hacluster_hooks
import hooks
def local_log(msg, level='INFO'):
print('[{}] {}'.format(level, msg))
def write_file(path, content, *args, **kwargs):
with open(path, 'w') as f:
f.write(content)
f.flush()
class SwiftContextTestCase(unittest.TestCase):
@mock.patch('hooks.config')
def test_get_transport(self, mock_config):
mock_config.return_value = 'udp'
self.assertEqual('udp', hacluster_hooks.get_transport())
mock_config.return_value = 'udpu'
self.assertEqual('udpu', hacluster_hooks.get_transport())
mock_config.return_value = 'hafu'
self.assertRaises(ValueError, hacluster_hooks.get_transport)
@mock.patch('hooks.log', local_log)
@mock.patch('hooks.write_file', write_file)
@mock.patch.object(hooks, 'log', lambda *args, **kwargs: None)
@mock.patch('utils.COROSYNC_CONF', os.path.join(tempfile.mkdtemp(),
'corosync.conf'))
class TestCorosyncConf(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
hacluster_hooks.COROSYNC_CONF = os.path.join(self.tmpdir,
'corosync.conf')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_debug_on(self):
self.check_debug(True)
def test_debug_off(self):
self.check_debug(False)
@mock.patch('hooks.relation_get')
@mock.patch('hooks.related_units')
@mock.patch('hooks.relation_ids')
@mock.patch('hacluster.get_network_address')
@mock.patch('hooks.config')
def check_debug(self, enabled, mock_config, get_network_address,
relation_ids, related_units, relation_get):
cfg = {'debug': enabled,
'prefer-ipv6': False,
'corosync_transport': 'udpu',
'corosync_mcastaddr': 'corosync_mcastaddr'}
def c(k):
return cfg.get(k)
mock_config.side_effect = c
get_network_address.return_value = "127.0.0.1"
relation_ids.return_value = ['foo:1']
related_units.return_value = ['unit-machine-0']
relation_get.return_value = 'iface'
hacluster_hooks.get_ha_nodes = mock.MagicMock()
conf = hacluster_hooks.get_corosync_conf()
self.assertEqual(conf['debug'], enabled)
self.assertTrue(hacluster_hooks.emit_corosync_conf())
with open(hacluster_hooks.COROSYNC_CONF) as fd:
content = fd.read()
if enabled:
pattern = 'debug: on\n'
else:
pattern = 'debug: off\n'
matches = re.findall(pattern, content, re.M)
self.assertEqual(len(matches), 2, str(matches))
@mock.patch('pcmk.wait_for_pcmk')
@mock.patch('hooks.peer_units')
@mock.patch.object(hooks, 'peer_units')
@mock.patch('pcmk.crm_opt_exists')
@mock.patch('hooks.oldest_peer')
@mock.patch('hooks.configure_corosync')
@mock.patch('hooks.configure_cluster_global')
@mock.patch('hooks.configure_monitor_host')
@mock.patch('hooks.configure_stonith')
@mock.patch('hooks.related_units')
@mock.patch('hooks.get_cluster_nodes')
@mock.patch('hooks.relation_set')
@mock.patch('hooks.relation_ids')
@mock.patch('hooks.get_corosync_conf')
@mock.patch.object(hooks, 'oldest_peer')
@mock.patch.object(hooks, 'configure_corosync')
@mock.patch.object(hooks, 'configure_cluster_global')
@mock.patch.object(hooks, 'configure_monitor_host')
@mock.patch.object(hooks, 'configure_stonith')
@mock.patch.object(hooks, 'related_units')
@mock.patch.object(hooks, 'get_cluster_nodes')
@mock.patch.object(hooks, 'relation_set')
@mock.patch.object(hooks, 'relation_ids')
@mock.patch.object(hooks, 'get_corosync_conf')
@mock.patch('pcmk.commit')
@mock.patch('hooks.config')
@mock.patch('hooks.parse_data')
def test_configure_principle_cluster_resources(self, parse_data, config,
commit,
get_corosync_conf,
relation_ids, relation_set,
get_cluster_nodes,
related_units,
configure_stonith,
configure_monitor_host,
configure_cluster_global,
configure_corosync,
oldest_peer, crm_opt_exists,
peer_units, wait_for_pcmk):
@mock.patch.object(hooks, 'config')
@mock.patch.object(hooks, 'parse_data')
def test_ha_relation_changed(self, parse_data, config, commit,
get_corosync_conf, relation_ids, relation_set,
get_cluster_nodes, related_units,
configure_stonith, configure_monitor_host,
configure_cluster_global, configure_corosync,
oldest_peer, crm_opt_exists, peer_units,
wait_for_pcmk):
crm_opt_exists.return_value = False
oldest_peer.return_value = True
related_units.return_value = ['ha/0', 'ha/1', 'ha/2']
@ -130,10 +49,7 @@ class TestCorosyncConf(unittest.TestCase):
'corosync_mcastaddr': 'corosync_mcastaddr',
'cluster_count': 3}
def c(k):
return cfg.get(k)
config.side_effect = c
config.side_effect = lambda key: cfg.get(key)
rel_get_data = {'locations': {'loc_foo': 'bar rule inf: meh eq 1'},
'clones': {'cl_foo': 'res_foo meta interleave=true'},
@ -150,7 +66,7 @@ class TestCorosyncConf(unittest.TestCase):
parse_data.side_effect = fake_parse_data
hacluster_hooks.configure_principle_cluster_resources()
hooks.ha_relation_changed()
relation_set.assert_any_call(relation_id='hanode:1', ready=True)
configure_stonith.assert_called_with()
configure_monitor_host.assert_called_with()

View File

@ -0,0 +1,80 @@
import mock
import os
import re
import shutil
import tempfile
import unittest
import utils
def write_file(path, content, *args, **kwargs):
with open(path, 'w') as f:
f.write(content)
f.flush()
@mock.patch.object(utils, 'log', lambda *args, **kwargs: None)
@mock.patch.object(utils, 'write_file', write_file)
class UtilsTestCase(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
utils.COROSYNC_CONF = os.path.join(self.tmpdir, 'corosync.conf')
def tearDown(self):
shutil.rmtree(self.tmpdir)
@mock.patch.object(utils, 'relation_get')
@mock.patch.object(utils, 'related_units')
@mock.patch.object(utils, 'relation_ids')
@mock.patch.object(utils, 'get_network_address')
@mock.patch.object(utils, 'config')
def check_debug(self, enabled, mock_config, get_network_address,
relation_ids, related_units, relation_get):
cfg = {'debug': enabled,
'prefer-ipv6': False,
'corosync_transport': 'udpu',
'corosync_mcastaddr': 'corosync_mcastaddr'}
def c(k):
return cfg.get(k)
mock_config.side_effect = c
get_network_address.return_value = "127.0.0.1"
relation_ids.return_value = ['foo:1']
related_units.return_value = ['unit-machine-0']
relation_get.return_value = 'iface'
utils.get_ha_nodes = mock.MagicMock()
conf = utils.get_corosync_conf()
self.assertEqual(conf['debug'], enabled)
self.assertTrue(utils.emit_corosync_conf())
with open(utils.COROSYNC_CONF) as fd:
content = fd.read()
if enabled:
pattern = 'debug: on\n'
else:
pattern = 'debug: off\n'
matches = re.findall(pattern, content, re.M)
self.assertEqual(len(matches), 2, str(matches))
def test_debug_on(self):
self.check_debug(True)
def test_debug_off(self):
self.check_debug(False)
@mock.patch.object(utils, 'config')
def test_get_transport(self, mock_config):
mock_config.return_value = 'udp'
self.assertEqual('udp', utils.get_transport())
mock_config.return_value = 'udpu'
self.assertEqual('udpu', utils.get_transport())
mock_config.return_value = 'hafu'
self.assertRaises(ValueError, utils.get_transport)
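The assertions above pin down the contract of utils.get_transport(); a sketch of the behaviour under test, inferred from the test alone rather than copied from the charm:
    def get_transport():
        transport = config('corosync_transport')
        if transport not in ('udp', 'udpu'):
            raise ValueError('unsupported corosync_transport: %s' % transport)
        return transport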