[hopem,r=]

Sync charmhelpers and add amulet tests.
This commit is contained in:
Edward Hope-Morley 2016-01-24 15:59:59 +00:00
parent 199440c0df
commit 418b2721aa
43 changed files with 4389 additions and 286 deletions

View File

@ -5,9 +5,13 @@ lint:
@flake8 --exclude hooks/charmhelpers hooks unit_tests
@charm proof
unit_test:
test:
@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
functional_test:
@echo Starting amulet deployment tests...
@juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
bin/charm_helpers_sync.py:
@mkdir -p bin
@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
@ -15,6 +19,7 @@ bin/charm_helpers_sync.py:
sync: bin/charm_helpers_sync.py
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
publish: lint unit_test
bzr push lp:charms/cinder-backup

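The new functional_test target runs the Amulet deployment tests added under tests/ via `juju test`, passing through the proxy and VIP environment variables. For orientation, a hedged sketch of the kind of test this drives (the class name and service list are illustrative, not taken from this diff):

# Hypothetical sketch of an amulet test such as those added under tests/.
from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)


class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
    def __init__(self, series='trusty', openstack=None, source=None):
        super(CinderBackupBasicDeployment, self).__init__(series, openstack,
                                                          source)
        self._add_services({'name': 'cinder-backup'},
                           [{'name': 'cinder'}, {'name': 'ceph', 'units': 3}])
        self._configure_services({'cinder': {'block-device': 'None'}})
        self._deploy()
        # Block until workload status reports ready (helper added below).
        self._auto_wait_for_status(exclude_services=['cinder-backup'])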
charm-helpers-tests.yaml Normal file
View File

@ -0,0 +1,5 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- contrib.amulet
- contrib.openstack.amulet

View File

@ -53,7 +53,7 @@ def _validate_cidr(network):
def no_ip_found_error_out(network):
errmsg = ("No IP address found in network: %s" % network)
errmsg = ("No IP address found in network(s): %s" % network)
raise ValueError(errmsg)
@ -61,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
"""Get an IPv4 or IPv6 address within the network from the host.
:param network (str): CIDR presentation format. For example,
'192.168.1.0/24'.
'192.168.1.0/24'. Supports multiple networks as a space-delimited list.
:param fallback (str): If no address is found, return fallback.
:param fatal (boolean): If no address is found, fallback is not
set and fatal is True then exit(1).
@ -75,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
else:
return None
_validate_cidr(network)
network = netaddr.IPNetwork(network)
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if network.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask']
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network:
return str(cidr.ip)
networks = network.split() or [network]
for network in networks:
_validate_cidr(network)
network = netaddr.IPNetwork(network)
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if network.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask']
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
if cidr in network:
return str(cidr.ip)
if fallback is not None:
return fallback

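With this change get_address_in_network() accepts a space-delimited list of CIDRs and returns the first local address that falls within any of them. A minimal usage sketch (the addresses are illustrative):

from charmhelpers.contrib.network.ip import get_address_in_network

# Try two networks in order; fall back to a known address if the unit
# has no interface in either CIDR.
addr = get_address_in_network('192.168.1.0/24 10.5.0.0/16',
                              fallback='127.0.0.1')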
View File

@ -14,12 +14,18 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None, stable=True):
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
@ -38,6 +47,22 @@ class OpenStackAmuletDeployment(AmuletDeployment):
# out.
self.current_next = "trusty"
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
@ -45,6 +70,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the lp:~openstack-charmers namespace
base_charms = ['mysql', 'mongodb', 'nrpe']
@ -82,6 +109,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
@ -95,7 +124,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
'ceph-osd', 'ceph-radosgw']
# Charms which cannot use openstack-origin, i.e. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
if self.openstack:
for svc in services:
@ -111,9 +141,79 @@ class OpenStackAmuletDeployment(AmuletDeployment):
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=1800):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjunction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjunction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
self.log.info('Waiting for extended status on units...')
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
def _get_openstack_release(self):
"""Get openstack release.
@ -125,7 +225,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty) = range(12)
self.wily_liberty, self.trusty_mitaka,
self.xenial_mitaka) = range(14)
releases = {
('precise', None): self.precise_essex,
@ -137,9 +238,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty}
('wily', None): self.wily_liberty,
('xenial', None): self.xenial_mitaka}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
@ -156,6 +259,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]

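The new _auto_wait_for_status() helper blocks until every unit's extended (workload) status matches, as the docstring above illustrates. A minimal call from a test, assuming `d` is a deployed OpenStackAmuletDeployment instance:

import re

# Wait up to 30 minutes for every unit except the subordinate to report
# an exact 'Unit is ready' message.
d._auto_wait_for_status(message=re.compile('^Unit is ready$'),
                        exclude_services=['cinder-backup'],
                        timeout=1800)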
View File

@ -18,6 +18,7 @@ import amulet
import json
import logging
import os
import re
import six
import time
import urllib
@ -604,7 +605,22 @@ class OpenStackAmuletUtils(AmuletUtils):
'{}'.format(sample_type, samples))
return None
# rabbitmq/amqp specific helpers:
# rabbitmq/amqp specific helpers:
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
"""Wait for rmq units extended status to show cluster readiness,
after an optional initial sleep period. Initial sleep is likely
necessary to be effective following a config change, as status
message may not instantly update to non-ready."""
if init_sleep:
time.sleep(init_sleep)
message = re.compile('^Unit is ready and clustered$')
deployment._auto_wait_for_status(message=message,
timeout=timeout,
include_only=['rabbitmq-server'])
def add_rmq_test_user(self, sentry_units,
username="testuser1", password="changeme"):
"""Add a test user via the first rmq juju unit, check connection as
@ -805,7 +821,10 @@ class OpenStackAmuletUtils(AmuletUtils):
if port:
config['ssl_port'] = port
deployment.configure('rabbitmq-server', config)
deployment.d.configure('rabbitmq-server', config)
# Wait for unit status
self.rmq_wait_for_cluster(deployment)
# Confirm
tries = 0
@ -832,7 +851,10 @@ class OpenStackAmuletUtils(AmuletUtils):
# Disable RMQ SSL
config = {'ssl': 'off'}
deployment.configure('rabbitmq-server', config)
deployment.d.configure('rabbitmq-server', config)
# Wait for unit status
self.rmq_wait_for_cluster(deployment)
# Confirm
tries = 0

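rmq_wait_for_cluster() combines the initial sleep with the status-based wait added above, and the SSL configure helpers in these hunks now call it after reconfiguring. A hedged sketch of calling it directly, assuming `u` is an OpenStackAmuletUtils instance and `deployment` an OpenStackAmuletDeployment with rabbitmq-server deployed:

# Reconfigure RMQ, then block until all rabbitmq-server units report
# 'Unit is ready and clustered' via extended status.
deployment.d.configure('rabbitmq-server', {'ssl': 'on', 'ssl_port': 5971})
u.rmq_wait_for_cluster(deployment, init_sleep=15, timeout=1200)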
View File

@ -57,6 +57,7 @@ from charmhelpers.core.host import (
get_nic_hwaddr,
mkdir,
write_file,
pwgen,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
@ -87,6 +88,8 @@ from charmhelpers.contrib.network.ip import (
is_bridge_member,
)
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.core.unitdata import kv
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
@ -626,15 +629,28 @@ class HAProxyContext(OSContextGenerator):
if config('haproxy-client-timeout'):
ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
if config('haproxy-queue-timeout'):
ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
if config('haproxy-connect-timeout'):
ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
if config('prefer-ipv6'):
ctxt['ipv6'] = True
ctxt['local_host'] = 'ip6-localhost'
ctxt['haproxy_host'] = '::'
ctxt['stat_port'] = ':::8888'
else:
ctxt['local_host'] = '127.0.0.1'
ctxt['haproxy_host'] = '0.0.0.0'
ctxt['stat_port'] = ':8888'
ctxt['stat_port'] = '8888'
db = kv()
ctxt['stat_password'] = db.get('stat-password')
if not ctxt['stat_password']:
ctxt['stat_password'] = db.set('stat-password',
pwgen(32))
db.flush()
for frontend in cluster_hosts:
if (len(cluster_hosts[frontend]['backends']) > 1 or
@ -952,6 +968,19 @@ class NeutronContext(OSContextGenerator):
'config': config}
return ovs_ctxt
def midonet_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
midonet_config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
mido_ctxt = {'core_plugin': driver,
'neutron_plugin': 'midonet',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': midonet_config}
return mido_ctxt
def __call__(self):
if self.network_manager not in ['quantum', 'neutron']:
return {}
@ -973,6 +1002,8 @@ class NeutronContext(OSContextGenerator):
ctxt.update(self.nuage_ctxt())
elif self.plugin == 'plumgrid':
ctxt.update(self.pg_ctxt())
elif self.plugin == 'midonet':
ctxt.update(self.midonet_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
@ -1073,6 +1104,20 @@ class OSConfigFlagContext(OSContextGenerator):
config_flags_parser(config_flags)}
class LibvirtConfigFlagsContext(OSContextGenerator):
"""
This context provides support for extending
the libvirt section through user-defined flags.
"""
def __call__(self):
ctxt = {}
libvirt_flags = config('libvirt-flags')
if libvirt_flags:
ctxt['libvirt_flags'] = config_flags_parser(
libvirt_flags)
return ctxt
class SubordinateConfigContext(OSContextGenerator):
"""
@ -1105,7 +1150,7 @@ class SubordinateConfigContext(OSContextGenerator):
ctxt = {
... other context ...
'subordinate_config': {
'subordinate_configuration': {
'DEFAULT': {
'key1': 'value1',
},
@ -1146,22 +1191,23 @@ class SubordinateConfigContext(OSContextGenerator):
try:
sub_config = json.loads(sub_config)
except:
log('Could not parse JSON from subordinate_config '
'setting from %s' % rid, level=ERROR)
log('Could not parse JSON from '
'subordinate_configuration setting from %s'
% rid, level=ERROR)
continue
for service in self.services:
if service not in sub_config:
log('Found subordinate_config on %s but it contained'
'nothing for %s service' % (rid, service),
level=INFO)
log('Found subordinate_configuration on %s but it '
'contained nothing for %s service'
% (rid, service), level=INFO)
continue
sub_config = sub_config[service]
if self.config_file not in sub_config:
log('Found subordinate_config on %s but it contained'
'nothing for %s' % (rid, self.config_file),
level=INFO)
log('Found subordinate_configuration on %s but it '
'contained nothing for %s'
% (rid, self.config_file), level=INFO)
continue
sub_config = sub_config[self.config_file]

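The haproxy stats password is now generated once and persisted in the unit's key/value store, so it survives across hook invocations instead of being the hard-coded 'password'. The same pattern in isolation (the key name mirrors the diff above):

from charmhelpers.core.host import pwgen
from charmhelpers.core.unitdata import kv

db = kv()
password = db.get('stat-password')
if not password:
    # set() returns the stored value; flush() persists it to disk.
    password = db.set('stat-password', pwgen(32))
    db.flush()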
View File

@ -9,15 +9,17 @@
CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR==1{print $4}')
for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
typeset -i N_INSTANCES=0
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
do
output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
N_INSTANCES=N_INSTANCES+1
output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
if [ $? != 0 ]; then
date >> $LOGFILE
echo $output >> $LOGFILE
/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
CRITICAL=1
NOTACTIVE="${NOTACTIVE} $appserver"
fi
@ -28,5 +30,5 @@ if [ $CRITICAL = 1 ]; then
exit 2
fi
echo "OK: All haproxy instances looking good"
echo "OK: All haproxy instances ($N_INSTANCES) looking good"
exit 0

View File

@ -204,11 +204,25 @@ def neutron_plugins():
database=config('database'),
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['plumgrid-lxc'],
['iovisor-dkms']],
'packages': ['plumgrid-lxc',
'iovisor-dkms'],
'server_packages': ['neutron-server',
'neutron-plugin-plumgrid'],
'server_services': ['neutron-server']
},
'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [[headers_package()] + determine_dkms_package()],
'server_packages': ['neutron-server',
'python-neutron-plugin-midonet'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':

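Entries in this plugin map are consumed via neutron_plugin_attribute(), so the new midonet block becomes reachable like any other plugin. For example, per the map above:

from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

# Resolves to 'midonet.neutron.plugin.MidonetPluginV2' per the map above.
driver = neutron_plugin_attribute('midonet', 'driver', net_manager='neutron')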
View File

@ -12,27 +12,35 @@ defaults
option tcplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
{% if haproxy_client_timeout -%}
{%- if haproxy_queue_timeout %}
timeout queue {{ haproxy_queue_timeout }}
{%- else %}
timeout queue 5000
{%- endif %}
{%- if haproxy_connect_timeout %}
timeout connect {{ haproxy_connect_timeout }}
{%- else %}
timeout connect 5000
{%- endif %}
{%- if haproxy_client_timeout %}
timeout client {{ haproxy_client_timeout }}
{% else -%}
{%- else %}
timeout client 30000
{% endif -%}
{% if haproxy_server_timeout -%}
{%- endif %}
{%- if haproxy_server_timeout %}
timeout server {{ haproxy_server_timeout }}
{% else -%}
{%- else %}
timeout server 30000
{% endif -%}
{%- endif %}
listen stats {{ stat_port }}
listen stats
bind {{ local_host }}:{{ stat_port }}
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
stats auth admin:{{ stat_password }}
{% if frontends -%}
{% for service, ports in service_ports.items() -%}

View File

@ -26,6 +26,7 @@ import re
import six
import traceback
import uuid
import yaml
from charmhelpers.contrib.network import ip
@ -41,6 +42,7 @@ from charmhelpers.core.hookenv import (
log as juju_log,
charm_dir,
INFO,
related_units,
relation_ids,
relation_set,
status_set,
@ -84,6 +86,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
])
@ -97,60 +100,70 @@ OPENSTACK_CODENAMES = OrderedDict([
('2014.2', 'juno'),
('2015.1', 'kilo'),
('2015.2', 'liberty'),
('2016.1', 'mitaka'),
])
# The ugly duckling
# The ugly duckling - must list releases oldest to newest
SWIFT_CODENAMES = OrderedDict([
('1.4.3', 'diablo'),
('1.4.8', 'essex'),
('1.7.4', 'folsom'),
('1.8.0', 'grizzly'),
('1.7.7', 'grizzly'),
('1.7.6', 'grizzly'),
('1.10.0', 'havana'),
('1.9.1', 'havana'),
('1.9.0', 'havana'),
('1.13.1', 'icehouse'),
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
('2.0.0', 'juno'),
('2.1.0', 'juno'),
('2.2.0', 'juno'),
('2.2.1', 'kilo'),
('2.2.2', 'kilo'),
('2.3.0', 'liberty'),
('2.4.0', 'liberty'),
('diablo',
['1.4.3']),
('essex',
['1.4.8']),
('folsom',
['1.7.4']),
('grizzly',
['1.7.6', '1.7.7', '1.8.0']),
('havana',
['1.9.0', '1.9.1', '1.10.0']),
('icehouse',
['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
('juno',
['2.0.0', '2.1.0', '2.2.0']),
('kilo',
['2.2.1', '2.2.2']),
('liberty',
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
['2.5.0']),
])
# >= Liberty version->codename mapping
PACKAGE_CODENAMES = {
'nova-common': OrderedDict([
('12.0.0', 'liberty'),
('12.0', 'liberty'),
('13.0', 'mitaka'),
]),
'neutron-common': OrderedDict([
('7.0.0', 'liberty'),
('7.0', 'liberty'),
('8.0', 'mitaka'),
]),
'cinder-common': OrderedDict([
('7.0.0', 'liberty'),
('7.0', 'liberty'),
('8.0', 'mitaka'),
]),
'keystone': OrderedDict([
('8.0.0', 'liberty'),
('8.0', 'liberty'),
('9.0', 'mitaka'),
]),
'horizon-common': OrderedDict([
('8.0.0', 'liberty'),
('8.0', 'liberty'),
('9.0', 'mitaka'),
]),
'ceilometer-common': OrderedDict([
('5.0.0', 'liberty'),
('5.0', 'liberty'),
('6.0', 'mitaka'),
]),
'heat-common': OrderedDict([
('5.0.0', 'liberty'),
('5.0', 'liberty'),
('6.0', 'mitaka'),
]),
'glance-common': OrderedDict([
('11.0.0', 'liberty'),
('11.0', 'liberty'),
('12.0', 'mitaka'),
]),
'openstack-dashboard': OrderedDict([
('8.0.0', 'liberty'),
('8.0', 'liberty'),
('9.0', 'mitaka'),
]),
}
@ -213,6 +226,33 @@ def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
error_out(e)
def get_os_version_codename_swift(codename):
'''Determine OpenStack version number of swift from codename.'''
for k, v in six.iteritems(SWIFT_CODENAMES):
if k == codename:
return v[-1]
e = 'Could not derive swift version for '\
'codename: %s' % codename
error_out(e)
def get_swift_codename(version):
'''Determine OpenStack codename that corresponds to swift version.'''
codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
if len(codenames) > 1:
# If more than one release codename contains this version we determine
# the actual codename based on the highest available install source.
for codename in reversed(codenames):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
return codenames[0]
return None
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
import apt_pkg as apt
@ -237,7 +277,14 @@ def get_os_codename_package(package, fatal=True):
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
if 'swift' in pkg.name:
# Full x.y.z match for swift versions
match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
else:
# x.y match only for 20XX.X
# and ignore patch level for other packages
match = re.match('^(\d+)\.(\d+)', vers)
if match:
vers = match.group(0)
@ -249,13 +296,8 @@ def get_os_codename_package(package, fatal=True):
# < Liberty co-ordinated project versions
try:
if 'swift' in pkg.name:
swift_vers = vers[:5]
if swift_vers not in SWIFT_CODENAMES:
# Deal with 1.10.0 upward
swift_vers = vers[:6]
return SWIFT_CODENAMES[swift_vers]
return get_swift_codename(vers)
else:
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
if not fatal:
@ -273,12 +315,14 @@ def get_os_version_package(pkg, fatal=True):
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
for cname, version in six.iteritems(vers_map):
if cname == codename:
return version[-1]
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version
# e = "Could not determine OpenStack version for package: %s" % pkg
# error_out(e)
@ -374,6 +418,9 @@ def configure_installation_source(rel):
'liberty': 'trusty-updates/liberty',
'liberty/updates': 'trusty-updates/liberty',
'liberty/proposed': 'trusty-proposed/liberty',
'mitaka': 'trusty-updates/mitaka',
'mitaka/updates': 'trusty-updates/mitaka',
'mitaka/proposed': 'trusty-proposed/mitaka',
}
try:
@ -441,11 +488,16 @@ def openstack_upgrade_available(package):
cur_vers = get_os_version_package(package)
if "swift" in package:
codename = get_os_codename_install_source(src)
available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
avail_vers = get_os_version_codename_swift(codename)
else:
available_vers = get_os_version_install_source(src)
avail_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(available_vers, cur_vers) == 1
if "swift" in package:
major_cur_vers = cur_vers.split('.', 1)[0]
major_avail_vers = avail_vers.split('.', 1)[0]
major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
return apt.version_compare(avail_vers, cur_vers) == 1
def ensure_block_device(block_device):
@ -574,7 +626,7 @@ def _git_yaml_load(projects_yaml):
return yaml.load(projects_yaml)
def git_clone_and_install(projects_yaml, core_project, depth=1):
def git_clone_and_install(projects_yaml, core_project):
"""
Clone/install all specified OpenStack repositories.
@ -624,6 +676,9 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
depth = '1'
if 'depth' in p.keys():
depth = p['depth']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
@ -668,19 +723,13 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
"""
Clone and install a single git repository.
"""
dest_dir = os.path.join(parent_dir, os.path.basename(repo))
if not os.path.exists(parent_dir):
juju_log('Directory already exists at {}. '
'No need to create directory.'.format(parent_dir))
os.mkdir(parent_dir)
if not os.path.exists(dest_dir):
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
depth=depth)
else:
repo_dir = dest_dir
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv')
@ -858,7 +907,9 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
if charm_state != 'active' and charm_state != 'unknown':
state = workload_state_compare(state, charm_state)
if message:
message = "{} {}".format(message, charm_message)
charm_message = charm_message.replace("Incomplete relations: ",
"")
message = "{}, {}".format(message, charm_message)
else:
message = charm_message
@ -975,3 +1026,19 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
action_set({'outcome': 'no upgrade available.'})
return ret
def remote_restart(rel_name, remote_service=None):
trigger = {
'restart-trigger': str(uuid.uuid4()),
}
if remote_service:
trigger['remote-service'] = remote_service
for rid in relation_ids(rel_name):
# This subordinate can be related to two separate services using
# different subordinate relations so only issue the restart if
# the principal is connected down the relation we think it is
if related_units(relid=rid):
relation_set(relation_id=rid,
relation_settings=trigger,
)

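Because a swift point release can now appear under two codenames (2.5.0 is listed under both liberty and mitaka above), get_swift_codename() disambiguates using the configured install source. The core of that lookup, in isolation:

import six

from charmhelpers.contrib.openstack.utils import SWIFT_CODENAMES

# SWIFT_CODENAMES as restructured above: codename -> list of versions.
codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if '2.5.0' in v]
print(codenames)  # -> ['liberty', 'mitaka']; when ambiguous,
# get_swift_codename() walks the candidates newest-first and picks the
# one matching `apt-cache policy swift`.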
View File

@ -42,8 +42,12 @@ def parse_options(given, available):
yield "--{0}={1}".format(key, value)
def pip_install_requirements(requirements, **options):
"""Install a requirements file """
def pip_install_requirements(requirements, constraints=None, **options):
"""Install a requirements file.
:param constraints: Path to pip constraints file.
http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
"""
command = ["install"]
available_options = ('proxy', 'src', 'log', )
@ -51,8 +55,13 @@ def pip_install_requirements(requirements, **options):
command.append(option)
command.append("-r {0}".format(requirements))
log("Installing from file: {} with options: {}".format(requirements,
command))
if constraints:
command.append("-c {0}".format(constraints))
log("Installing from file: {} with constraints {} "
"and options: {}".format(requirements, constraints, command))
else:
log("Installing from file: {} with options: {}".format(requirements,
command))
pip_execute(command)

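The constraints support maps straight onto pip's -c flag. A hedged usage sketch (both paths are illustrative):

from charmhelpers.contrib.python.packages import pip_install_requirements

# Pin transitive dependencies with a constraints file; equivalent to
# `pip install --proxy=... -r requirements.txt -c constraints.txt`.
pip_install_requirements('/srv/app/requirements.txt',
                         constraints='/srv/app/constraints.txt',
                         proxy='http://proxy.example.com:3128')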
View File

@ -23,6 +23,8 @@
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import bisect
import six
import os
import shutil
@ -72,6 +74,394 @@ log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
def validator(value, valid_type, valid_range=None):
"""
Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
Example input:
validator(value=1,
valid_type=int,
valid_range=[0, 2])
This says I'm testing value=1. It must be an int inclusive in [0,2]
:param value: The value to validate
:param valid_type: The type that value should be.
:param valid_range: A range of values that value can assume.
:return: None. Raises AssertionError or ValueError if validation fails.
"""
assert isinstance(value, valid_type), "{} is not a {}".format(
value,
valid_type)
if valid_range is not None:
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
if valid_type is six.string_types:
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
else:
if len(valid_range) != 2:
raise ValueError(
"Invalid valid_range list of {} for {}. "
"List must be [min,max]".format(valid_range, value))
assert value >= valid_range[0], \
"{} is less than minimum allowed value of {}".format(
value, valid_range[0])
assert value <= valid_range[1], \
"{} is greater than maximum allowed value of {}".format(
value, valid_range[1])
class PoolCreationError(Exception):
"""
A custom error to inform the caller that a pool creation failed. Provides an error message
"""
def __init__(self, message):
super(PoolCreationError, self).__init__(message)
class Pool(object):
"""
An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
"""
def __init__(self, service, name):
self.service = service
self.name = name
# Create the pool if it doesn't exist already
# To be implemented by subclasses
def create(self):
pass
def add_cache_tier(self, cache_pool, mode):
"""
Adds a new cache tier to an existing pool.
:param cache_pool: six.string_types. The cache tier pool name to add.
:param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
:return: None
"""
# Check the input types and values
validator(value=cache_pool, valid_type=six.string_types)
validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
def remove_cache_tier(self, cache_pool):
"""
Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
:param cache_pool: six.string_types. The cache tier pool name to remove.
:return: None
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(cache_pool)
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
elif mode == 'writeback':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
# Flush the cache and wait for it to return
check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
def get_pgs(self, pool_size):
"""
:param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
erasure coded pools
:return: int. The number of pgs to use.
"""
validator(value=pool_size, valid_type=int)
osds = get_osds(self.service)
if not osds:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
return 200
# Calculate based on Ceph best practices
if osds < 5:
return 128
elif 5 < osds < 10:
return 512
elif 10 < osds < 50:
return 4096
else:
estimate = (osds * 100) / pool_size
# Return the next nearest power of 2
index = bisect.bisect_right(powers_of_two, estimate)
return powers_of_two[index]
class ReplicatedPool(Pool):
def __init__(self, service, name, replicas=2):
super(ReplicatedPool, self).__init__(service=service, name=name)
self.replicas = replicas
def create(self):
if not pool_exists(self.service, self.name):
# Create it
pgs = self.get_pgs(self.replicas)
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
try:
check_call(cmd)
except CalledProcessError:
raise
# Default jerasure erasure coded pool
class ErasurePool(Pool):
def __init__(self, service, name, erasure_code_profile="default"):
super(ErasurePool, self).__init__(service=service, name=name)
self.erasure_code_profile = erasure_code_profile
def create(self):
if not pool_exists(self.service, self.name):
# Try to find the erasure profile information so we can properly size the pgs
erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
# Check for errors
if erasure_profile is None:
log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
level=ERROR)
raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
if 'k' not in erasure_profile or 'm' not in erasure_profile:
# Error
log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
level=ERROR)
raise PoolCreationError(
message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
# Create it
cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
'erasure', self.erasure_code_profile]
try:
check_call(cmd)
except CalledProcessError:
raise
"""Get an existing erasure code profile if it already exists.
Returns json formatted output"""
def get_erasure_profile(service, name):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param name:
:return:
"""
try:
out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name, '--format=json'])
return json.loads(out)
except (CalledProcessError, OSError, ValueError):
return None
def pool_set(service, pool_name, key, value):
"""
Sets a value for a RADOS pool in ceph.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param key: six.string_types
:param value:
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
try:
check_call(cmd)
except CalledProcessError:
raise
def snapshot_pool(service, pool_name, snapshot_name):
"""
Snapshots a RADOS pool in ceph.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param snapshot_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
try:
check_call(cmd)
except CalledProcessError:
raise
def remove_pool_snapshot(service, pool_name, snapshot_name):
"""
Remove a snapshot from a RADOS pool in ceph.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param snapshot_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
try:
check_call(cmd)
except CalledProcessError:
raise
# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param max_bytes: int or long
:return: None. Can raise CalledProcessError
"""
# Set a byte quota on a RADOS pool in ceph.
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes]
try:
check_call(cmd)
except CalledProcessError:
raise
def remove_pool_quota(service, pool_name):
"""
Remove the byte quota from a RADOS pool in ceph.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
try:
check_call(cmd)
except CalledProcessError:
raise
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
data_chunks=2, coding_chunks=1,
locality=None, durability_estimator=None):
"""
Create a new erasure code profile if one does not already exist for it. Updates
the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
for more details
:param service: six.string_types. The Ceph user name to run the command under
:param profile_name: six.string_types
:param erasure_plugin_name: six.string_types
:param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
'room', 'root', 'row']
:param data_chunks: int
:param coding_chunks: int
:param locality: int
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
'ruleset_failure_domain=' + failure_domain]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
# Add plugin specific information
if locality is not None:
# For local erasure codes
cmd.append('l=' + str(locality))
if durability_estimator is not None:
# For Shec erasure codes
cmd.append('c=' + str(durability_estimator))
if erasure_profile_exists(service, profile_name):
cmd.append('--force')
try:
check_call(cmd)
except CalledProcessError:
raise
def rename_pool(service, old_name, new_name):
"""
Rename a Ceph pool from old_name to new_name
:param service: six.string_types. The Ceph user name to run the command under
:param old_name: six.string_types
:param new_name: six.string_types
:return: None
"""
validator(value=old_name, valid_type=six.string_types)
validator(value=new_name, valid_type=six.string_types)
cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
check_call(cmd)
def erasure_profile_exists(service, name):
"""
Check to see if an Erasure code profile already exists.
:param service: six.string_types. The Ceph user name to run the command under
:param name: six.string_types
:return: bool
"""
validator(value=name, valid_type=six.string_types)
try:
check_call(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name])
return True
except CalledProcessError:
return False
def get_cache_mode(service, pool_name):
"""
Find the current caching mode of the pool_name given.
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:return: six.string_types or None
"""
validator(value=service, valid_type=six.string_types)
validator(value=pool_name, valid_type=six.string_types)
out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
try:
osd_json = json.loads(out)
for pool in osd_json['pools']:
if pool['pool_name'] == pool_name:
return pool['cache_mode']
return None
except ValueError:
raise
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
except CalledProcessError:
return False
return name in out
def get_osds(service):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json']).decode('UTF-8'))
return None
def install():
@ -101,53 +491,37 @@ def create_rbd_image(service, pool, image, sizemb):
check_call(cmd)
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
except CalledProcessError:
return False
def update_pool(client, pool, settings):
cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
for k, v in six.iteritems(settings):
cmd.append(k)
cmd.append(v)
return name in out
check_call(cmd)
def get_osds(service):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json']).decode('UTF-8'))
return None
def create_pool(service, name, replicas=3):
def create_pool(service, name, replicas=3, pg_num=None):
"""Create a new RADOS pool."""
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 // replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
if not pg_num:
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pg_num = (len(osds) * 100 // replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pg_num = 200
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
check_call(cmd)
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
str(replicas)]
check_call(cmd)
update_pool(service, name, settings={'size': str(replicas)})
def delete_pool(service, name):
@ -202,10 +576,10 @@ def create_key_file(service, key):
log('Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
"""Query named relation 'ceph' to determine current nodes."""
def get_ceph_nodes(relation='ceph'):
"""Query named relation to determine current nodes."""
hosts = []
for r_id in relation_ids('ceph'):
for r_id in relation_ids(relation):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
@ -357,14 +731,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None):
def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
"""Ensures a ceph keyring is created for a named service and optionally
ensures user and group ownership.
Returns False if no ceph key is available in relation state.
"""
key = None
for rid in relation_ids('ceph'):
for rid in relation_ids(relation):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
@ -405,6 +779,7 @@ class CephBrokerRq(object):
The API is versioned and defaults to version 1.
"""
def __init__(self, api_version=1, request_id=None):
self.api_version = api_version
if request_id:
@ -413,9 +788,16 @@ class CephBrokerRq(object):
self.request_id = str(uuid.uuid1())
self.ops = []
def add_op_create_pool(self, name, replica_count=3):
def add_op_create_pool(self, name, replica_count=3, pg_num=None):
"""Adds an operation to create a pool.
@param pg_num: optional setting. If not provided, this value
will be calculated by the broker based on how many OSDs are in the
cluster at the time of creation. Note that, if provided, this value
will be capped at the current available maximum.
"""
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count})
'replicas': replica_count, 'pg_num': pg_num})
def set_ops(self, ops):
"""Set request ops to provided value.
@ -433,8 +815,8 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in ['replicas', 'name', 'op']:
if self.ops[req_no][key] != other.ops[req_no][key]:
for key in ['replicas', 'name', 'op', 'pg_num']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:
return False
@ -540,7 +922,7 @@ def get_previous_request(rid):
return request
def get_request_states(request):
def get_request_states(request, relation='ceph'):
"""Return a dict of requests per relation id with their corresponding
completion state.
@ -552,7 +934,7 @@ def get_request_states(request):
"""
complete = []
requests = {}
for rid in relation_ids('ceph'):
for rid in relation_ids(relation):
complete = False
previous_request = get_previous_request(rid)
if request == previous_request:
@ -570,14 +952,14 @@ def get_request_states(request):
return requests
def is_request_sent(request):
def is_request_sent(request, relation='ceph'):
"""Check to see if a functionally equivalent request has already been sent
Returns True if a similar request has been sent
@param request: A CephBrokerRq object
"""
states = get_request_states(request)
states = get_request_states(request, relation=relation)
for rid in states.keys():
if not states[rid]['sent']:
return False
@ -585,7 +967,7 @@ def is_request_sent(request):
return True
def is_request_complete(request):
def is_request_complete(request, relation='ceph'):
"""Check to see if a functionally equivalent request has already been
completed
@ -593,7 +975,7 @@ def is_request_complete(request):
@param request: A CephBrokerRq object
"""
states = get_request_states(request)
states = get_request_states(request, relation=relation)
for rid in states.keys():
if not states[rid]['complete']:
return False
@ -643,15 +1025,15 @@ def get_broker_rsp_key():
return 'broker-rsp-' + local_unit().replace('/', '-')
def send_request_if_needed(request):
def send_request_if_needed(request, relation='ceph'):
"""Send broker request if an equivalent request has not already been sent
@param request: A CephBrokerRq object
"""
if is_request_sent(request):
if is_request_sent(request, relation=relation):
log('Request already sent but not complete, not sending new request',
level=DEBUG)
else:
for rid in relation_ids('ceph'):
for rid in relation_ids(relation):
log('Sending request {}'.format(request.request_id), level=DEBUG)
relation_set(relation_id=rid, broker_req=request.request)

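Putting the new relation parameter and the pg_num plumbing together, a charm can now size pools explicitly when building a broker request. A hedged sketch (pool name and sizes illustrative):

from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    is_request_complete,
    send_request_if_needed,
)

# Ask the ceph broker for a 3-replica pool with an explicit PG count;
# pg_num=None (the default) lets the broker calculate it from OSD count.
rq = CephBrokerRq()
rq.add_op_create_pool(name='cinder-backup', replica_count=3, pg_num=128)
if not is_request_complete(rq, relation='ceph'):
    send_request_if_needed(rq, relation='ceph')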
View File

@ -76,3 +76,13 @@ def ensure_loopback_device(path, size):
check_call(cmd)
return create_loopback(path)
def is_mapped_loopback_device(device):
"""
Checks if a given device name is an existing/mapped loopback device.
:param device: str: Full path to the device (eg, /dev/loop1).
:returns: str: Path to the backing file if it is a loopback device;
empty string otherwise
"""
return loopback_devices().get(device, "")

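A minimal usage sketch of the new helper (the device path is illustrative):

from charmhelpers.contrib.storage.linux.loopback import (
    is_mapped_loopback_device,
)

# Returns the backing file path (e.g. '/srv/images/disk.img') if /dev/loop1
# is a mapped loopback device, or '' otherwise.
backing = is_mapped_loopback_device('/dev/loop1')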
View File

@ -490,6 +490,19 @@ def relation_types():
return rel_types
@cached
def peer_relation_id():
'''Get the peers relation id if a peers relation has been joined, else None.'''
md = metadata()
section = md.get('peers')
if section:
for key in section:
relids = relation_ids(key)
if relids:
return relids[0]
return None
@cached
def relation_to_interface(relation_name):
"""
@ -504,12 +517,12 @@ def relation_to_interface(relation_name):
def relation_to_role_and_interface(relation_name):
"""
Given the name of a relation, return the role and the name of the interface
that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
"""
_metadata = metadata()
for role in ('provides', 'requires', 'peer'):
for role in ('provides', 'requires', 'peers'):
interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
if interface:
return role, interface
@ -521,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name):
"""
Given a role and interface name, return a list of relation names for the
current charm that use that interface under that role (where role is one
of ``provides``, ``requires``, or ``peer``).
of ``provides``, ``requires``, or ``peers``).
:returns: A list of relation names.
"""
@ -542,7 +555,7 @@ def interface_to_relations(interface_name):
:returns: A list of relation names.
"""
results = []
for role in ('provides', 'requires', 'peer'):
for role in ('provides', 'requires', 'peers'):
results.extend(role_and_interface_to_relations(role, interface_name))
return results
@ -624,7 +637,7 @@ def unit_private_ip():
@cached
def storage_get(attribute="", storage_id=""):
def storage_get(attribute=None, storage_id=None):
"""Get storage attributes"""
_args = ['storage-get', '--format=json']
if storage_id:
@ -638,7 +651,7 @@ def storage_get(attribute="", storage_id=""):
@cached
def storage_list(storage_name=""):
def storage_list(storage_name=None):
"""List the storage IDs for the unit"""
_args = ['storage-list', '--format=json']
if storage_name:
@ -820,6 +833,7 @@ def status_get():
def translate_exc(from_exc, to_exc):
def inner_translate_exc1(f):
@wraps(f)
def inner_translate_exc2(*args, **kwargs):
try:
return f(*args, **kwargs)
@ -864,6 +878,40 @@ def leader_set(settings=None, **kwargs):
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_register(ptype, klass, pid):
""" is used while a hook is running to let Juju know that a
payload has been started."""
cmd = ['payload-register']
for x in [ptype, klass, pid]:
cmd.append(x)
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_unregister(klass, pid):
""" is used while a hook is running to let Juju know
that a payload has been manually stopped. The <class> and <id> provided
must match a payload that has been previously registered with juju using
payload-register."""
cmd = ['payload-unregister']
for x in [klass, pid]:
cmd.append(x)
subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_status_set(klass, pid, status):
"""is used to update the current status of a registered payload.
The <class> and <id> provided must match a payload that has been previously
registered with juju using payload-register. The <status> must be one of the
follow: starting, started, stopping, stopped"""
cmd = ['payload-status-set']
for x in [klass, pid, status]:
cmd.append(x)
subprocess.check_call(cmd)
@cached
def juju_version():
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""

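The payload-* helpers wrap the corresponding Juju hook tools. A hedged sketch of the lifecycle (the type, class, and id values are illustrative):

from charmhelpers.core.hookenv import (
    payload_register,
    payload_status_set,
    payload_unregister,
)

# Register a payload, mark it started, then unregister it.
payload_register('docker', 'monitoring', 'abcd1234')
payload_status_set('monitoring', 'abcd1234', 'started')
payload_unregister('monitoring', 'abcd1234')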
View File

@ -67,10 +67,14 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
"""Pause a system service.
Stop it, and prevent it from starting again at boot."""
stopped = service_stop(service_name)
stopped = True
if service_running(service_name):
stopped = service_stop(service_name)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if os.path.exists(upstart_file):
if init_is_systemd():
service('disable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
@ -78,9 +82,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
# XXX: Support SystemD too
raise ValueError(
"Unable to detect {0} as either Upstart {1} or SysV {2}".format(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped
@ -92,7 +96,9 @@ def service_resume(service_name, init_dir="/etc/init",
Reenable starting again at boot. Start the service"""
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if os.path.exists(upstart_file):
if init_is_systemd():
service('enable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
@ -100,34 +106,42 @@ def service_resume(service_name, init_dir="/etc/init",
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
# XXX: Support SystemD too
raise ValueError(
"Unable to detect {0} as either Upstart {1} or SysV {2}".format(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_start(service_name)
started = service_running(service_name)
if not started:
started = service_start(service_name)
return started
def service(action, service_name):
"""Control a system service"""
cmd = ['service', service_name, action]
if init_is_systemd():
cmd = ['systemctl', action, service_name]
else:
cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0
def service_running(service):
def service_running(service_name):
"""Determine whether a system service is running"""
try:
output = subprocess.check_output(
['service', service, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
if init_is_systemd():
return service('is-active', service_name)
else:
if ("start/running" in output or "is running" in output):
return True
else:
try:
output = subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def service_available(service_name):
@ -142,8 +156,29 @@ def service_available(service_name):
return True
def adduser(username, password=None, shell='/bin/bash', system_user=False):
"""Add a user to the system"""
SYSTEMD_SYSTEM = '/run/systemd/system'
def init_is_systemd():
"""Return True if the host system uses systemd, False otherwise."""
return os.path.isdir(SYSTEMD_SYSTEM)
def adduser(username, password=None, shell='/bin/bash', system_user=False,
primary_group=None, secondary_groups=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
:param str username: Username to create
:param str password: Password for user; if ``None``, create a system user
:param str shell: The default shell for the user
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
@ -158,6 +193,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
'--shell', shell,
'--password', password,
])
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username # avoid "group exists" error
except KeyError:
pass
if primary_group:
cmd.extend(['-g', primary_group])
if secondary_groups:
cmd.extend(['-G', ','.join(secondary_groups)])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
@ -255,14 +300,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
def fstab_remove(mp):
"""Remove the given mountpoint entry from /etc/fstab
"""
"""Remove the given mountpoint entry from /etc/fstab"""
return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
"""Adds the given device entry to the /etc/fstab file
"""
"""Adds the given device entry to the /etc/fstab file"""
return Fstab.add(dev, mp, fs, options=options)
@ -318,8 +361,7 @@ def fstab_mount(mountpoint):
def file_hash(path, hash_type='md5'):
"""
Generate a hash checksum of the contents of 'path' or None if not found.
"""Generate a hash checksum of the contents of 'path' or None if not found.
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
@ -334,10 +376,9 @@ def file_hash(path, hash_type='md5'):
def path_hash(path):
"""
Generate a hash checksum of all files matching 'path'. Standard wildcards
like '*' and '?' are supported, see documentation for the 'glob' module for
more information.
"""Generate a hash checksum of all files matching 'path'. Standard
wildcards like '*' and '?' are supported, see documentation for the 'glob'
module for more information.
:return: dict: A { filename: hash } dictionary for all matched files.
Empty if none found.
@ -349,8 +390,7 @@ def path_hash(path):
def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.
"""Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
@ -365,6 +405,7 @@ def check_hash(path, checksum, hash_type='md5'):
class ChecksumError(ValueError):
"""A class derived from Value error to indicate the checksum failed."""
pass
@ -470,7 +511,7 @@ def get_bond_master(interface):
def list_nics(nic_type=None):
'''Return a list of nics of given type(s)'''
"""Return a list of nics of given type(s)"""
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
@ -512,12 +553,13 @@ def list_nics(nic_type=None):
def set_nic_mtu(nic, mtu):
'''Set MTU on a network interface'''
"""Set the Maximum Transmission Unit (MTU) on a network interface."""
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
"""Return the Maximum Transmission Unit (MTU) for a network interface."""
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
@ -529,6 +571,7 @@ def get_nic_mtu(nic):
def get_nic_hwaddr(nic):
"""Return the Media Access Control (MAC) for a network interface."""
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
@ -539,7 +582,7 @@ def get_nic_hwaddr(nic):
def cmp_pkgrevno(package, revno, pkgcache=None):
'''Compare supplied revno with the revno of the installed package
"""Compare supplied revno with the revno of the installed package
* 1 => Installed revno is greater than supplied arg
* 0 => Installed revno is the same as supplied arg
@ -548,7 +591,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
This function imports apt_cache function from charmhelpers.fetch if
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
you call this function, or pass an apt_pkg.Cache() instance.
'''
"""
import apt_pkg
if not pkgcache:
from charmhelpers.fetch import apt_cache
@ -558,15 +601,30 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
@contextmanager
def chdir(d):
def chdir(directory):
"""Change the current working directory to a different directory for a code
block and return the previous directory after the block exits. Useful to
run commands from a specified directory.
:param str directory: The directory path to change to for this context.
"""
cur = os.getcwd()
try:
yield os.chdir(d)
yield os.chdir(directory)
finally:
os.chdir(cur)
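For example, a small sketch of the context manager in use (path illustrative):
# Hypothetical usage: run a command from a specific directory, then return
# to the previous working directory automatically.
import subprocess

with chdir('/etc/ceph'):
    subprocess.check_call(['ls', '-l'])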
def chownr(path, owner, group, follow_links=True):
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
"""Recursively change user and group ownership of files and directories
in given path. Doesn't chown path itself by default, only its children.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
:param bool follow_links: Also chown links if True
:param bool chowntopdir: Also chown path itself if True
"""
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if follow_links:
@ -574,6 +632,10 @@ def chownr(path, owner, group, follow_links=True):
else:
chown = os.lchown
if chowntopdir:
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
if not broken_symlink:
chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for name in dirs + files:
full = os.path.join(root, name)
@ -583,4 +645,28 @@ def chownr(path, owner, group, follow_links=True):
def lchownr(path, owner, group):
"""Recursively change user and group ownership of files and directories
in a given path, not following symbolic links. See the documentation for
'os.lchown' for more information.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
"""
chownr(path, owner, group, follow_links=False)
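A short sketch of the new chowntopdir flag (path and names illustrative):
# Hypothetical example: hand an entire tree to the cinder user, including
# the top-level directory itself, without following symlinks.
chownr('/var/lib/cinder', owner='cinder', group='cinder',
       follow_links=False, chowntopdir=True)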
def get_total_ram():
"""The total amount of system RAM in bytes.
This is what is reported by the OS, and may be overcommitted when
there are multiple containers hosted on the same machine.
"""
with open('/proc/meminfo', 'r') as f:
for line in f.readlines():
if line:
key, value, unit = line.split()
if key == 'MemTotal:':
assert unit == 'kB', 'Unknown unit'
return int(value) * 1024 # Classic, not KiB.
raise NotImplementedError()
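A note on the arithmetic: MemTotal is reported in classic kB (1 kB = 1024 bytes), hence the multiplication. A hypothetical use:
# Hypothetical usage: derive a budget from reported RAM; a line such as
# 'MemTotal: 2048000 kB' yields 2048000 * 1024 bytes.
cache_bytes = get_total_ram() // 10  # budget ~10% of system memory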
@ -46,6 +46,8 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256,
group_info = add_group(group)
gid = group_info.gr_gid
add_user_to_group(user, group)
if max_map_count < 2 * nr_hugepages:
max_map_count = 2 * nr_hugepages
sysctl_settings = {
'vm.nr_hugepages': nr_hugepages,
'vm.max_map_count': max_map_count,
@ -243,33 +243,40 @@ class TemplateCallback(ManagerCallback):
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
:param str target: The target to write the rendered template to
:param str target: The target to write the rendered template to (or None)
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
:param partial on_change_action: functools partial to be executed when
rendered file changes
:param jinja2 loader template_loader: A jinja2 template loader
:return str: The rendered template
"""
def __init__(self, source, target,
owner='root', group='root', perms=0o444,
on_change_action=None):
on_change_action=None, template_loader=None):
self.source = source
self.target = target
self.owner = owner
self.group = group
self.perms = perms
self.on_change_action = on_change_action
self.template_loader = template_loader
def __call__(self, manager, service_name, event_name):
pre_checksum = ''
if self.on_change_action and os.path.isfile(self.target):
pre_checksum = host.file_hash(self.target)
service = manager.get_service(service_name)
context = {}
context = {'ctx': {}}
for ctx in service.get('required_data', []):
context.update(ctx)
templating.render(self.source, self.target, context,
self.owner, self.group, self.perms)
context['ctx'].update(ctx)
result = templating.render(self.source, self.target, context,
self.owner, self.group, self.perms,
template_loader=self.template_loader)
if self.on_change_action:
if pre_checksum == host.file_hash(self.target):
hookenv.log(
@ -278,6 +285,8 @@ class TemplateCallback(ManagerCallback):
else:
self.on_change_action()
return result
# Convenience aliases for templates
render_template = template = TemplateCallback
@ -21,13 +21,14 @@ from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None, encoding='UTF-8'):
perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute.
The `target` path should be absolute. It can also be `None`, in which
case no file will be written.
The context should be a dict containing the values to be replaced in the
template.
@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root',
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
The rendered template will be written to the file as well as being returned
as a string.
Note: Using this requires python-jinja2; if it is not installed, calling
this will attempt to use charmhelpers.fetch.apt_install to install it.
"""
@ -52,17 +56,26 @@ def render(source, target, context, owner='root', group='root',
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
loader = Environment(loader=FileSystemLoader(templates_dir))
if template_loader:
template_env = Environment(loader=template_loader)
else:
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
try:
source = source
template = loader.get_template(source)
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
host.write_file(target, content.encode(encoding), owner, group, perms)
if target is not None:
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
# This is a terrible default directory permission, as the file
# or its siblings will often contain secrets.
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
host.write_file(target, content.encode(encoding), owner, group, perms)
return content
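A minimal sketch of the two new modes, rendering to a string only and supplying a custom loader (template name and context are illustrative):
# Hypothetical example: target=None skips writing to disk, and an in-memory
# jinja2 DictLoader replaces the charm's templates/ directory.
from jinja2 import DictLoader

loader = DictLoader({'ceph.conf': 'mon host = {{ ctx.mon_hosts }}'})
content = render('ceph.conf', None, {'ctx': {'mon_hosts': '10.0.0.1'}},
                 template_loader=loader)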
@ -98,6 +98,14 @@ CLOUD_ARCHIVE_POCKETS = {
'liberty/proposed': 'trusty-proposed/liberty',
'trusty-liberty/proposed': 'trusty-proposed/liberty',
'trusty-proposed/liberty': 'trusty-proposed/liberty',
# Mitaka
'mitaka': 'trusty-updates/mitaka',
'trusty-mitaka': 'trusty-updates/mitaka',
'trusty-mitaka/updates': 'trusty-updates/mitaka',
'trusty-updates/mitaka': 'trusty-updates/mitaka',
'mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
}
# The order of this list is very important. Handlers should be listed in from
@ -225,12 +233,12 @@ def apt_purge(packages, fatal=False):
def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark"""
log("Marking {} as {}".format(packages, mark))
cmd = ['apt-mark', mark]
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd, universal_newlines=True)
@ -411,7 +419,7 @@ def plugins(fetch_handlers=None):
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
except NotImplementedError:
# Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(
@ -108,7 +108,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
install_opener(opener)
response = urlopen(source)
try:
with open(dest, 'w') as dest_file:
with open(dest, 'wb') as dest_file:
dest_file.write(response.read())
except Exception as e:
if os.path.isfile(dest):
@ -15,60 +15,50 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from subprocess import check_call
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
UnhandledSource,
filter_installed_packages,
apt_install,
)
from charmhelpers.core.host import mkdir
import six
if six.PY3:
raise ImportError('bzrlib does not support Python3')
try:
from bzrlib.branch import Branch
from bzrlib import bzrdir, workingtree, errors
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-bzrlib")
from bzrlib.branch import Branch
from bzrlib import bzrdir, workingtree, errors
if filter_installed_packages(['bzr']) != []:
apt_install(['bzr'])
if filter_installed_packages(['bzr']) != []:
raise NotImplementedError('Unable to install bzr')
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('bzr+ssh', 'lp'):
if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.bzr'))
else:
return True
def branch(self, source, dest):
url_parts = self.parse_url(source)
# If we use lp:branchname scheme we need to load plugins
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if url_parts.scheme == "lp":
from bzrlib.plugin import load_plugins
load_plugins()
try:
local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
except errors.AlreadyControlDirError:
local_branch = Branch.open(dest)
try:
remote_branch = Branch.open(source)
remote_branch.push(local_branch)
tree = workingtree.WorkingTree.open(dest)
tree.update()
except Exception as e:
raise e
if os.path.exists(dest):
check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
else:
check_call(['bzr', 'branch', source, dest])
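A usage sketch of the CLI-based handler (URL and destination illustrative):
# Hypothetical example: fetch a branch into a target directory; branch()
# now shells out to 'bzr branch' or 'bzr pull --overwrite'.
handler = BzrUrlFetchHandler()
if handler.can_handle('lp:charm-helpers'):
    handler.install('lp:charm-helpers', dest='/tmp')  # -> /tmp/charm-helpers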
def install(self, source):
def install(self, source, dest=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:
@ -15,24 +15,18 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import os
from subprocess import check_call
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
UnhandledSource,
filter_installed_packages,
apt_install,
)
from charmhelpers.core.host import mkdir
import six
if six.PY3:
raise ImportError('GitPython does not support Python 3')
try:
from git import Repo
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-git")
from git import Repo
from git.exc import GitCommandError # noqa E402
if filter_installed_packages(['git']) != []:
apt_install(['git'])
if filter_installed_packages(['git']) != []:
raise NotImplementedError('Unable to install git')
class GitUrlFetchHandler(BaseFetchHandler):
@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler):
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
if url_parts.scheme not in ('http', 'https', 'git'):
if url_parts.scheme not in ('http', 'https', 'git', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.git'))
else:
return True
def clone(self, source, dest, branch, depth=None):
def clone(self, source, dest, branch="master", depth=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if depth:
Repo.clone_from(source, dest, branch=branch, depth=depth)
if os.path.exists(dest):
cmd = ['git', '-C', dest, 'pull', source, branch]
else:
Repo.clone_from(source, dest, branch=branch)
cmd = ['git', 'clone', source, dest, '--branch', branch]
if depth:
cmd.extend(['--depth', depth])
check_call(cmd)
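A usage sketch of the reworked clone path (URL and paths illustrative; note depth is passed straight to the command line, so a string is safest):
# Hypothetical example: a fresh shallow clone now runs, roughly,
#   git clone <source> <dest> --branch master --depth 1
GitUrlFetchHandler().clone('https://github.com/juju/charm-helpers',
                           '/tmp/charm-helpers', branch='master', depth='1')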
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
@ -62,12 +61,8 @@ class GitUrlFetchHandler(BaseFetchHandler):
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:
self.clone(source, dest_dir, branch, depth)
except GitCommandError as e:
raise UnhandledSource(e)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir
@ -98,4 +98,4 @@ def set_ceph_env_variables(service):
with open('/etc/environment', 'a') as out:
out.write('CEPH_ARGS="--id %s"\n' % service)
with open('/etc/init/cinder-backup.override', 'w') as out:
out.write('env CEPH_ARGS="--id %s"\n' % service)
out.write('env CEPH_ARGS="--id %s"\n' % service)
@ -5,7 +5,7 @@ description: |
Cinder is the block storage service for the Openstack project.
.
This subordinate charm configures the Cinder backup service.
categories:
tags:
- miscellaneous
subordinate: true
provides:
tests/00-setup Executable file
@ -0,0 +1,17 @@
#!/bin/bash
set -ex
sudo add-apt-repository --yes ppa:juju/stable
sudo apt-get update --yes
sudo apt-get install --yes amulet \
distro-info-data \
python-cinderclient \
python-distro-info \
python-glanceclient \
python-heatclient \
python-keystoneclient \
python-neutronclient \
python-novaclient \
python-pika \
python-swiftclient
@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic cinder-backup deployment on precise-icehouse."""
from basic_deployment import CinderBackupBasicDeployment
if __name__ == '__main__':
deployment = CinderBackupBasicDeployment(series='precise',
openstack='cloud:precise-icehouse',
source='cloud:precise-updates/icehouse')
deployment.run_tests()
@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic cinder-backup deployment on trusty-icehouse."""
from basic_deployment import CinderBackupBasicDeployment
if __name__ == '__main__':
deployment = CinderBackupBasicDeployment(series='trusty')
deployment.run_tests()
tests/016-basic-trusty-juno Executable file
@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic cinder-backup deployment on trusty-juno."""
from basic_deployment import CinderBackupBasicDeployment
if __name__ == '__main__':
deployment = CinderBackupBasicDeployment(series='trusty',
openstack='cloud:trusty-juno',
source='cloud:trusty-updates/juno')
deployment.run_tests()
tests/017-basic-trusty-kilo Executable file
@ -0,0 +1,11 @@
#!/usr/bin/python
"""Amulet tests on a basic cinder-backup deployment on trusty-kilo."""
from basic_deployment import CinderBackupBasicDeployment
if __name__ == '__main__':
deployment = CinderBackupBasicDeployment(series='trusty',
openstack='cloud:trusty-kilo',
source='cloud:trusty-updates/kilo')
deployment.run_tests()
tests/018-basic-vivid-kilo Executable file
@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic cinder-backup deployment on vivid-kilo."""
from basic_deployment import CinderBackupBasicDeployment
if __name__ == '__main__':
deployment = CinderBackupBasicDeployment(series='vivid')
deployment.run_tests()
tests/019-basic-wily-liberty Executable file
@ -0,0 +1,9 @@
#!/usr/bin/python
"""Amulet tests on a basic cinder-backup deployment on wily-liberty."""
from basic_deployment import CinderBackupBasicDeployment
if __name__ == '__main__':
deployment = CinderBackupBasicDeployment(series='wily')
deployment.run_tests()
tests/README Normal file
@ -0,0 +1,68 @@
This directory provides Amulet tests that focus on verification of
cinder-backup deployments.
test_* methods are called in lexical sort order.
Test name convention to ensure desired test order:
1xx service and endpoint checks
2xx relation checks
3xx config checks
4xx functional checks
9xx restarts and other final checks
Common uses of backend relations in deployments:
- [ cinder, cinder-ceph ]
- [ cinder, cinder-backup ]
- [ cinder-ceph, ceph ]
- [ cinder-backup, ceph ]
In order to run tests, you'll need charm-tools installed (in addition to
juju, of course):
sudo add-apt-repository ppa:juju/stable
sudo apt-get update
sudo apt-get install charm-tools
If you use a web proxy server to access the web, you'll need to set the
AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
The following examples demonstrate different ways that tests can be executed.
All examples are run from the charm's root directory.
* To run all tests (starting with 00-setup):
make functional_test
* To run a specific test module (or modules):
juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To run a specific test module (or modules), and keep the environment
deployed after a failure:
juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
* To re-run a test module against an already deployed environment (one
that was deployed by a previous call to 'juju test --set-e'):
./tests/15-basic-trusty-icehouse
For debugging and test development purposes, all code should be idempotent.
In other words, the code should have the ability to be re-run without changing
the results beyond the initial run. This enables editing and re-running of a
test module against an already deployed environment, as described above.
Manual debugging tips:
* Set the following env vars before using the OpenStack CLI as admin:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=openstack
export OS_REGION_NAME=RegionOne
* Set the following env vars before using the OpenStack CLI as demoUser:
export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
export OS_TENANT_NAME=demoTenant
export OS_USERNAME=demoUser
export OS_PASSWORD=password
export OS_REGION_NAME=RegionOne
tests/basic_deployment.py Normal file
@ -0,0 +1,813 @@
#!/usr/bin/python
""" Basic cinder-backup functional test. """
import amulet
import json
import time
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OpenStackAmuletUtils,
DEBUG,
)
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic cinder-backup deployment."""
def __init__(self, series=None, openstack=None, source=None, git=False,
stable=False):
"""Deploy the entire test environment."""
super(CinderBackupBasicDeployment, self).__init__(series, openstack,
source, stable)
self.git = git
self._add_services()
self._add_relations()
self._configure_services()
self._deploy()
self._initialize_tests()
def _add_services(self):
"""Add the services that we're testing, where cinder-backup is
local, and the rest of the services are from lp branches that
are compatible with the local charm (e.g. stable or next).
"""
# Note: cinder-backup becomes a cinder subordinate unit.
this_service = {'name': 'cinder-backup'}
other_services = [
{'name': 'mysql'},
{'name': 'keystone'},
{'name': 'rabbitmq-server'},
{'name': 'ceph', 'units': 3},
{'name': 'cinder'},
{'name': 'cinder-ceph'}
]
super(CinderBackupBasicDeployment, self)._add_services(this_service,
other_services)
def _add_relations(self):
"""Add all of the relations for the services."""
relations = {
'cinder-backup:ceph': 'ceph:client',
'cinder-ceph:ceph': 'ceph:client',
'cinder:storage-backend': 'cinder-ceph:storage-backend',
'cinder:backup-backend': 'cinder-backup:backup-backend',
'keystone:shared-db': 'mysql:shared-db',
'cinder:shared-db': 'mysql:shared-db',
'cinder:identity-service': 'keystone:identity-service',
'cinder:amqp': 'rabbitmq-server:amqp',
}
super(CinderBackupBasicDeployment, self)._add_relations(relations)
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {
'admin-password': 'openstack',
'admin-token': 'ubuntutesting'
}
mysql_config = {
'dataset-size': '50%'
}
cinder_config = {
'block-device': 'None',
'glance-api-version': '2'
}
ceph_config = {
'monitor-count': '3',
'auth-supported': 'none',
'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
'osd-reformat': 'yes',
'ephemeral-unmount': '/mnt',
'osd-devices': '/dev/vdb /srv/ceph'
}
cinder_ceph_config = {
'ceph-osd-replication-count': '3',
}
configs = {
'keystone': keystone_config,
'mysql': mysql_config,
'cinder': cinder_config,
'ceph': ceph_config,
'cinder-ceph': cinder_ceph_config,
'cinder-backup': cinder_ceph_config
}
super(CinderBackupBasicDeployment, self)._configure_services(configs)
def _initialize_tests(self):
"""Perform final initialization before tests get run."""
# Access the sentries for inspecting service units
u.log.debug('!!!!!')
u.log.debug(dir(self.d.sentry))
self.mysql_sentry = self.d.sentry.unit['mysql/0']
self.keystone_sentry = self.d.sentry.unit['keystone/0']
self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
self.cinder_sentry = self.d.sentry.unit['cinder/0']
self.ceph0_sentry = self.d.sentry.unit['ceph/0']
self.ceph1_sentry = self.d.sentry.unit['ceph/1']
self.ceph2_sentry = self.d.sentry.unit['ceph/2']
self.cinder_backup_sentry = self.d.sentry.unit['cinder-backup/0']
u.log.debug('openstack release val: {}'.format(
self._get_openstack_release()))
u.log.debug('openstack release str: {}'.format(
self._get_openstack_release_string()))
# Let things settle a bit before moving forward
time.sleep(30)
# Authenticate admin with keystone
self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
user='admin',
password='openstack',
tenant='admin')
# Authenticate admin with cinder endpoint
self.cinder = u.authenticate_cinder_admin(self.keystone_sentry,
username='admin',
password='openstack',
tenant='admin')
# Create a demo tenant/role/user
self.demo_tenant = 'demoTenant'
self.demo_role = 'demoRole'
self.demo_user = 'demoUser'
if not u.tenant_exists(self.keystone, self.demo_tenant):
tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
description='demo tenant',
enabled=True)
self.keystone.roles.create(name=self.demo_role)
self.keystone.users.create(name=self.demo_user,
password='password',
tenant_id=tenant.id,
email='demo@demo.com')
# Authenticate demo user with keystone
self.keystone_demo = u.authenticate_keystone_user(self.keystone,
self.demo_user,
'password',
self.demo_tenant)
# Authenticate demo user with nova-api
self.nova_demo = u.authenticate_nova_user(self.keystone,
self.demo_user,
'password',
self.demo_tenant)
def test_100_ceph_processes(self):
"""Verify that the expected service processes are running
on each ceph unit."""
# Process name and quantity of processes to expect on each unit
ceph_processes = {
'ceph-mon': 1,
'ceph-osd': 2
}
# Units with process names and PID quantities expected
expected_processes = {
self.ceph0_sentry: ceph_processes,
self.ceph1_sentry: ceph_processes,
self.ceph2_sentry: ceph_processes
}
actual_pids = u.get_unit_process_ids(expected_processes)
ret = u.validate_unit_process_ids(expected_processes, actual_pids)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_102_services(self):
"""Verify the expected services are running on the service units."""
services = {
self.mysql_sentry: ['mysql'],
self.rabbitmq_sentry: ['rabbitmq-server'],
self.keystone_sentry: ['keystone'],
self.cinder_sentry: ['cinder-api',
'cinder-scheduler',
'cinder-volume'],
}
if self._get_openstack_release() < self.vivid_kilo:
# For upstart systems only. Ceph services under systemd
# are checked by process name instead.
ceph_services = [
'ceph-mon-all',
'ceph-mon id=`hostname`',
'ceph-osd-all',
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
]
services[self.ceph0_sentry] = ceph_services
services[self.ceph1_sentry] = ceph_services
services[self.ceph2_sentry] = ceph_services
ret = u.validate_services_by_name(services)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_110_users(self):
"""Verify expected users."""
u.log.debug('Checking keystone users...')
expected = [
{'name': 'cinder_cinderv2',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost'},
{'name': 'admin',
'enabled': True,
'tenantId': u.not_null,
'id': u.not_null,
'email': 'juju@localhost'}
]
actual = self.keystone.users.list()
ret = u.validate_user_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_112_service_catalog(self):
"""Verify that the service catalog endpoint data"""
u.log.debug('Checking keystone service catalog...')
endpoint_vol = {
'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url
}
endpoint_id = {
'adminURL': u.valid_url,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url
}
if self._get_openstack_release() >= self.trusty_icehouse:
endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null
expected = {
'identity': [endpoint_id],
'volume': [endpoint_vol]
}
actual = self.keystone.service_catalog.get_endpoints()
ret = u.validate_svc_catalog_endpoint_data(expected, actual)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_114_cinder_endpoint(self):
"""Verify the cinder endpoint data."""
u.log.debug('Checking cinder api endpoint data...')
endpoints = self.keystone.endpoints.list()
admin_port = internal_port = public_port = '8776'
expected = {
'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null
}
ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
public_port, expected)
if ret:
amulet.raise_status(amulet.FAIL,
msg='cinder endpoint: {}'.format(ret))
def validate_broker_req(self, unit, relation, expected):
rel_data = json.loads(unit.relation(
relation[0],
relation[1])['broker_req'])
if rel_data['api-version'] != expected['api-version']:
return "Broker request api mismatch"
for index in range(0, len(rel_data['ops'])):
actual_op = rel_data['ops'][index]
expected_op = expected['ops'][index]
for key in ['op', 'name', 'replicas']:
if actual_op[key] == expected_op[key]:
u.log.debug("OK op {} key {}".format(index, key))
else:
return "Mismatch, op: {} key: {}".format(index, key)
return None
def get_broker_request(self):
client_unit = self.cinder_backup_sentry
broker_req = json.loads(client_unit.relation(
'ceph',
'ceph:client')['broker_req'])
return broker_req
def get_broker_response(self):
broker_request = self.get_broker_request()
response_key = "broker-rsp-cinder-backup-0"
ceph_sentries = [self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry]
for sentry in ceph_sentries:
relation_data = sentry.relation('client', 'cinder-backup:ceph')
if relation_data.get(response_key):
broker_response = json.loads(relation_data[response_key])
if (broker_request['request-id'] ==
broker_response['request-id']):
return broker_response
def test_200_cinderbackup_ceph_ceph_relation(self):
u.log.debug('Checking cinder-backup:ceph to ceph:client '
'relation data...')
unit = self.cinder_backup_sentry
relation = ['ceph', 'ceph:client']
req = {
"api-version": 1,
"ops": [{"replicas": 3,
"name": "cinder-backup",
"op": "create-pool"}]
}
expected = {
'private-address': u.valid_ip,
'broker_req': u.not_null,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder-backup ceph', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
ret = self.validate_broker_req(unit, relation, req)
if ret:
msg = u.relation_error('cinder-backup ceph', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_201_ceph_cinderbackup_ceph_relation(self):
u.log.debug('Checking ceph:client to cinder-backup:ceph '
'relation data...')
ceph_unit = self.ceph0_sentry
relation = ['client', 'cinder-backup:ceph']
expected = {
'key': u.not_null,
'private-address': u.valid_ip,
'ceph-public-address': u.valid_ip,
'auth': 'none',
}
ret = u.validate_relation_data(ceph_unit, relation, expected)
if ret:
msg = u.relation_error('ceph client cinder-backup', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
broker_response = self.get_broker_response()
if not broker_response or broker_response['exit-code'] != 0:
msg = ('Broker request invalid'
' or failed: {}'.format(broker_response))
amulet.raise_status(amulet.FAIL, msg=msg)
def test_202_cinderbackup_cinder_backend_relation(self):
u.log.debug('Checking cinder-backup:backup-backend to '
'cinder:backup-backend relation data...')
unit = self.cinder_backup_sentry
relation = ['backup-backend', 'cinder:backup-backend']
sub = ('{"cinder": {"/etc/cinder/cinder.conf": {"sections": '
'{"DEFAULT": ['
'["backup_driver", "cinder.backup.drivers.ceph"], '
'["backup_ceph_pool", "cinder-backup"], '
'["backup_ceph_user", "cinder-backup"]]}}}}')
expected = {
'subordinate_configuration': sub,
'private-address': u.valid_ip,
'backend_name': 'cinder-backup'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder cinder-backup backup-backend', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_203_cinder_cinderbackup_backend_relation(self):
u.log.debug('Checking cinder:backup-backend to '
'cinder-backup:backup-backend relation data...')
unit = self.cinder_sentry
relation = ['backup-backend', 'cinder-backup:backup-backend']
expected = {
'private-address': u.valid_ip,
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder cinder-backup backup-backend', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_204_mysql_cinder_db_relation(self):
"""Verify the mysql:glance shared-db relation data"""
u.log.debug('Checking mysql:cinder db relation data...')
unit = self.mysql_sentry
relation = ['shared-db', 'cinder:shared-db']
expected = {
'private-address': u.valid_ip,
'db_host': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('mysql shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_205_cinder_mysql_db_relation(self):
"""Verify the cinder:mysql shared-db relation data"""
u.log.debug('Checking cinder:mysql db relation data...')
unit = self.cinder_sentry
relation = ['shared-db', 'mysql:shared-db']
expected = {
'private-address': u.valid_ip,
'hostname': u.valid_ip,
'username': 'cinder',
'database': 'cinder'
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_206_keystone_cinder_id_relation(self):
"""Verify the keystone:cinder identity-service relation data"""
u.log.debug('Checking keystone:cinder id relation data...')
unit = self.keystone_sentry
relation = ['identity-service',
'cinder:identity-service']
expected = {
'service_protocol': 'http',
'service_tenant': 'services',
'admin_token': 'ubuntutesting',
'service_password': u.not_null,
'service_port': '5000',
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'auth_host': u.valid_ip,
'service_username': 'cinder_cinderv2',
'service_tenant_id': u.not_null,
'service_host': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('identity-service cinder', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_207_cinder_keystone_id_relation(self):
"""Verify the cinder:keystone identity-service relation data"""
u.log.debug('Checking cinder:keystone id relation data...')
unit = self.cinder_sentry
relation = ['identity-service',
'keystone:identity-service']
expected = {
'cinder_service': 'cinder',
'cinder_region': 'RegionOne',
'cinder_public_url': u.valid_url,
'cinder_internal_url': u.valid_url,
'cinder_admin_url': u.valid_url,
'private-address': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_208_rabbitmq_cinder_amqp_relation(self):
"""Verify the rabbitmq-server:cinder amqp relation data"""
u.log.debug('Checking rmq:cinder amqp relation data...')
unit = self.rabbitmq_sentry
relation = ['amqp', 'cinder:amqp']
expected = {
'private-address': u.valid_ip,
'password': u.not_null,
'hostname': u.valid_ip
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('amqp cinder', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_209_cinder_rabbitmq_amqp_relation(self):
"""Verify the cinder:rabbitmq-server amqp relation data"""
u.log.debug('Checking cinder:rmq amqp relation data...')
unit = self.cinder_sentry
relation = ['amqp', 'rabbitmq-server:amqp']
expected = {
'private-address': u.valid_ip,
'vhost': 'openstack',
'username': u.not_null
}
ret = u.validate_relation_data(unit, relation, expected)
if ret:
msg = u.relation_error('cinder amqp', ret)
amulet.raise_status(amulet.FAIL, msg=msg)
def test_300_cinder_config(self):
"""Verify the data in the cinder.conf file."""
u.log.debug('Checking cinder config file data...')
unit = self.cinder_sentry
conf = '/etc/cinder/cinder.conf'
unit_mq = self.rabbitmq_sentry
unit_ks = self.keystone_sentry
rel_mq_ci = unit_mq.relation('amqp', 'cinder:amqp')
rel_ks_ci = unit_ks.relation('identity-service',
'cinder:identity-service')
auth_uri = 'http://' + rel_ks_ci['auth_host'] + \
':' + rel_ks_ci['service_port'] + '/'
expected = {
'DEFAULT': {
'use_syslog': 'False',
'debug': 'False',
'verbose': 'False',
'iscsi_helper': 'tgtadm',
'volume_group': 'cinder-volumes',
'auth_strategy': 'keystone',
'volumes_dir': '/var/lib/cinder/volumes',
'enabled_backends': 'cinder-ceph',
'backup_driver': 'cinder.backup.drivers.ceph',
'backup_ceph_pool': 'cinder-backup',
'backup_ceph_user': 'cinder-backup'
},
'keystone_authtoken': {
'admin_user': rel_ks_ci['service_username'],
'admin_password': rel_ks_ci['service_password'],
'admin_tenant_name': rel_ks_ci['service_tenant'],
'auth_uri': auth_uri
},
'cinder-ceph': {
'volume_backend_name': 'cinder-ceph',
'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
'rbd_pool': 'cinder-ceph',
'rbd_user': 'cinder-ceph'
},
}
expected_rmq = {
'rabbit_userid': 'cinder',
'rabbit_virtual_host': 'openstack',
'rabbit_password': rel_mq_ci['password'],
'rabbit_host': rel_mq_ci['hostname'],
}
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
expected['oslo_messaging_rabbit'] = expected_rmq
else:
# Juno or earlier
expected['DEFAULT'].update(expected_rmq)
expected['keystone_authtoken']['auth_host'] = \
rel_ks_ci['auth_host']
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "cinder config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_301_cinder_ceph_config(self):
"""Verify the data in the ceph.conf file."""
u.log.debug('Checking cinder ceph config file data...')
# NOTE(beisner): disabled pending lp#1468511 landing in the cinder
# charm to resolve leading spaces in the ceph.conf template. That
# is necessary as configparser treats lines with leading spaces as
# continuation lines, and this test fails.
u.log.warn('Disabled due to bug lp 1468511')
return
unit = self.cinder_sentry
conf = '/etc/ceph/ceph.conf'
expected = {
'global': {
'auth_supported': 'none',
'keyring': '/etc/ceph/$cluster.$name.keyring',
'mon host': u.not_null,
'log to syslog': 'false'
}
}
for section, pairs in expected.iteritems():
ret = u.validate_config_data(unit, conf, section, pairs)
if ret:
message = "cinder ceph config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message)
def test_400_cinder_api_connection(self):
"""Simple api call to check service is up and responding"""
u.log.debug('Checking basic cinder api functionality...')
check = list(self.cinder.volumes.list())
u.log.debug('Cinder api check (volumes.list): {}'.format(check))
assert(check == [])
def test_401_create_delete_volume(self):
"""Create a cinder volume and delete it."""
u.log.debug('Creating, checking and deleting cinder volume...')
vol_new = u.create_cinder_volume(self.cinder)
vol_id = vol_new.id
u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume")
def test_409_ceph_check_osd_pools(self):
"""Check osd pools on all ceph units, expect them to be
identical, and expect specific pools to be present."""
u.log.debug('Checking pools on ceph units...')
expected_pools = self.get_ceph_expected_pools()
# Override expected pools
if 'glance' in expected_pools:
expected_pools.remove('glance')
if 'cinder' in expected_pools:
expected_pools.remove('cinder')
if 'cinder-backup' not in expected_pools:
expected_pools.append('cinder-backup')
if 'cinder-ceph' not in expected_pools:
expected_pools.append('cinder-ceph')
results = []
sentries = [
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
# Check for presence of expected pools on each unit
u.log.debug('Expected pools: {}'.format(expected_pools))
for sentry_unit in sentries:
pools = u.get_ceph_pools(sentry_unit)
results.append(pools)
for expected_pool in expected_pools:
if expected_pool not in pools:
msg = ('{} does not have pool: '
'{}'.format(sentry_unit.info['unit_name'],
expected_pool))
amulet.raise_status(amulet.FAIL, msg=msg)
u.log.debug('{} has (at least) the expected '
'pools.'.format(sentry_unit.info['unit_name']))
# Check that all units returned the same pool name:id data
ret = u.validate_list_of_identical_dicts(results)
if ret:
u.log.debug('Pool list results: {}'.format(results))
msg = ('{}; Pool list results are not identical on all '
'ceph units.'.format(ret))
amulet.raise_status(amulet.FAIL, msg=msg)
else:
u.log.debug('Pool list on all ceph units produced the '
'same results (OK).')
def backup_volume(self, cinder, volume, bak_name="demo-bak"):
"""Create cinder volume backup. Wait for the new backup status to reach
the expected status, validate and return a resource pointer.
:param volume: volume to be backed up
:param bak_name: cinder volume backup display name
:returns: cinder backup pointer
"""
try:
bak_new = cinder.backups.create(volume.id, name=bak_name)
bak_id = bak_new.id
except Exception as e:
msg = 'Failed to create backup: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
# Wait for backup to reach available status
ret = u.resource_reaches_status(cinder.backups, bak_id,
expected_stat="available",
msg="Backup status wait")
if not ret:
msg = 'Cinder backup failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
return bak_new
def restore_volume(self, cinder, backup):
"""Restore cinder volume from backup.
:param backup: backup to restore from
"""
try:
cinder.restores.restore(backup.id)
except Exception as e:
msg = 'Failed to restore volume: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
# Wait for backup to reach available status
ret = u.resource_reaches_status(cinder.backups, backup.id,
expected_stat="available",
msg="Backup status wait")
if not ret:
msg = 'Cinder backup failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
def test_410_cinder_vol_create_backup_delete_restore_pool_inspect(self):
"""Create, backup, delete, restore a ceph-backed cinder volume, and
inspect ceph cinder pool object count as the volume is created
and deleted."""
sentry_unit = self.ceph0_sentry
obj_count_samples = []
pool_size_samples = []
pools = u.get_ceph_pools(self.ceph0_sentry)
expected_pool = 'cinder-ceph'
cinder_ceph_pool = pools[expected_pool]
# Check ceph cinder pool object count, disk space usage and pool name
u.log.debug('Checking ceph cinder pool original samples...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
sentry_unit, cinder_ceph_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
if pool_name != expected_pool:
msg = ('Ceph pool {} unexpected name (actual, expected): '
'{}. {}'.format(cinder_ceph_pool,
pool_name, expected_pool))
amulet.raise_status(amulet.FAIL, msg=msg)
# Create ceph-backed cinder volume
cinder_vol = u.create_cinder_volume(self.cinder)
# Backup the volume
vol_backup = self.backup_volume(self.cinder, cinder_vol)
# Delete the volume
u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")
# Restore the volume
self.restore_volume(self.cinder, vol_backup)
# Delete the backup
u.delete_resource(self.cinder.backups, vol_backup, msg="cinder backup")
# Re-check ceph cinder pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph cinder pool samples after volume create...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
sentry_unit, cinder_ceph_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
name = "demo-vol"
vols = self.cinder.volumes.list()
cinder_vols = [v for v in vols if v.display_name == name]
if not cinder_vols:
# NOTE(hopem): it appears that at some point cinder-backup stopped
# restoring volume metadata properly so revert to default name if
# original is not found
name = "restore_backup_%s" % (vol_backup.id)
cinder_vols = [v for v in vols if v.display_name == name]
if not cinder_vols:
msg = ("Could not find restore vol '%s' in %s" %
(name, [v.display_name for v in vols]))
u.log.error(msg)
amulet.raise_status(amulet.FAIL, msg=msg)
cinder_vol = cinder_vols[0]
# Delete restored cinder volume
u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")
# Final check, ceph cinder pool object count and disk usage
time.sleep(10)
u.log.debug('Checking ceph cinder pool after volume delete...')
pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
sentry_unit, cinder_ceph_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
# Validate ceph cinder pool object count samples over time
ret = u.validate_ceph_pool_samples(obj_count_samples,
"cinder pool object count")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
# Validate ceph cinder pool disk space usage samples over time
ret = u.validate_ceph_pool_samples(pool_size_samples,
"cinder pool disk usage")
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
def test_499_ceph_cmds_exit_zero(self):
"""Check basic functionality of ceph cli commands against
all ceph units, and the cinder-backup unit."""
sentry_units = [
self.cinder_backup_sentry,
self.ceph0_sentry,
self.ceph1_sentry,
self.ceph2_sentry
]
commands = [
'sudo ceph health',
'sudo ceph mds stat',
'sudo ceph pg stat',
'sudo ceph osd stat',
'sudo ceph mon stat',
]
ret = u.check_commands_on_units(commands, sentry_units)
if ret:
amulet.raise_status(amulet.FAIL, msg=ret)
@ -0,0 +1,38 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa
@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
@ -0,0 +1,95 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import os
import six
class AmuletDeployment(object):
"""Amulet deployment.
This class provides generic Amulet deployment and test runner
methods.
"""
def __init__(self, series=None):
"""Initialize the deployment environment."""
self.series = None
if series:
self.series = series
self.d = amulet.Deployment(series=self.series)
else:
self.d = amulet.Deployment()
def _add_services(self, this_service, other_services):
"""Add services.
Add services to the deployment where this_service is the local charm
that we're testing and other_services are the other services that
are being used in the local amulet tests.
"""
if this_service['name'] != os.path.basename(os.getcwd()):
s = this_service['name']
msg = "The charm's root directory name needs to be {}".format(s)
amulet.raise_status(amulet.FAIL, msg=msg)
if 'units' not in this_service:
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'],
constraints=this_service.get('constraints'))
for svc in other_services:
if 'location' in svc:
branch_location = svc['location']
elif self.series:
branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
else:
branch_location = None
if 'units' not in svc:
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
constraints=svc.get('constraints'))
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in six.iteritems(relations):
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _deploy(self):
"""Deploy environment and wait for all hooks to finish executing."""
try:
self.d.setup(timeout=900)
self.d.sentry.wait(timeout=900)
except amulet.helpers.TimeoutError:
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
except Exception:
raise
def run_tests(self):
"""Run all of the methods that are prefixed with 'test_'."""
for test in dir(self):
if test.startswith('test_'):
getattr(self, test)()
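Note that discovery is plain lexical sorting via dir(), which is why the test modules rely on zero-padded numeric prefixes (see tests/README). Illustration:
# Illustrative: lexical order is what sequences the tests.
names = ['test_200_relations', 'test_100_services', 'test_409_pools']
print(sorted(names))  # ['test_100_services', 'test_200_relations', 'test_409_pools']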
@ -0,0 +1,818 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import io
import json
import logging
import os
import re
import socket
import subprocess
import sys
import time
import uuid
import amulet
import distro_info
import six
from six.moves import configparser
if six.PY3:
from urllib import parse as urlparse
else:
import urlparse
class AmuletUtils(object):
"""Amulet utilities.
This class provides common utility functions that are used by Amulet
tests.
"""
def __init__(self, log_level=logging.ERROR):
self.log = self.get_logger(level=log_level)
self.ubuntu_releases = self.get_ubuntu_releases()
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def valid_ip(self, ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
else:
return False
def valid_url(self, url):
p = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
if p.match(url):
return True
else:
return False
def get_ubuntu_release_from_sentry(self, sentry_unit):
"""Get Ubuntu release codename from sentry unit.
:param sentry_unit: amulet sentry/service unit pointer
:returns: list of strings - release codename, failure message
"""
msg = None
cmd = 'lsb_release -cs'
release, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} lsb_release: {}'.format(
sentry_unit.info['unit_name'], release))
else:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, release, code))
if release not in self.ubuntu_releases:
msg = ("Release ({}) not found in Ubuntu releases "
"({})".format(release, self.ubuntu_releases))
return release, msg
def validate_services(self, commands):
"""Validate that lists of commands succeed on service units. Can be
used to verify system services are running on the corresponding
service units.
:param commands: dict with sentry keys and arbitrary command list vals
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# /!\ DEPRECATION WARNING (beisner):
# New and existing tests should be rewritten to use
# validate_services_by_name() as it is aware of init systems.
self.log.warn('DEPRECATION WARNING: use '
'validate_services_by_name instead of validate_services '
'due to init system differences.')
for k, v in six.iteritems(commands):
for cmd in v:
output, code = k.run(cmd)
self.log.debug('{} `{}` returned '
'{}'.format(k.info['unit_name'],
cmd, code))
if code != 0:
return "command `{}` returned {}".format(cmd, str(code))
return None
def validate_services_by_name(self, sentry_services):
"""Validate system service status by service name, automatically
detecting init system based on Ubuntu release codename.
:param sentry_services: dict with sentry keys and svc list values
:returns: None if successful, Failure string message otherwise
"""
self.log.debug('Checking status of system services...')
# Point at which systemd became a thing
systemd_switch = self.ubuntu_releases.index('vivid')
for sentry_unit, services_list in six.iteritems(sentry_services):
# Get lsb_release codename from unit
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
if ret:
return ret
for service_name in services_list:
if (self.ubuntu_releases.index(release) >= systemd_switch or
service_name in ['rabbitmq-server', 'apache2']):
# init is systemd (or regular sysv)
cmd = 'sudo service {} status'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0
elif self.ubuntu_releases.index(release) < systemd_switch:
# init is upstart
cmd = 'sudo status {}'.format(service_name)
output, code = sentry_unit.run(cmd)
service_running = code == 0 and "start/running" in output
self.log.debug('{} `{}` returned '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code))
if not service_running:
return u"command `{}` returned {} {}".format(
cmd, output, str(code))
return None
def _get_config(self, unit, filename):
"""Get a ConfigParser object for parsing a unit's config file."""
file_contents = unit.file_contents(filename)
# NOTE(beisner): by default, ConfigParser does not handle options
# with no value, such as the flags used in the mysql my.cnf file.
# https://bugs.python.org/issue7005
config = configparser.ConfigParser(allow_no_value=True)
config.readfp(io.StringIO(file_contents))
return config
def validate_config_data(self, sentry_unit, config_file, section,
expected):
"""Validate config file data.
Verify that the specified section of the config file contains
the expected option key:value pairs.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
self.log.debug('Validating config file data ({} in {} on {})'
'...'.format(section, config_file,
sentry_unit.info['unit_name']))
config = self._get_config(sentry_unit, config_file)
if section != 'DEFAULT' and not config.has_section(section):
return "section [{}] does not exist".format(section)
for k in expected.keys():
if not config.has_option(section, k):
return "section [{}] is missing option {}".format(section, k)
actual = config.get(section, k)
v = expected[k]
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
# handle explicit values
if actual != v:
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
# handle function pointers, such as not_null or valid_ip
elif not v(actual):
return "section [{}] {}:{} != expected {}:{}".format(
section, k, actual, k, expected[k])
return None
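# Example usage (editor's sketch; the file path, section and sentry are
# illustrative assumptions). Expected values may be literals or callables
# such as u.not_null or u.valid_ip defined on this class:
#   expected = {'debug': 'True',
#               'rabbit_host': u.valid_ip,
#               'admin_token': u.not_null}
#   ret = u.validate_config_data(self.cinder_sentry,
#                                '/etc/cinder/cinder.conf',
#                                'DEFAULT', expected)
#   if ret:
#       amulet.raise_status(amulet.FAIL, msg=ret)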
def _validate_dict_data(self, expected, actual):
"""Validate dictionary data.
Compare expected dictionary data vs actual dictionary data.
The values in the 'expected' dictionary can be strings, bools, ints,
longs, or can be a function that evaluates a variable and returns a
bool.
"""
self.log.debug('actual: {}'.format(repr(actual)))
self.log.debug('expected: {}'.format(repr(expected)))
for k, v in six.iteritems(expected):
if k in actual:
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, six.integer_types)):
# handle explicit values
if v != actual[k]:
return "{}:{}".format(k, actual[k])
# handle function pointers, such as not_null or valid_ip
elif not v(actual[k]):
return "{}:{}".format(k, actual[k])
else:
return "key '{}' does not exist".format(k)
return None
def validate_relation_data(self, sentry_unit, relation, expected):
"""Validate actual relation data based on expected relation data."""
actual = sentry_unit.relation(relation[0], relation[1])
return self._validate_dict_data(expected, actual)
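# Example usage (editor's sketch; relation names and expected data are
# illustrative assumptions):
#   relation = ['shared-db', 'mysql:shared-db']
#   expected = {'private-address': u.valid_ip, 'database': 'cinder'}
#   ret = u.validate_relation_data(self.cinder_sentry, relation, expected)
#   if ret:
#       message = u.relation_error('cinder shared-db', ret)
#       amulet.raise_status(amulet.FAIL, msg=message)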
def _validate_list_data(self, expected, actual):
"""Compare expected list vs actual list data."""
for e in expected:
if e not in actual:
return "expected item {} not found in actual list".format(e)
return None
def not_null(self, string):
"""Return True if the given value is not None."""
return string is not None
def _get_file_mtime(self, sentry_unit, filename):
"""Get last modification time of file."""
return sentry_unit.file_stat(filename)['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
"""Get last modification time of directory."""
return sentry_unit.directory_stat(directory)['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
"""Get start time of a process based on the last modification time
of the /proc/pid directory.
:sentry_unit: The sentry unit to check for the service on
:service: service name to look for in process table
:pgrep_full: [Deprecated] Use full command line search mode with pgrep
:returns: epoch time of service process start
"""
if pgrep_full is not None:
# /!\ DEPRECATION WARNING (beisner):
# No longer implemented, as pidof is now used instead of pgrep.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
'longer implemented re: lp 1474030.')
pid_list = self.get_process_id_list(sentry_unit, service)
pid = pid_list[0]
proc_dir = '/proc/{}'.format(pid)
self.log.debug('Pid for {} on {}: {}'.format(
service, sentry_unit.info['unit_name'], pid))
return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
pgrep_full=None, sleep_time=20):
"""Check if service was restarted.
Compare a service's start time vs a file's last modification time
(such as a config file for that service) to determine if the service
has been restarted.
"""
# /!\ DEPRECATION WARNING (beisner):
# This method is prone to races in that no before-time is known.
# Use validate_service_config_changed instead.
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
self.log.warn('DEPRECATION WARNING: use '
'validate_service_config_changed instead of '
'service_restarted due to known races.')
time.sleep(sleep_time)
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
self._get_file_mtime(sentry_unit, filename)):
return True
else:
return False
def service_restarted_since(self, sentry_unit, mtime, service,
pgrep_full=None, sleep_time=20,
retry_count=30, retry_sleep_time=10):
"""Check if service was been started after a given time.
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if service was found and its start time is newer than
mtime; False if the service start time is older than mtime or
the service was not found.
"""
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s service restarted since %s on '
'%s' % (service, mtime, unit_name))
time.sleep(sleep_time)
proc_start_time = None
tries = 0
while tries <= retry_count and not proc_start_time:
try:
proc_start_time = self._get_proc_start_time(sentry_unit,
service,
pgrep_full)
self.log.debug('Attempt {} to get {} proc start time on {} '
'OK'.format(tries, service, unit_name))
except IOError as e:
# NOTE(beisner) - race avoidance, proc may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} proc start time on {} '
'failed\n{}'.format(tries, service,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not proc_start_time:
self.log.warn('No proc start time found, assuming service did '
'not start')
return False
if proc_start_time >= mtime:
self.log.debug('Proc start time is newer than provided mtime '
'(%s >= %s) on %s (OK)' % (proc_start_time,
mtime, unit_name))
return True
else:
self.log.warn('Proc start time (%s) is older than provided mtime '
'(%s) on %s, service did not '
'restart' % (proc_start_time, mtime, unit_name))
return False
def config_updated_since(self, sentry_unit, filename, mtime,
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check if file was modified after a given time.
Args:
sentry_unit (sentry): The sentry unit to check the file mtime on
filename (string): The file to check mtime of
mtime (float): The epoch time to check against
sleep_time (int): Initial sleep time (s) before looking for file
retry_sleep_time (int): Time (s) to sleep between retries
retry_count (int): If file is not found, how many times to retry
Returns:
bool: True if file was modified more recently than mtime, False if
file was modified before mtime, or if file not found.
"""
unit_name = sentry_unit.info['unit_name']
self.log.debug('Checking that %s updated since %s on '
'%s' % (filename, mtime, unit_name))
time.sleep(sleep_time)
file_mtime = None
tries = 0
while tries <= retry_count and not file_mtime:
try:
file_mtime = self._get_file_mtime(sentry_unit, filename)
self.log.debug('Attempt {} to get {} file mtime on {} '
'OK'.format(tries, filename, unit_name))
except IOError as e:
# NOTE(beisner) - race avoidance, file may not exist yet.
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
self.log.debug('Attempt {} to get {} file mtime on {} '
'failed\n{}'.format(tries, filename,
unit_name, e))
time.sleep(retry_sleep_time)
tries += 1
if not file_mtime:
self.log.warn('Could not determine file mtime, assuming '
'file does not exist')
return False
if file_mtime >= mtime:
self.log.debug('File mtime is newer than provided mtime '
'(%s >= %s) on %s (OK)' % (file_mtime,
mtime, unit_name))
return True
else:
self.log.warn('File mtime is older than provided mtime '
'(%s < %s) on %s' % (file_mtime,
mtime, unit_name))
return False
def validate_service_config_changed(self, sentry_unit, mtime, service,
filename, pgrep_full=None,
sleep_time=20, retry_count=30,
retry_sleep_time=10):
"""Check service and file were updated after mtime
Args:
sentry_unit (sentry): The sentry unit to check for the service on
mtime (float): The epoch time to check against
service (string): service name to look for in process table
filename (string): The file to check mtime of
pgrep_full: [Deprecated] Use full command line search mode with pgrep
sleep_time (int): Initial sleep in seconds to pass to test helpers
retry_count (int): If service is not found, how many times to retry
retry_sleep_time (int): Time in seconds to wait between retries
Typical Usage:
u = OpenStackAmuletUtils(ERROR)
...
mtime = u.get_sentry_time(self.cinder_sentry)
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
if not u.validate_service_config_changed(self.cinder_sentry,
mtime,
'cinder-api',
'/etc/cinder/cinder.conf')
amulet.raise_status(amulet.FAIL, msg='update failed')
Returns:
bool: True if both service and file were updated/restarted after
mtime; False if the service start time or file mtime is older
than mtime, or if the service or file was not found.
"""
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
# used instead of pgrep. pgrep_full is still passed through to ensure
# deprecation WARNS. lp1474030
service_restart = self.service_restarted_since(
sentry_unit, mtime,
service,
pgrep_full=pgrep_full,
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
config_update = self.config_updated_since(
sentry_unit,
filename,
mtime,
sleep_time=sleep_time,
retry_count=retry_count,
retry_sleep_time=retry_sleep_time)
return service_restart and config_update
def get_sentry_time(self, sentry_unit):
"""Return current epoch time on a sentry"""
cmd = "date +'%s'"
return float(sentry_unit.run(cmd)[0])
def relation_error(self, name, data):
return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
return 'unexpected endpoint data in {} - {}'.format(name, data)
def get_ubuntu_releases(self):
"""Return a list of all Ubuntu releases in order of release."""
_d = distro_info.UbuntuDistroInfo()
_release_list = _d.all
return _release_list
def file_to_url(self, file_rel_path):
"""Convert a relative file path to a file URL."""
_abs_path = os.path.abspath(file_rel_path)
return urlparse.urlparse(_abs_path, scheme='file').geturl()
def check_commands_on_units(self, commands, sentry_units):
"""Check that all commands in a list exit zero on all
sentry units in a list.
:param commands: list of bash commands
:param sentry_units: list of sentry unit pointers
:returns: None if successful; Failure message otherwise
"""
self.log.debug('Checking exit codes for {} commands on {} '
'sentry units...'.format(len(commands),
len(sentry_units)))
for sentry_unit in sentry_units:
for cmd in commands:
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
return ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
return None
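# Example usage (editor's sketch; the commands and sentry list are
# illustrative assumptions):
#   commands = ['sudo ls /etc/cinder', 'sudo cat /etc/cinder/cinder.conf']
#   sentry_units = [self.cinder_sentry]
#   ret = u.check_commands_on_units(commands, sentry_units)
#   if ret:
#       amulet.raise_status(amulet.FAIL, msg=ret)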
def get_process_id_list(self, sentry_unit, process_name,
expect_success=True):
"""Get a list of process ID(s) from a single sentry juju unit
for a single process name.
:param sentry_unit: Amulet sentry instance (juju unit)
:param process_name: Process name
:param expect_success: If False, expect the PID to be missing,
raise if it is present.
:returns: List of process IDs
"""
cmd = 'pidof -x {}'.format(process_name)
if not expect_success:
cmd += " || exit 0 && exit 1"
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output).split()
def get_unit_process_ids(self, unit_processes, expect_success=True):
"""Construct a dict containing unit sentries, process names, and
process IDs.
:param unit_processes: A dictionary of Amulet sentry instance
to list of process names.
:param expect_success: if False expect the processes to not be
running, raise if they are.
:returns: Dictionary of Amulet sentry instance to dictionary
of process names to PIDs.
"""
pid_dict = {}
for sentry_unit, process_list in six.iteritems(unit_processes):
pid_dict[sentry_unit] = {}
for process in process_list:
pids = self.get_process_id_list(
sentry_unit, process, expect_success=expect_success)
pid_dict[sentry_unit].update({process: pids})
return pid_dict
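# Example usage (editor's sketch; sentries and process names are
# illustrative assumptions):
#   unit_processes = {self.cinder_sentry: ['cinder-api',
#                                          'cinder-scheduler'],
#                     self.keystone_sentry: ['keystone']}
#   actual_pids = u.get_unit_process_ids(unit_processes)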
def validate_unit_process_ids(self, expected, actual):
"""Validate process id quantities for services on units."""
self.log.debug('Checking units for running processes...')
self.log.debug('Expected PIDs: {}'.format(expected))
self.log.debug('Actual PIDs: {}'.format(actual))
if len(actual) != len(expected):
return ('Unit count mismatch. expected, actual: {}, '
'{} '.format(len(expected), len(actual)))
for (e_sentry, e_proc_names) in six.iteritems(expected):
e_sentry_name = e_sentry.info['unit_name']
if e_sentry in actual.keys():
a_proc_names = actual[e_sentry]
else:
return ('Expected sentry ({}) not found in actual dict data.'
'{}'.format(e_sentry_name, e_sentry))
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
return ('Process name count mismatch. expected, actual: {}, '
'{}'.format(len(e_proc_names), len(a_proc_names)))
for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
zip(e_proc_names.items(), a_proc_names.items()):
if e_proc_name != a_proc_name:
return ('Process name mismatch. expected, actual: {}, '
'{}'.format(e_proc_name, a_proc_name))
a_pids_length = len(a_pids)
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
e_pids_length, a_pids_length,
a_pids))
# If expected is not bool, ensure PID quantities match
if not isinstance(e_pids_length, bool) and \
a_pids_length != e_pids_length:
return fail_msg
# If expected is bool True, ensure 1 or more PIDs exist
elif isinstance(e_pids_length, bool) and \
e_pids_length is True and a_pids_length < 1:
return fail_msg
# If expected is bool False, ensure 0 PIDs exist
elif isinstance(e_pids_length, bool) and \
e_pids_length is False and a_pids_length != 0:
return fail_msg
else:
self.log.debug('PID check OK: {} {} {}: '
'{}'.format(e_sentry_name, e_proc_name,
e_pids_length, a_pids))
return None
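# Example usage (editor's sketch; counts and names are illustrative
# assumptions). Expected counts may be exact integers, or booleans:
# True means one-or-more PIDs, False means zero PIDs:
#   expected = {self.cinder_sentry: {'cinder-api': 3,
#                                    'cinder-scheduler': 1,
#                                    'cinder-backup': True}}
#   actual = u.get_unit_process_ids(
#       {self.cinder_sentry: ['cinder-api', 'cinder-scheduler',
#                             'cinder-backup']})
#   ret = u.validate_unit_process_ids(expected, actual)
#   if ret:
#       amulet.raise_status(amulet.FAIL, msg=ret)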
def validate_list_of_identical_dicts(self, list_of_dicts):
"""Check that all dicts within a list are identical."""
hashes = []
for _dict in list_of_dicts:
hashes.append(hash(frozenset(_dict.items())))
self.log.debug('Hashes: {}'.format(hashes))
if len(set(hashes)) == 1:
self.log.debug('Dicts within list are identical')
else:
return 'Dicts within list are not identical'
return None
def validate_sectionless_conf(self, file_contents, expected):
"""A crude conf parser. Useful to inspect configuration files which
do not have section headers (as would be necessary in order to use
the configparser). Such as openstack-dashboard or rabbitmq confs."""
for line in file_contents.split('\n'):
if '=' in line:
# Split only on the first '=' so values may themselves contain '='.
args = line.split('=', 1)
if len(args) <= 1:
continue
key = args[0].strip()
value = args[1].strip()
if key in expected.keys():
if expected[key] != value:
msg = ('Config mismatch. Expected, actual: {}, '
'{}'.format(expected[key], value))
amulet.raise_status(amulet.FAIL, msg=msg)
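# Example usage (editor's sketch; the file and key are illustrative
# assumptions):
#   contents = u.file_contents_safe(self.rmq_sentry,
#                                   '/etc/rabbitmq/rabbitmq-env.conf')
#   u.validate_sectionless_conf(contents, {'RABBITMQ_NODE_PORT': '5672'})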
def get_unit_hostnames(self, units):
"""Return a dict of juju unit names to hostnames."""
host_names = {}
for unit in units:
host_names[unit.info['unit_name']] = \
str(unit.file_contents('/etc/hostname').strip())
self.log.debug('Unit host names: {}'.format(host_names))
return host_names
def run_cmd_unit(self, sentry_unit, cmd):
"""Run a command on a unit, return the output and exit code."""
output, code = sentry_unit.run(cmd)
if code == 0:
self.log.debug('{} `{}` command returned {} '
'(OK)'.format(sentry_unit.info['unit_name'],
cmd, code))
else:
msg = ('{} `{}` command returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return str(output), code
def file_exists_on_unit(self, sentry_unit, file_name):
"""Check if a file exists on a unit."""
try:
sentry_unit.file_stat(file_name)
return True
except IOError:
return False
except Exception as e:
msg = 'Error checking file {}: {}'.format(file_name, e)
amulet.raise_status(amulet.FAIL, msg=msg)
def file_contents_safe(self, sentry_unit, file_name,
max_wait=60, fatal=False):
"""Get file contents from a sentry unit. Wrap amulet file_contents
with retry logic to address races where a file checks as existing,
but no longer exists by the time file_contents is called.
Return None if file not found. Optionally raise if fatal is True."""
unit_name = sentry_unit.info['unit_name']
file_contents = False
tries = 0
while not file_contents and tries < (max_wait / 4):
try:
file_contents = sentry_unit.file_contents(file_name)
except IOError:
self.log.debug('Attempt {} to open file {} from {} '
'failed'.format(tries, file_name,
unit_name))
time.sleep(4)
tries += 1
if file_contents:
return file_contents
elif not fatal:
return None
elif fatal:
msg = 'Failed to get file contents from unit.'
amulet.raise_status(amulet.FAIL, msg)
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
"""Open a TCP socket to check for a listening sevice on a host.
:param host: host name or IP address, default to localhost
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:returns: True if successful, False if connect failed
"""
# Resolve host name if possible
try:
connect_host = socket.gethostbyname(host)
host_human = "{} ({})".format(connect_host, host)
except socket.error as e:
self.log.warn('Unable to resolve address: '
'{} ({}) Trying anyway!'.format(host, e))
connect_host = host
host_human = connect_host
# Attempt socket connection
try:
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
knock.settimeout(timeout)
knock.connect((connect_host, port))
knock.close()
self.log.debug('Socket connect OK for host '
'{} on port {}.'.format(host_human, port))
return True
except socket.error as e:
self.log.debug('Socket connect FAIL for'
' {} port {} ({})'.format(host_human, port, e))
return False
def port_knock_units(self, sentry_units, port=22,
timeout=15, expect_success=True):
"""Open a TCP socket to check for a listening sevice on each
listed juju unit.
:param sentry_units: list of sentry unit pointers
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:param expect_success: True by default, set False to invert logic
:returns: None if successful, Failure message otherwise
"""
for unit in sentry_units:
host = unit.info['public-address']
connected = self.port_knock_tcp(host, port, timeout)
if not connected and expect_success:
return 'Socket connect failed.'
elif connected and not expect_success:
return 'Socket connected unexpectedly.'
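# Example usage (editor's sketch; ports are illustrative assumptions):
# confirm a service is listening on all units, then that a disabled
# port is not:
#   ret = u.port_knock_units(sentry_units, port=5672)
#   if ret:
#       amulet.raise_status(amulet.FAIL, msg=ret)
#   ret = u.port_knock_units(sentry_units, port=5671,
#                            expect_success=False)
#   if ret:
#       amulet.raise_status(amulet.FAIL, msg=ret)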
def get_uuid_epoch_stamp(self):
"""Returns a stamp string based on uuid4 and epoch time. Useful in
generating test messages which need to be unique-ish."""
return '[{}-{}]'.format(uuid.uuid4(), time.time())
# amulet juju action helpers:
def run_action(self, unit_sentry, action,
_check_output=subprocess.check_output):
"""Run the named action on a given unit sentry.
_check_output parameter is used for dependency injection.
@return action_id.
"""
unit_id = unit_sentry.info["unit_name"]
command = ["juju", "action", "do", "--format=json", unit_id, action]
self.log.info("Running command: %s\n" % " ".join(command))
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
action_id = data[u'Action queued with id']
return action_id
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
"""Wait for a given action, returning if it completed or not.
_check_output parameter is used for dependency injection.
"""
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
action_id]
output = _check_output(command, universal_newlines=True)
data = json.loads(output)
return data.get(u"status") == "completed"
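# Example usage (editor's sketch; the 'pause' action is an illustrative
# assumption about the charm under test):
#   action_id = u.run_action(self.cinder_sentry, 'pause')
#   assert u.wait_on_action(action_id), 'pause action failed'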
def status_get(self, unit):
"""Return the current service status of this unit."""
raw_status, return_code = unit.run(
"status-get --format=json --include-data")
if return_code != 0:
return ("unknown", "")
status = json.loads(raw_status)
return (status["status"], status["message"])

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

View File

@ -0,0 +1,302 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the lp:~openstack-charmers namespace
base_charms = ['mysql', 'mongodb', 'nrpe']
# Force these charms to current series even when using an older series.
# ie. Use trusty/nrpe even when series is precise, as the P charm
# does not possess the necessary external master config and hooks.
force_series_current = ['nrpe']
if self.series in ['precise', 'trusty']:
base_series = self.series
else:
base_series = self.current_next
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if self.stable:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
# Charms which should use the source config option
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
# Charms which can not use openstack-origin, ie. many subordinates
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
'cinder-backup']
if self.openstack:
for svc in services:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=1800):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjunction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjunction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
self.log.info('Waiting for extended status on units...')
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
self.wily_liberty, self.trusty_mitaka,
self.xenial_mitaka) = range(14)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('utopic', None): self.utopic_juno,
('vivid', None): self.vivid_kilo,
('wily', None): self.wily_liberty,
('xenial', None): self.xenial_mitaka}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier
pools = [
'data',
'metadata',
'rbd',
'cinder',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools

View File

@ -0,0 +1,985 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import amulet
import json
import logging
import os
import re
import six
import time
import urllib
import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import pika
import swiftclient
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charm tests.
"""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
self.log.debug('Validating endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('Validating service catalog endpoint data...')
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate tenant data.
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('Validating tenant data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate role data.
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('Validating role data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('Validating user data...')
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate flavor data.
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('Validating flavor data...')
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_cinder_admin(self, keystone_sentry, username,
password, tenant):
"""Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens.
service_ip = \
keystone_sentry.relation('shared-db',
'mysql:shared-db')['private-address']
ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
self.log.debug('Authenticating keystone user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
self.log.debug('Authenticating glance admin...')
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_heat_admin(self, keystone):
"""Authenticates the admin user with heat."""
self.log.debug('Authenticating heat admin...')
ep = keystone.service_catalog.url_for(service_type='orchestration',
endpoint_type='publicURL')
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with swift api."""
self.log.debug('Authenticating swift user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return swiftclient.Connection(authurl=ep,
user=user,
key=password,
tenant_name=tenant,
auth_version='2.0')
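# Example usage (editor's sketch; credentials and sentries are
# illustrative assumptions): authenticate as admin via keystone, then
# obtain a client as a regular tenant user:
#   self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
#                                                 user='admin',
#                                                 password='openstack',
#                                                 tenant='admin')
#   self.nova = u.authenticate_nova_user(self.keystone, user='demo',
#                                        password='password',
#                                        tenant='demo')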
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance,
validate and return a resource pointer.
:param glance: pointer to authenticated glance connection
:param image_name: display name for new image
:returns: glance image pointer
"""
self.log.debug('Creating glance cirros image '
'({})...'.format(image_name))
# Download cirros image
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open('http://download.cirros-cloud.net/version/released')
version = f.read().strip()
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(local_path):
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
version, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
# Create glance image
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
# Wait for image to reach active status
img_id = image.id
ret = self.resource_reaches_status(glance.images, img_id,
expected_stat='active',
msg='Image status wait')
if not ret:
msg = 'Glance image failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
# Re-validate new image
self.log.debug('Validating image attributes...')
val_img_name = glance.images.get(img_id).name
val_img_stat = glance.images.get(img_id).status
val_img_pub = glance.images.get(img_id).is_public
val_img_cfmt = glance.images.get(img_id).container_format
val_img_dfmt = glance.images.get(img_id).disk_format
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
'container fmt:{} disk fmt:{}'.format(
val_img_name, val_img_pub, img_id,
val_img_stat, val_img_cfmt, val_img_dfmt))
if val_img_name == image_name and val_img_stat == 'active' \
and val_img_pub is True and val_img_cfmt == 'bare' \
and val_img_dfmt == 'qcow2':
self.log.debug(msg_attr)
else:
msg = ('Image validation failed, {}'.format(msg_attr))
amulet.raise_status(amulet.FAIL, msg=msg)
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_image.')
self.log.debug('Deleting glance image ({})...'.format(image))
return self.delete_resource(glance.images, image, msg='glance image')
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
self.log.debug('Creating instance '
'({}|{}|{})'.format(instance_name, image_name, flavor))
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status != 'ACTIVE':
self.log.error('instance creation timed out')
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'delete_resource instead of delete_instance.')
self.log.debug('Deleting instance ({})...'.format(instance))
return self.delete_resource(nova.servers, instance,
msg='nova instance')
def create_or_get_keypair(self, nova, keypair_name="testkey"):
"""Create a new keypair, or return pointer if it already exists."""
try:
_keypair = nova.keypairs.get(keypair_name)
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
except:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
img_id=None, src_vol_id=None, snap_id=None):
"""Create cinder volume, optionally from a glance image, OR
optionally as a clone of an existing volume, OR optionally
from a snapshot. Wait for the new volume status to reach
the expected status, validate and return a resource pointer.
:param vol_name: cinder volume display name
:param vol_size: size in gigabytes
:param img_id: optional glance image id
:param src_vol_id: optional source volume id to clone
:param snap_id: optional snapshot id to use
:returns: cinder volume pointer
"""
# Handle parameter input and avoid impossible combinations
if img_id and not src_vol_id and not snap_id:
# Create volume from image
self.log.debug('Creating cinder volume from glance image...')
bootable = 'true'
elif src_vol_id and not img_id and not snap_id:
# Clone an existing volume
self.log.debug('Cloning cinder volume...')
bootable = cinder.volumes.get(src_vol_id).bootable
elif snap_id and not src_vol_id and not img_id:
# Create volume from snapshot
self.log.debug('Creating cinder volume from snapshot...')
snap = cinder.volume_snapshots.find(id=snap_id)
vol_size = snap.size
snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
bootable = cinder.volumes.get(snap_vol_id).bootable
elif not img_id and not src_vol_id and not snap_id:
# Create volume
self.log.debug('Creating cinder volume...')
bootable = 'false'
else:
# Impossible combination of parameters
msg = ('Invalid method use - name:{} size:{} img_id:{} '
'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
img_id, src_vol_id,
snap_id))
amulet.raise_status(amulet.FAIL, msg=msg)
# Create new volume
try:
vol_new = cinder.volumes.create(display_name=vol_name,
imageRef=img_id,
size=vol_size,
source_volid=src_vol_id,
snapshot_id=snap_id)
vol_id = vol_new.id
except Exception as e:
msg = 'Failed to create volume: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
# Wait for volume to reach available status
ret = self.resource_reaches_status(cinder.volumes, vol_id,
expected_stat="available",
msg="Volume status wait")
if not ret:
msg = 'Cinder volume failed to reach expected state.'
amulet.raise_status(amulet.FAIL, msg=msg)
# Re-validate new volume
self.log.debug('Validating volume attributes...')
val_vol_name = cinder.volumes.get(vol_id).display_name
val_vol_boot = cinder.volumes.get(vol_id).bootable
val_vol_stat = cinder.volumes.get(vol_id).status
val_vol_size = cinder.volumes.get(vol_id).size
msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
'{} size:{}'.format(val_vol_name, vol_id,
val_vol_stat, val_vol_boot,
val_vol_size))
if val_vol_boot == bootable and val_vol_stat == 'available' \
and val_vol_name == vol_name and val_vol_size == vol_size:
self.log.debug(msg_attr)
else:
msg = ('Volume validation failed, {}'.format(msg_attr))
amulet.raise_status(amulet.FAIL, msg=msg)
return vol_new
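# Example usage (editor's sketch; names and ids are illustrative
# assumptions): create a bootable volume from a glance image, then
# clone it:
#   vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1,
#                                img_id=image.id)
#   clone = u.create_cinder_volume(cinder, vol_name='demo-vol-clone',
#                                  src_vol_id=vol.id)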
def delete_resource(self, resource, resource_id,
msg="resource", max_wait=120):
"""Delete one openstack resource, such as one instance, keypair,
image, volume, stack, etc., and confirm deletion within max wait time.
:param resource: pointer to os resource type, ex:glance_client.images
:param resource_id: unique name or id for the openstack resource
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, otherwise False
"""
self.log.debug('Deleting OpenStack resource '
'{} ({})'.format(resource_id, msg))
num_before = len(list(resource.list()))
resource.delete(resource_id)
tries = 0
num_after = len(list(resource.list()))
while num_after != (num_before - 1) and tries < (max_wait / 4):
self.log.debug('{} delete check: '
'{} [{}:{}] {}'.format(msg, tries,
num_before,
num_after,
resource_id))
time.sleep(4)
num_after = len(list(resource.list()))
tries += 1
self.log.debug('{}: expected, actual count = {}, '
'{}'.format(msg, num_before - 1, num_after))
if num_after == (num_before - 1):
return True
else:
self.log.error('{} delete timed out'.format(msg))
return False
def resource_reaches_status(self, resource, resource_id,
expected_stat='available',
msg='resource', max_wait=120):
"""Wait for an openstack resources status to reach an
expected status within a specified time. Useful to confirm that
nova instances, cinder vols, snapshots, glance images, heat stacks
and other resources eventually reach the expected status.
:param resource: pointer to os resource type, ex: heat_client.stacks
:param resource_id: unique id for the openstack resource
:param expected_stat: status to expect resource to reach
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, False if status is not reached
"""
tries = 0
resource_stat = resource.get(resource_id).status
while resource_stat != expected_stat and tries < (max_wait / 4):
self.log.debug('{} status check: '
'{} [{}:{}] {}'.format(msg, tries,
resource_stat,
expected_stat,
resource_id))
time.sleep(4)
resource_stat = resource.get(resource_id).status
tries += 1
self.log.debug('{}: expected, actual status = {}, '
'{}'.format(msg, resource_stat, expected_stat))
if resource_stat == expected_stat:
return True
else:
self.log.debug('{} never reached expected status: '
'{}'.format(resource_id, expected_stat))
return False
def get_ceph_osd_id_cmd(self, index):
"""Produce a shell command that will return a ceph-osd id."""
return ("`initctl list | grep 'ceph-osd ' | "
"awk 'NR=={} {{ print $2 }}' | "
"grep -o '[0-9]*'`".format(index + 1))
def get_ceph_pools(self, sentry_unit):
"""Return a dict of ceph pools from a single ceph unit, with
pool name as keys, pool id as vals."""
pools = {}
cmd = 'sudo ceph osd lspools'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
for pool in str(output).split(','):
pool_id_name = pool.split(' ')
if len(pool_id_name) == 2:
pool_id = pool_id_name[0]
pool_name = pool_id_name[1]
pools[pool_name] = int(pool_id)
self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
pools))
return pools
def get_ceph_df(self, sentry_unit):
"""Return dict of ceph df json output, including ceph pool state.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:returns: Dict of ceph df output
"""
cmd = 'sudo ceph df --format=json'
output, code = sentry_unit.run(cmd)
if code != 0:
msg = ('{} `{}` returned {} '
'{}'.format(sentry_unit.info['unit_name'],
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
return json.loads(output)
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
"""Take a sample of attributes of a ceph pool, returning ceph
pool name, object count and disk space used for the specified
pool ID number.
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
:param pool_id: Ceph pool ID
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))
return pool_name, obj_count, kb_used
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
"""Validate ceph pool samples taken over time, such as pool
object counts or pool kb used, before adding, after adding, and
after deleting items which affect those pool attributes. The
2nd element is expected to be greater than the 1st; 3rd is expected
to be less than the 2nd.
:param samples: List containing 3 data samples
:param sample_type: String for logging and usage context
:returns: None if successful, Failure message otherwise
"""
original, created, deleted = range(3)
if samples[created] <= samples[original] or \
samples[deleted] >= samples[created]:
return ('Ceph {} samples ({}) '
'unexpected.'.format(sample_type, samples))
else:
self.log.debug('Ceph {} samples (OK): '
'{}'.format(sample_type, samples))
return None
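# Example usage (editor's sketch; the pool id and test flow are
# illustrative assumptions): take three object-count samples of a pool,
# before creating, after creating, and after deleting a glance image:
#   obj_count_samples = []
#   # ... create/delete the image between samples ...
#   _, obj_count, _ = u.get_ceph_pool_sample(ceph_sentry, pool_id=2)
#   obj_count_samples.append(obj_count)
#   ret = u.validate_ceph_pool_samples(obj_count_samples,
#                                      'pool object count')
#   if ret:
#       amulet.raise_status(amulet.FAIL, msg=ret)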
# rabbitmq/amqp specific helpers:
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
"""Wait for rmq units extended status to show cluster readiness,
after an optional initial sleep period. Initial sleep is likely
necessary to be effective following a config change, as status
message may not instantly update to non-ready."""
if init_sleep:
time.sleep(init_sleep)
message = re.compile('^Unit is ready and clustered$')
deployment._auto_wait_for_status(message=message,
timeout=timeout,
include_only=['rabbitmq-server'])
def add_rmq_test_user(self, sentry_units,
username="testuser1", password="changeme"):
"""Add a test user via the first rmq juju unit, check connection as
the new user against all sentry units.
:param sentry_units: list of sentry unit pointers
:param username: amqp user name, default to testuser1
:param password: amqp user password
:returns: None if successful. Raise on error.
"""
self.log.debug('Adding rmq user ({})...'.format(username))
# Check that user does not already exist
cmd_user_list = 'rabbitmqctl list_users'
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
if username in output:
self.log.warning('User ({}) already exists, returning '
'gracefully.'.format(username))
return
perms = '".*" ".*" ".*"'
cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
'rabbitmqctl set_permissions {} {}'.format(username, perms)]
# Add user via first unit
for cmd in cmds:
output, _ = self.run_cmd_unit(sentry_units[0], cmd)
# Check connection against the other sentry_units
self.log.debug('Checking user connect against units...')
for sentry_unit in sentry_units:
connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
username=username,
password=password)
connection.close()
def delete_rmq_test_user(self, sentry_units, username="testuser1"):
"""Delete a rabbitmq user via the first rmq juju unit.
:param sentry_units: list of sentry unit pointers
:param username: amqp user name, default to testuser1
:returns: None if successful or no such user.
"""
self.log.debug('Deleting rmq user ({})...'.format(username))
# Check that the user exists
cmd_user_list = 'rabbitmqctl list_users'
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
if username not in output:
self.log.warning('User ({}) does not exist, returning '
'gracefully.'.format(username))
return
# Delete the user
cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
def get_rmq_cluster_status(self, sentry_unit):
"""Execute rabbitmq cluster status command on a unit and return
the full output.
:param sentry_unit: sentry unit pointer
:returns: String containing console output of cluster status command
"""
cmd = 'rabbitmqctl cluster_status'
output, _ = self.run_cmd_unit(sentry_unit, cmd)
self.log.debug('{} cluster_status:\n{}'.format(
sentry_unit.info['unit_name'], output))
return str(output)
def get_rmq_cluster_running_nodes(self, sentry_unit):
"""Parse rabbitmqctl cluster_status output string, return list of
running rabbitmq cluster nodes.
:param sentry_unit: sentry unit pointer
:returns: List containing node names of running nodes
"""
# NOTE(beisner): rabbitmqctl cluster_status output is not
# json-parsable, do string chop foo, then json.loads that.
str_stat = self.get_rmq_cluster_status(sentry_unit)
if 'running_nodes' in str_stat:
pos_start = str_stat.find("{running_nodes,") + 15
pos_end = str_stat.find("]},", pos_start) + 1
str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
run_nodes = json.loads(str_run_nodes)
return run_nodes
else:
return []
def validate_rmq_cluster_running_nodes(self, sentry_units):
"""Check that all rmq unit hostnames are represented in the
cluster_status output of all units.
:param sentry_units: list of sentry unit pointers (all rmq units)
:returns: None if successful, otherwise return error message
"""
host_names = self.get_unit_hostnames(sentry_units)
errors = []
# Query every unit for cluster_status running nodes
for query_unit in sentry_units:
query_unit_name = query_unit.info['unit_name']
running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
# Confirm that every unit is represented in the queried unit's
# cluster_status running nodes output.
for validate_unit in sentry_units:
val_host_name = host_names[validate_unit.info['unit_name']]
val_node_name = 'rabbit@{}'.format(val_host_name)
if val_node_name not in running_nodes:
errors.append('Cluster member check failed on {}: {} not '
'in {}\n'.format(query_unit_name,
val_node_name,
running_nodes))
if errors:
return ''.join(errors)
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
"""Check a single juju rmq unit for ssl and port in the config file."""
host = sentry_unit.info['public-address']
unit_name = sentry_unit.info['unit_name']
conf_file = '/etc/rabbitmq/rabbitmq.config'
conf_contents = str(self.file_contents_safe(sentry_unit,
conf_file, max_wait=16))
# Checks
conf_ssl = 'ssl' in conf_contents
conf_port = str(port) in conf_contents
# Port explicitly checked in config
if port and conf_port and conf_ssl:
self.log.debug('SSL is enabled @{}:{} '
'({})'.format(host, port, unit_name))
return True
elif port and not conf_port and conf_ssl:
self.log.debug('SSL is enabled @{} but not on port {} '
'({})'.format(host, port, unit_name))
return False
# Port not checked (useful when checking that ssl is disabled)
elif not port and conf_ssl:
self.log.debug('SSL is enabled @{}:{} '
'({})'.format(host, port, unit_name))
return True
elif not conf_ssl:
self.log.debug('SSL not enabled @{}:{} '
'({})'.format(host, port, unit_name))
return False
else:
msg = ('Unknown condition when checking SSL status @{}:{} '
'({})'.format(host, port, unit_name))
amulet.raise_status(amulet.FAIL, msg)
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
"""Check that ssl is enabled on rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:param port: optional ssl port override to validate
:returns: None if successful, otherwise return error message
"""
for sentry_unit in sentry_units:
if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
return ('Unexpected condition: ssl is disabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None
def validate_rmq_ssl_disabled_units(self, sentry_units):
"""Check that ssl is enabled on listed rmq juju sentry units.
:param sentry_units: list of all rmq sentry units
:returns: True if successful. Raise on error.
"""
for sentry_unit in sentry_units:
if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
return ('Unexpected condition: ssl is enabled on unit '
'({})'.format(sentry_unit.info['unit_name']))
return None
def configure_rmq_ssl_on(self, sentry_units, deployment,
port=None, max_wait=60):
"""Turn ssl charm config option on, with optional non-default
ssl port specification. Confirm that it is enabled on every
unit.
:param sentry_units: list of sentry units
:param deployment: amulet deployment object pointer
:param port: amqp port, use defaults if None
:param max_wait: maximum time to wait in seconds to confirm
:returns: None if successful. Raise on error.
"""
self.log.debug('Setting ssl charm config option: on')
# Enable RMQ SSL
config = {'ssl': 'on'}
if port:
config['ssl_port'] = port
deployment.d.configure('rabbitmq-server', config)
# Wait for unit status
self.rmq_wait_for_cluster(deployment)
# Confirm
tries = 0
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
while ret and tries < (max_wait / 4):
time.sleep(4)
self.log.debug('Attempt {}: {}'.format(tries, ret))
ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
tries += 1
if ret:
amulet.raise_status(amulet.FAIL, ret)
def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
"""Turn ssl charm config option off, confirm that it is disabled
on every unit.
:param sentry_units: list of sentry units
:param deployment: amulet deployment object pointer
:param max_wait: maximum time to wait in seconds to confirm
:returns: None if successful. Raise on error.
"""
self.log.debug('Setting ssl charm config option: off')
# Disable RMQ SSL
config = {'ssl': 'off'}
deployment.d.configure('rabbitmq-server', config)
# Wait for unit status
self.rmq_wait_for_cluster(deployment)
# Confirm
tries = 0
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
while ret and tries < (max_wait / 4):
time.sleep(4)
self.log.debug('Attempt {}: {}'.format(tries, ret))
ret = self.validate_rmq_ssl_disabled_units(sentry_units)
tries += 1
if ret:
amulet.raise_status(amulet.FAIL, ret)
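# Toggle sketch ('u' is an instance of this class; the port value is
# an arbitrary assumption):
#   u.configure_rmq_ssl_on(sentry_units, deployment, port=5999)
#   ...exercise ssl amqp traffic...
#   u.configure_rmq_ssl_off(sentry_units, deployment)
# Both helpers block until every unit reports the expected ssl state,
# retrying every 4s up to roughly max_wait seconds, then raise via
# amulet.raise_status on failure.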
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
port=None, fatal=True,
username="testuser1", password="changeme"):
"""Establish and return a pika amqp connection to the rabbitmq service
running on a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:param fatal: boolean, default to True (raises on connect error)
:param username: amqp user name, default to testuser1
:param password: amqp user password
:returns: pika amqp connection pointer or None if failed and non-fatal
"""
host = sentry_unit.info['public-address']
unit_name = sentry_unit.info['unit_name']
# Default port logic if port is not specified
if ssl and not port:
port = 5671
elif not ssl and not port:
port = 5672
self.log.debug('Connecting to amqp on {}:{} ({}) as '
'{}...'.format(host, port, unit_name, username))
try:
credentials = pika.PlainCredentials(username, password)
parameters = pika.ConnectionParameters(host=host, port=port,
credentials=credentials,
ssl=ssl,
connection_attempts=3,
retry_delay=5,
socket_timeout=1)
connection = pika.BlockingConnection(parameters)
assert connection.server_properties['product'] == 'RabbitMQ'
self.log.debug('Connect OK')
return connection
except Exception as e:
msg = ('amqp connection failed to {}:{} as '
'{} ({})'.format(host, port, username, str(e)))
if fatal:
amulet.raise_status(amulet.FAIL, msg)
else:
self.log.warn(msg)
return None
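# Non-fatal probe sketch: with fatal=False a failed connect returns
# None instead of failing the run, which is handy when polling for
# amqp availability:
#   conn = u.connect_amqp_by_unit(sentry_unit, ssl=False, fatal=False)
#   if conn:
#       conn.close()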
def publish_amqp_message_by_unit(self, sentry_unit, message,
queue="test", ssl=False,
username="testuser1",
password="changeme",
port=None):
"""Publish an amqp message to a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param message: amqp message string
:param queue: message queue, default to test
:param username: amqp user name, default to testuser1
:param password: amqp user password
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:returns: None. Raises exception if publish failed.
"""
self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
message))
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
port=port,
username=username,
password=password)
# NOTE(beisner): extra debug here re: pika hang potential:
# https://github.com/pika/pika/issues/297
# https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
self.log.debug('Defining channel...')
channel = connection.channel()
self.log.debug('Declaring queue...')
channel.queue_declare(queue=queue, auto_delete=False, durable=True)
self.log.debug('Publishing message...')
channel.basic_publish(exchange='', routing_key=queue, body=message)
self.log.debug('Closing channel...')
channel.close()
self.log.debug('Closing connection...')
connection.close()
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
username="testuser1",
password="changeme",
ssl=False, port=None):
"""Get an amqp message from a rmq juju unit.
:param sentry_unit: sentry unit pointer
:param queue: message queue, default to test
:param username: amqp user name, default to testuser1
:param password: amqp user password
:param ssl: boolean, default to False
:param port: amqp port, use defaults if None
:returns: amqp message body as string. Raise if get fails.
"""
connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
port=port,
username=username,
password=password)
channel = connection.channel()
method_frame, _, body = channel.basic_get(queue)
if method_frame:
self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
body))
channel.basic_ack(method_frame.delivery_tag)
channel.close()
connection.close()
return body
else:
msg = 'No message retrieved.'
amulet.raise_status(amulet.FAIL, msg)
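# End-to-end sketch of a message round trip using the two helpers
# above (queue name and message text are assumptions):
#   amqp_msg = 'amqp message check {}'.format(time.time())
#   u.publish_amqp_message_by_unit(sentry_unit, amqp_msg, queue='test')
#   retrieved = u.get_amqp_message_by_unit(sentry_unit, queue='test')
#   assert retrieved == amqp_msg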

20
tests/tests.yaml Normal file
View File

@ -0,0 +1,20 @@
bootstrap: true
reset: true
virtualenv: true
makefile:
- lint
- test
sources:
- ppa:juju/stable
packages:
- amulet
- distro-info-data
- python-cinderclient
- python-distro-info
- python-glanceclient
- python-heatclient
- python-keystoneclient
- python-neutronclient
- python-novaclient
- python-pika
- python-swiftclient