Merge "Update amulet test definitions"

This commit is contained in:
Zuul 2017-11-07 20:07:05 +00:00 committed by Gerrit Code Review
commit 209341a601
32 changed files with 590 additions and 199 deletions

View File

@ -285,7 +285,7 @@ class NRPE(object):
try: try:
nagios_uid = pwd.getpwnam('nagios').pw_uid nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid nagios_gid = grp.getgrnam('nagios').gr_gid
except: except Exception:
log("Nagios user not set up, nrpe checks not updated") log("Nagios user not set up, nrpe checks not updated")
return return

View File

@ -27,6 +27,7 @@ clustering-related helpers.
import subprocess import subprocess
import os import os
import time
from socket import gethostname as get_unit_hostname from socket import gethostname as get_unit_hostname
@ -45,6 +46,9 @@ from charmhelpers.core.hookenv import (
is_leader as juju_is_leader, is_leader as juju_is_leader,
status_set, status_set,
) )
from charmhelpers.core.host import (
modulo_distribution,
)
from charmhelpers.core.decorators import ( from charmhelpers.core.decorators import (
retry_on_exception, retry_on_exception,
) )
@ -361,3 +365,29 @@ def canonical_url(configs, vip_setting='vip'):
else: else:
addr = unit_get('private-address') addr = unit_get('private-address')
return '%s://%s' % (scheme, addr) return '%s://%s' % (scheme, addr)
def distributed_wait(modulo=None, wait=None, operation_name='operation'):
''' Distribute operations by waiting based on modulo_distribution
If modulo and or wait are not set, check config_get for those values.
:param modulo: int The modulo number creates the group distribution
:param wait: int The constant time wait value
:param operation_name: string Operation name for status message
i.e. 'restart'
:side effect: Calls config_get()
:side effect: Calls log()
:side effect: Calls status_set()
:side effect: Calls time.sleep()
'''
if modulo is None:
modulo = config_get('modulo-nodes')
if wait is None:
wait = config_get('known-wait')
calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
msg = "Waiting {} seconds for {} ...".format(calculated_wait,
operation_name)
log(msg, DEBUG)
status_set('maintenance', msg)
time.sleep(calculated_wait)

View File

@ -70,12 +70,12 @@ class DisabledModuleAudit(BaseAudit):
"""Returns the modules which are enabled in Apache.""" """Returns the modules which are enabled in Apache."""
output = subprocess.check_output(['apache2ctl', '-M']) output = subprocess.check_output(['apache2ctl', '-M'])
modules = [] modules = []
for line in output.strip().split(): for line in output.splitlines():
# Each line of the enabled module output looks like: # Each line of the enabled module output looks like:
# module_name (static|shared) # module_name (static|shared)
# Plus a header line at the top of the output which is stripped # Plus a header line at the top of the output which is stripped
# out by the regex. # out by the regex.
matcher = re.search(r'^ (\S*)', line) matcher = re.search(r'^ (\S*)_module (\S*)', line)
if matcher: if matcher:
modules.append(matcher.group(1)) modules.append(matcher.group(1))
return modules return modules

View File

@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None):
if not ip_addr: if not ip_addr:
try: try:
ip_addr = socket.gethostbyname(hostname) ip_addr = socket.gethostbyname(hostname)
except: except Exception:
log("Failed to resolve hostname '%s'" % (hostname), log("Failed to resolve hostname '%s'" % (hostname),
level=WARNING) level=WARNING)
return fallback return fallback
@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True):
if not result: if not result:
try: try:
result = socket.gethostbyaddr(address)[0] result = socket.gethostbyaddr(address)[0]
except: except Exception:
return None return None
else: else:
result = address result = address

View File

@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50):
target, name, source, str(priority) target, name, source, str(priority)
] ]
subprocess.check_call(cmd) subprocess.check_call(cmd)
def remove_alternative(name, source):
"""Remove an installed alternative configuration file
:param name: string name of the alternative to remove
:param source: string full path to alternative to remove
"""
cmd = [
'update-alternatives', '--remove',
name, source
]
subprocess.check_call(cmd)

View File

@ -303,20 +303,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
test scenario, based on OpenStack release and whether ceph radosgw test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not.""" is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo: if self._get_openstack_release() <= self.trusty_juno:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier # Juno or earlier
pools = [ pools = [
'data', 'data',
'metadata', 'metadata',
'rbd', 'rbd',
'cinder', 'cinder-ceph',
'glance'
]
elif (self.trust_kilo <= self._get_openstack_release() <=
self.zesty_ocata):
# Kilo through Ocata
pools = [
'rbd',
'cinder-ceph',
'glance'
]
else:
# Pike and later
pools = [
'cinder-ceph',
'glance' 'glance'
] ]

View File

@ -23,6 +23,7 @@ import urllib
import urlparse import urlparse
import cinderclient.v1.client as cinder_client import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client from keystoneclient.v2_0 import client as keystone_client
@ -351,12 +352,15 @@ class OpenStackAmuletUtils(AmuletUtils):
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username, def authenticate_cinder_admin(self, keystone_sentry, username,
password, tenant): password, tenant, api_version=2):
"""Authenticates admin user with cinder.""" """Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens. # NOTE(beisner): cinder python client doesn't accept tokens.
keystone_ip = keystone_sentry.info['public-address'] keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
return cinder_client.Client(username, password, tenant, ept) _clients = {
1: cinder_client.Client,
2: cinder_clientv2.Client}
return _clients[api_version](username, password, tenant, ept)
def authenticate_keystone(self, keystone_ip, username, password, def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False, api_version=False, admin_port=False,
@ -617,7 +621,7 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Keypair ({}) already exists, ' self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name)) 'using it.'.format(keypair_name))
return _keypair return _keypair
except: except Exception:
self.log.debug('Keypair ({}) does not exist, ' self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name)) 'creating it.'.format(keypair_name))

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import collections
import glob import glob
import json import json
import math import math
@ -578,11 +579,14 @@ class HAProxyContext(OSContextGenerator):
laddr = get_address_in_network(config(cfg_opt)) laddr = get_address_in_network(config(cfg_opt))
if laddr: if laddr:
netmask = get_netmask_for_address(laddr) netmask = get_netmask_for_address(laddr)
cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, cluster_hosts[laddr] = {
'network': "{}/{}".format(laddr,
netmask), netmask),
'backends': {l_unit: laddr}} 'backends': collections.OrderedDict([(l_unit,
laddr)])
}
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
for unit in related_units(rid): for unit in sorted(related_units(rid)):
_laddr = relation_get('{}-address'.format(addr_type), _laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit) rid=rid, unit=unit)
if _laddr: if _laddr:
@ -594,10 +598,13 @@ class HAProxyContext(OSContextGenerator):
# match in the frontend # match in the frontend
cluster_hosts[addr] = {} cluster_hosts[addr] = {}
netmask = get_netmask_for_address(addr) netmask = get_netmask_for_address(addr)
cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), cluster_hosts[addr] = {
'backends': {l_unit: addr}} 'network': "{}/{}".format(addr, netmask),
'backends': collections.OrderedDict([(l_unit,
addr)])
}
for rid in relation_ids('cluster'): for rid in relation_ids('cluster'):
for unit in related_units(rid): for unit in sorted(related_units(rid)):
_laddr = relation_get('private-address', _laddr = relation_get('private-address',
rid=rid, unit=unit) rid=rid, unit=unit)
if _laddr: if _laddr:
@ -628,6 +635,8 @@ class HAProxyContext(OSContextGenerator):
ctxt['local_host'] = '127.0.0.1' ctxt['local_host'] = '127.0.0.1'
ctxt['haproxy_host'] = '0.0.0.0' ctxt['haproxy_host'] = '0.0.0.0'
ctxt['ipv6_enabled'] = not is_ipv6_disabled()
ctxt['stat_port'] = '8888' ctxt['stat_port'] = '8888'
db = kv() db = kv()
@ -802,7 +811,8 @@ class ApacheSSLContext(OSContextGenerator):
else: else:
# Expect cert/key provided in config (currently assumed that ca # Expect cert/key provided in config (currently assumed that ca
# uses ip for cn) # uses ip for cn)
cn = resolve_address(endpoint_type=INTERNAL) for net_type in (INTERNAL, ADMIN, PUBLIC):
cn = resolve_address(endpoint_type=net_type)
self.configure_cert(cn) self.configure_cert(cn)
addresses = self.get_network_addresses() addresses = self.get_network_addresses()
@ -1176,7 +1186,7 @@ class SubordinateConfigContext(OSContextGenerator):
if sub_config and sub_config != '': if sub_config and sub_config != '':
try: try:
sub_config = json.loads(sub_config) sub_config = json.loads(sub_config)
except: except Exception:
log('Could not parse JSON from ' log('Could not parse JSON from '
'subordinate_configuration setting from %s' 'subordinate_configuration setting from %s'
% rid, level=ERROR) % rid, level=ERROR)

View File

@ -9,7 +9,7 @@
CRITICAL=0 CRITICAL=0
NOTACTIVE='' NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log LOGFILE=/var/log/nagios/check_haproxy.log
AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}') AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}')
typeset -i N_INSTANCES=0 typeset -i N_INSTANCES=0
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)

View File

@ -82,15 +82,18 @@ def update_dns_ha_resource_params(resources, resource_params,
continue continue
m = re.search('os-(.+?)-hostname', setting) m = re.search('os-(.+?)-hostname', setting)
if m: if m:
networkspace = m.group(1) endpoint_type = m.group(1)
# resolve_address's ADDRESS_MAP uses 'int' not 'internal'
if endpoint_type == 'internal':
endpoint_type = 'int'
else: else:
msg = ('Unexpected DNS hostname setting: {}. ' msg = ('Unexpected DNS hostname setting: {}. '
'Cannot determine network space name' 'Cannot determine endpoint_type name'
''.format(setting)) ''.format(setting))
status_set('blocked', msg) status_set('blocked', msg)
raise DNSHAException(msg) raise DNSHAException(msg)
hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace) hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type)
if hostname_key in hostname_group: if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in ' log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname), 'hostname group - skipping'.format(hostname_key, hostname),
@ -101,7 +104,7 @@ def update_dns_ha_resource_params(resources, resource_params,
resources[hostname_key] = crm_ocf resources[hostname_key] = crm_ocf
resource_params[hostname_key] = ( resource_params[hostname_key] = (
'params fqdn="{}" ip_address="{}" ' 'params fqdn="{}" ip_address="{}" '
''.format(hostname, resolve_address(endpoint_type=networkspace, ''.format(hostname, resolve_address(endpoint_type=endpoint_type,
override=False))) override=False)))
if len(hostname_group) >= 1: if len(hostname_group) >= 1:

View File

@ -59,18 +59,13 @@ def determine_dkms_package():
def quantum_plugins(): def quantum_plugins():
from charmhelpers.contrib.openstack import context
return { return {
'ovs': { 'ovs': {
'config': '/etc/quantum/plugins/openvswitch/' 'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini', 'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2', 'OVSQuantumPluginV2',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'], 'services': ['quantum-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(), 'packages': [determine_dkms_package(),
['quantum-plugin-openvswitch-agent']], ['quantum-plugin-openvswitch-agent']],
@ -82,11 +77,7 @@ def quantum_plugins():
'config': '/etc/quantum/plugins/nicira/nvp.ini', 'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2', 'QuantumPlugin.NvpPluginV2',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['quantum-server', 'server_packages': ['quantum-server',
@ -100,7 +91,6 @@ NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins(): def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common') release = os_release('nova-common')
plugins = { plugins = {
'ovs': { 'ovs': {
@ -108,11 +98,7 @@ def neutron_plugins():
'ovs_neutron_plugin.ini', 'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2', 'OVSNeutronPluginV2',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'], 'services': ['neutron-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(), 'packages': [determine_dkms_package(),
['neutron-plugin-openvswitch-agent']], ['neutron-plugin-openvswitch-agent']],
@ -124,11 +110,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/nicira/nvp.ini', 'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2', 'NeutronPlugin.NvpPluginV2',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['neutron-server', 'server_packages': ['neutron-server',
@ -138,11 +120,7 @@ def neutron_plugins():
'nsx': { 'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini', 'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware', 'driver': 'vmware',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['neutron-server', 'server_packages': ['neutron-server',
@ -152,11 +130,7 @@ def neutron_plugins():
'n1kv': { 'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [], 'services': [],
'packages': [determine_dkms_package(), 'packages': [determine_dkms_package(),
['neutron-plugin-cisco']], ['neutron-plugin-cisco']],
@ -167,11 +141,7 @@ def neutron_plugins():
'Calico': { 'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['calico-felix', 'services': ['calico-felix',
'bird', 'bird',
'neutron-dhcp-agent', 'neutron-dhcp-agent',
@ -189,11 +159,7 @@ def neutron_plugins():
'vsp': { 'vsp': {
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [], 'services': [],
'packages': [], 'packages': [],
'server_packages': ['neutron-server', 'neutron-plugin-nuage'], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
@ -203,10 +169,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
'.plumgrid_plugin.NeutronPluginPLUMgridV2'), '.plumgrid_plugin.NeutronPluginPLUMgridV2'),
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('database-user'),
database=config('database'),
ssl_dir=NEUTRON_CONF_DIR)],
'services': [], 'services': [],
'packages': ['plumgrid-lxc', 'packages': ['plumgrid-lxc',
'iovisor-dkms'], 'iovisor-dkms'],
@ -217,11 +180,7 @@ def neutron_plugins():
'midonet': { 'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini', 'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2', 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
'contexts': [ 'contexts': [],
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [], 'services': [],
'packages': [determine_dkms_package()], 'packages': [determine_dkms_package()],
'server_packages': ['neutron-server', 'server_packages': ['neutron-server',

View File

@ -48,7 +48,9 @@ listen stats
{% for service, ports in service_ports.items() -%} {% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }} frontend tcp-in_{{ service }}
bind *:{{ ports[0] }} bind *:{{ ports[0] }}
{% if ipv6_enabled -%}
bind :::{{ ports[0] }} bind :::{{ ports[0] }}
{% endif -%}
{% for frontend in frontends -%} {% for frontend in frontends -%}
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}

View File

@ -0,0 +1,6 @@
[cache]
{% if memcache_url %}
enabled = true
backend = oslo_cache.memcache_pool
memcache_servers = {{ memcache_url }}
{% endif %}

View File

@ -272,6 +272,8 @@ class OSConfigRenderer(object):
raise OSConfigException raise OSConfigException
_out = self.render(config_file) _out = self.render(config_file)
if six.PY3:
_out = _out.encode('UTF-8')
with open(config_file, 'wb') as out: with open(config_file, 'wb') as out:
out.write(_out) out.write(_out)

View File

@ -95,7 +95,7 @@ from charmhelpers.fetch import (
from charmhelpers.fetch.snap import ( from charmhelpers.fetch.snap import (
snap_install, snap_install,
snap_refresh, snap_refresh,
SNAP_CHANNELS, valid_snap_channel,
) )
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@ -426,7 +426,7 @@ def get_os_codename_package(package, fatal=True):
try: try:
pkg = cache[package] pkg = cache[package]
except: except Exception:
if not fatal: if not fatal:
return None return None
# the package is unknown to the current apt cache. # the package is unknown to the current apt cache.
@ -579,6 +579,9 @@ def configure_installation_source(source_plus_key):
Note that the behaviour on error is to log the error to the juju log and Note that the behaviour on error is to log the error to the juju log and
then call sys.exit(1). then call sys.exit(1).
""" """
if source_plus_key.startswith('snap'):
# Do nothing for snap installs
return
# extract the key if there is one, denoted by a '|' in the rel # extract the key if there is one, denoted by a '|' in the rel
source, key = get_source_and_pgp_key(source_plus_key) source, key = get_source_and_pgp_key(source_plus_key)
@ -794,7 +797,7 @@ def git_default_repos(projects_yaml):
service = service_name() service = service_name()
core_project = service core_project = service
for default, branch in GIT_DEFAULT_BRANCHES.iteritems(): for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
if projects_yaml == default: if projects_yaml == default:
# add the requirements repo first # add the requirements repo first
@ -1615,7 +1618,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
upgrade_callback(configs=configs) upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'}) action_set({'outcome': 'success, upgrade completed.'})
ret = True ret = True
except: except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'}) action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()}) action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an ' action_fail('do_openstack_upgrade resulted in an '
@ -1720,7 +1723,7 @@ def is_unit_paused_set():
kv = t[0] kv = t[0]
# transform something truth-y into a Boolean. # transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused'))) return not(not(kv.get('unit-paused')))
except: except Exception:
return False return False
@ -2048,7 +2051,7 @@ def update_json_file(filename, items):
def snap_install_requested(): def snap_install_requested():
""" Determine if installing from snaps """ Determine if installing from snaps
If openstack-origin is of the form snap:channel-series-release If openstack-origin is of the form snap:track/channel[/branch]
and channel is in SNAPS_CHANNELS return True. and channel is in SNAPS_CHANNELS return True.
""" """
origin = config('openstack-origin') or "" origin = config('openstack-origin') or ""
@ -2056,10 +2059,12 @@ def snap_install_requested():
return False return False
_src = origin[5:] _src = origin[5:]
channel, series, release = _src.split('-') if '/' in _src:
if channel.lower() in SNAP_CHANNELS: channel = _src.split('/')[1]
return True else:
return False # Handle snap:track with no channel
channel = 'stable'
return valid_snap_channel(channel)
def get_snaps_install_info_from_origin(snaps, src, mode='classic'): def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@ -2067,7 +2072,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@param snaps: List of snaps @param snaps: List of snaps
@param src: String of openstack-origin or source of the form @param src: String of openstack-origin or source of the form
snap:channel-series-track snap:track/channel
@param mode: String classic, devmode or jailmode @param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes @returns: Dictionary of snaps with channels and modes
""" """
@ -2077,8 +2082,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
return {} return {}
_src = src[5:] _src = src[5:]
_channel, _series, _release = _src.split('-') channel = '--channel={}'.format(_src)
channel = '--channel={}/{}'.format(_release, _channel)
return {snap: {'channel': channel, 'mode': mode} return {snap: {'channel': channel, 'mode': mode}
for snap in snaps} for snap in snaps}
@ -2090,8 +2094,8 @@ def install_os_snaps(snaps, refresh=False):
@param snaps: Dictionary of snaps with channels and modes of the form: @param snaps: Dictionary of snaps with channels and modes of the form:
{'snap_name': {'channel': 'snap_channel', {'snap_name': {'channel': 'snap_channel',
'mode': 'snap_mode'}} 'mode': 'snap_mode'}}
Where channel a snapstore channel and mode is --classic, --devmode or Where channel is a snapstore channel and mode is --classic, --devmode
--jailmode. or --jailmode.
@param post_snap_install: Callback function to run after snaps have been @param post_snap_install: Callback function to run after snaps have been
installed installed
""" """

View File

@ -370,9 +370,10 @@ def get_mon_map(service):
Also raises CalledProcessError if our ceph command fails Also raises CalledProcessError if our ceph command fails
""" """
try: try:
mon_status = check_output( mon_status = check_output(['ceph', '--id', service,
['ceph', '--id', service,
'mon_status', '--format=json']) 'mon_status', '--format=json'])
if six.PY3:
mon_status = mon_status.decode('UTF-8')
try: try:
return json.loads(mon_status) return json.loads(mon_status)
except ValueError as v: except ValueError as v:
@ -457,7 +458,7 @@ def monitor_key_get(service, key):
try: try:
output = check_output( output = check_output(
['ceph', '--id', service, ['ceph', '--id', service,
'config-key', 'get', str(key)]) 'config-key', 'get', str(key)]).decode('UTF-8')
return output return output
except CalledProcessError as e: except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format( log("Monitor config-key get failed with message: {}".format(
@ -500,6 +501,8 @@ def get_erasure_profile(service, name):
out = check_output(['ceph', '--id', service, out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get', 'osd', 'erasure-code-profile', 'get',
name, '--format=json']) name, '--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out) return json.loads(out)
except (CalledProcessError, OSError, ValueError): except (CalledProcessError, OSError, ValueError):
return None return None
@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name):
""" """
validator(value=service, valid_type=six.string_types) validator(value=service, valid_type=six.string_types)
validator(value=pool_name, valid_type=six.string_types) validator(value=pool_name, valid_type=six.string_types)
out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) out = check_output(['ceph', '--id', service,
'osd', 'dump', '--format=json'])
if six.PY3:
out = out.decode('UTF-8')
try: try:
osd_json = json.loads(out) osd_json = json.loads(out)
for pool in osd_json['pools']: for pool in osd_json['pools']:
@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name):
def pool_exists(service, name): def pool_exists(service, name):
"""Check to see if a RADOS pool already exists.""" """Check to see if a RADOS pool already exists."""
try: try:
out = check_output(['rados', '--id', service, out = check_output(['rados', '--id', service, 'lspools'])
'lspools']).decode('UTF-8') if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError: except CalledProcessError:
return False return False
@ -714,9 +721,12 @@ def get_osds(service):
""" """
version = ceph_version() version = ceph_version()
if version and version >= '0.56': if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service, out = check_output(['ceph', '--id', service,
'osd', 'ls', 'osd', 'ls',
'--format=json']).decode('UTF-8')) '--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
return None return None
@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img):
"""Check to see if a RADOS block device exists.""" """Check to see if a RADOS block device exists."""
try: try:
out = check_output(['rbd', 'list', '--id', out = check_output(['rbd', 'list', '--id',
service, '--pool', pool]).decode('UTF-8') service, '--pool', pool])
if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError: except CalledProcessError:
return False return False
@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name): def image_mapped(name):
"""Determine whether a RADOS block device is mapped locally.""" """Determine whether a RADOS block device is mapped locally."""
try: try:
out = check_output(['rbd', 'showmapped']).decode('UTF-8') out = check_output(['rbd', 'showmapped'])
if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError: except CalledProcessError:
return False return False
@ -1018,7 +1032,9 @@ def ceph_version():
"""Retrieve the local version of ceph.""" """Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'): if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v'] cmd = ['ceph', '-v']
output = check_output(cmd).decode('US-ASCII') output = check_output(cmd)
if six.PY3:
output = output.decode('UTF-8')
output = output.split() output = output.split()
if len(output) > 3: if len(output) > 3:
return output[2] return output[2]

View File

@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device):
''' '''
vg = None vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines() pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd: for lvm in pvd:
l = l.decode('UTF-8') lvm = lvm.decode('UTF-8')
if l.strip().startswith('VG Name'): if lvm.strip().startswith('VG Name'):
vg = ' '.join(l.strip().split()[2:]) vg = ' '.join(lvm.strip().split()[2:])
return vg return vg

View File

@ -64,6 +64,6 @@ def is_device_mounted(device):
''' '''
try: try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8') out = check_output(['lsblk', '-P', device]).decode('UTF-8')
except: except Exception:
return False return False
return bool(re.search(r'MOUNTPOINT=".+"', out)) return bool(re.search(r'MOUNTPOINT=".+"', out))

View File

@ -218,6 +218,8 @@ def principal_unit():
for rid in relation_ids(reltype): for rid in relation_ids(reltype):
for unit in related_units(rid): for unit in related_units(rid):
md = _metadata_unit(unit) md = _metadata_unit(unit)
if not md:
continue
subordinate = md.pop('subordinate', None) subordinate = md.pop('subordinate', None)
if not subordinate: if not subordinate:
return unit return unit
@ -511,7 +513,10 @@ def _metadata_unit(unit):
""" """
basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
if not os.path.exists(joineddir):
return None
with open(joineddir) as md:
return yaml.safe_load(md) return yaml.safe_load(md)
@ -639,18 +644,31 @@ def is_relation_made(relation, keys='private-address'):
return False return False
def _port_op(op_name, port, protocol="TCP"):
"""Open or close a service network port"""
_args = [op_name]
icmp = protocol.upper() == "ICMP"
if icmp:
_args.append(protocol)
else:
_args.append('{}/{}'.format(port, protocol))
try:
subprocess.check_call(_args)
except subprocess.CalledProcessError:
# Older Juju pre 2.3 doesn't support ICMP
# so treat it as a no-op if it fails.
if not icmp:
raise
def open_port(port, protocol="TCP"): def open_port(port, protocol="TCP"):
"""Open a service network port""" """Open a service network port"""
_args = ['open-port'] _port_op('open-port', port, protocol)
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def close_port(port, protocol="TCP"): def close_port(port, protocol="TCP"):
"""Close a service network port""" """Close a service network port"""
_args = ['close-port'] _port_op('close-port', port, protocol)
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def open_ports(start, end, protocol="TCP"): def open_ports(start, end, protocol="TCP"):
@ -667,6 +685,17 @@ def close_ports(start, end, protocol="TCP"):
subprocess.check_call(_args) subprocess.check_call(_args)
def opened_ports():
"""Get the opened ports
*Note that this will only show ports opened in a previous hook*
:returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
"""
_args = ['opened-ports', '--format=json']
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
@cached @cached
def unit_get(attribute): def unit_get(attribute):
"""Get the unit ID for the remote unit""" """Get the unit ID for the remote unit"""
@ -1077,6 +1106,35 @@ def network_get_primary_address(binding):
return subprocess.check_output(cmd).decode('UTF-8').strip() return subprocess.check_output(cmd).decode('UTF-8').strip()
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None):
"""
Retrieve the network details for a relation endpoint
:param endpoint: string. The name of a relation endpoint
:param relation_id: int. The ID of the relation for the current context.
:return: dict. The loaded YAML output of the network-get query.
:raise: NotImplementedError if run on Juju < 2.1
"""
cmd = ['network-get', endpoint, '--format', 'yaml']
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
try:
response = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT).decode('UTF-8').strip()
except CalledProcessError as e:
# Early versions of Juju 2.0.x required the --primary-address argument.
# We catch that condition here and raise NotImplementedError since
# the requested semantics are not available - the caller can then
# use the network_get_primary_address() method instead.
if '--primary-address is currently required' in e.output.decode('UTF-8'):
raise NotImplementedError
raise
return yaml.safe_load(response)
def add_metric(*args, **kwargs): def add_metric(*args, **kwargs):
"""Add metric values. Values may be expressed with keyword arguments. For """Add metric values. Values may be expressed with keyword arguments. For
metric names containing dashes, these may be expressed as one or more metric names containing dashes, these may be expressed as one or more

View File

@ -34,7 +34,7 @@ import six
from contextlib import contextmanager from contextlib import contextmanager
from collections import OrderedDict from collections import OrderedDict
from .hookenv import log, DEBUG from .hookenv import log, DEBUG, local_unit
from .fstab import Fstab from .fstab import Fstab
from charmhelpers.osplatform import get_platform from charmhelpers.osplatform import get_platform
@ -441,6 +441,49 @@ def add_user_to_group(username, group):
subprocess.check_call(cmd) subprocess.check_call(cmd)
def chage(username, lastday=None, expiredate=None, inactive=None,
mindays=None, maxdays=None, root=None, warndays=None):
"""Change user password expiry information
:param str username: User to update
:param str lastday: Set when password was changed in YYYY-MM-DD format
:param str expiredate: Set when user's account will no longer be
accessible in YYYY-MM-DD format.
-1 will remove an account expiration date.
:param str inactive: Set the number of days of inactivity after a password
has expired before the account is locked.
-1 will remove an account's inactivity.
:param str mindays: Set the minimum number of days between password
changes to MIN_DAYS.
0 indicates the password can be changed anytime.
:param str maxdays: Set the maximum number of days during which a
password is valid.
-1 as MAX_DAYS will remove checking maxdays
:param str root: Apply changes in the CHROOT_DIR directory
:param str warndays: Set the number of days of warning before a password
change is required
:raises subprocess.CalledProcessError: if call to chage fails
"""
cmd = ['chage']
if root:
cmd.extend(['--root', root])
if lastday:
cmd.extend(['--lastday', lastday])
if expiredate:
cmd.extend(['--expiredate', expiredate])
if inactive:
cmd.extend(['--inactive', inactive])
if mindays:
cmd.extend(['--mindays', mindays])
if maxdays:
cmd.extend(['--maxdays', maxdays])
if warndays:
cmd.extend(['--warndays', warndays])
cmd.append(username)
subprocess.check_call(cmd)
remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
def rsync(from_path, to_path, flags='-r', options=None, timeout=None): def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path""" """Replicate the contents of a path"""
options = options or ['--delete', '--executability'] options = options or ['--delete', '--executability']
@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path):
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines) output = "\n".join(lines)
return output return output
def modulo_distribution(modulo=3, wait=30):
""" Modulo distribution
This helper uses the unit number, a modulo value and a constant wait time
to produce a calculated wait time distribution. This is useful in large
scale deployments to distribute load during an expensive operation such as
service restarts.
If you have 1000 nodes that need to restart 100 at a time 1 minute at a
time:
time.wait(modulo_distribution(modulo=100, wait=60))
restart()
If you need restarts to happen serially set modulo to the exact number of
nodes and set a high constant wait time:
time.wait(modulo_distribution(modulo=10, wait=120))
restart()
@param modulo: int The modulo number creates the group distribution
@param wait: int The constant time wait value
@return: int Calculated time to wait for unit operation
"""
unit_number = int(local_unit().split('/')[1])
return (unit_number % modulo) * wait

View File

@ -358,7 +358,7 @@ class Storage(object):
try: try:
yield self.revision yield self.revision
self.revision = None self.revision = None
except: except Exception:
self.flush(False) self.flush(False)
self.revision = None self.revision = None
raise raise

View File

@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception):
pass pass
class InvalidSnapChannel(Exception):
pass
def _snap_exec(commands): def _snap_exec(commands):
""" """
Execute snap commands. Execute snap commands.
@ -132,3 +136,15 @@ def snap_refresh(packages, *flags):
log(message, level='INFO') log(message, level='INFO')
return _snap_exec(['refresh'] + flags + packages) return _snap_exec(['refresh'] + flags + packages)
def valid_snap_channel(channel):
""" Validate snap channel exists
:raises InvalidSnapChannel: When channel does not exist
:return: Boolean
"""
if channel.lower() in SNAP_CHANNELS:
return True
else:
raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))

View File

@ -572,7 +572,7 @@ def get_upstream_version(package):
cache = apt_cache() cache = apt_cache()
try: try:
pkg = cache[package] pkg = cache[package]
except: except Exception:
# the package is unknown to the current apt cache. # the package is unknown to the current apt cache.
return None return None

View File

@ -65,11 +65,20 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
""" """
this_service = {'name': 'cinder'} this_service = {'name': 'cinder'}
other_services = [ other_services = [
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}}, {'name': 'percona-cluster'},
{'name': 'rabbitmq-server'}, {'name': 'rabbitmq-server'},
{'name': 'keystone'}, {'name': 'keystone'},
{'name': 'glance'} {'name': 'glance'}
] ]
if self._get_openstack_release() >= self.xenial_pike:
# Pike and later, `openstack volume list` expects a compute
# endpoint in the catalog.
other_services.extend([
{'name': 'nova-compute'},
{'name': 'nova-cloud-controller'},
])
super(CinderBasicDeployment, self)._add_services(this_service, super(CinderBasicDeployment, self)._add_services(this_service,
other_services) other_services)
@ -85,6 +94,23 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
'glance:shared-db': 'percona-cluster:shared-db', 'glance:shared-db': 'percona-cluster:shared-db',
'glance:amqp': 'rabbitmq-server:amqp' 'glance:amqp': 'rabbitmq-server:amqp'
} }
if self._get_openstack_release() >= self.xenial_pike:
# Pike and later, `openstack volume list` expects a compute
# endpoint in the catalog.
relations.update({
'nova-compute:image-service': 'glance:image-service',
'nova-compute:shared-db': 'percona-cluster:shared-db',
'nova-compute:amqp': 'rabbitmq-server:amqp',
'nova-cloud-controller:shared-db': 'percona-cluster:shared-db',
'nova-cloud-controller:identity-service': 'keystone:'
'identity-service',
'nova-cloud-controller:amqp': 'rabbitmq-server:amqp',
'nova-cloud-controller:cloud-compute': 'nova-compute:'
'cloud-compute',
'nova-cloud-controller:image-service': 'glance:image-service',
})
super(CinderBasicDeployment, self)._add_relations(relations) super(CinderBasicDeployment, self)._add_relations(relations)
def _configure_services(self): def _configure_services(self):
@ -127,10 +153,8 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
'admin-token': 'ubuntutesting' 'admin-token': 'ubuntutesting'
} }
pxc_config = { pxc_config = {
'dataset-size': '25%', 'innodb-buffer-pool-size': '256M',
'max-connections': 1000, 'max-connections': 1000,
'root-password': 'ChangeMe123',
'sst-password': 'ChangeMe123',
} }
configs = { configs = {
'cinder': cinder_config, 'cinder': cinder_config,
@ -159,10 +183,15 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
tenant='admin') tenant='admin')
# Authenticate admin with cinder endpoint # Authenticate admin with cinder endpoint
if self._get_openstack_release() >= self.xenial_pike:
api_version = 2
else:
api_version = 1
self.cinder = u.authenticate_cinder_admin(self.keystone_sentry, self.cinder = u.authenticate_cinder_admin(self.keystone_sentry,
username='admin', username='admin',
password='openstack', password='openstack',
tenant='admin') tenant='admin',
api_version=api_version)
# Authenticate admin with glance endpoint # Authenticate admin with glance endpoint
self.glance = u.authenticate_glance_admin(self.keystone) self.glance = u.authenticate_glance_admin(self.keystone)
@ -297,7 +326,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
return None return None
def test_100_services(self): def HOLDtest_100_services(self):
"""Verify that the expected services are running on the """Verify that the expected services are running on the
cinder unit.""" cinder unit."""
services = { services = {
@ -310,38 +339,47 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
if self._get_openstack_release() < self.xenial_ocata: if self._get_openstack_release() < self.xenial_ocata:
services[self.cinder_sentry].append('cinder-api') services[self.cinder_sentry].append('cinder-api')
def test_110_memcache(self): def HOLDtest_110_memcache(self):
u.validate_memcache(self.cinder_sentry, u.validate_memcache(self.cinder_sentry,
'/etc/cinder/cinder.conf', '/etc/cinder/cinder.conf',
self._get_openstack_release(), self._get_openstack_release(),
earliest_release=self.trusty_mitaka) earliest_release=self.trusty_mitaka)
def test_110_users(self): def HOLDtest_110_users(self):
"""Verify expected users.""" """Verify expected users."""
u.log.debug('Checking keystone users...') u.log.debug('Checking keystone users...')
user0 = {'name': 'cinder_cinderv2', if self._get_openstack_release() < self.xenial_pike:
expected = [{
'name': 'cinder_cinderv2',
'enabled': True, 'enabled': True,
'tenantId': u.not_null, 'tenantId': u.not_null,
'id': u.not_null, 'id': u.not_null,
'email': 'juju@localhost'} 'email': 'juju@localhost',
user1 = {'name': 'admin', }]
else:
expected = [{
'name': 'cinderv3_cinderv2',
'enabled': True, 'enabled': True,
'tenantId': u.not_null, 'tenantId': u.not_null,
'id': u.not_null, 'id': u.not_null,
'email': 'juju@localhost'} 'email': 'juju@localhost',
user2 = {'name': 'glance', }]
expected.append({
'name': 'admin',
'enabled': True, 'enabled': True,
'tenantId': u.not_null, 'tenantId': u.not_null,
'id': u.not_null, 'id': u.not_null,
'email': 'juju@localhost'} 'email': 'juju@localhost',
expected = [user0, user1, user2] })
actual = self.keystone.users.list() actual = self.keystone.users.list()
ret = u.validate_user_data(expected, actual) ret = u.validate_user_data(expected, actual)
if ret: if ret:
amulet.raise_status(amulet.FAIL, msg=ret) amulet.raise_status(amulet.FAIL, msg=ret)
def test_112_service_catalog(self): def HOLDtest_112_service_catalog(self):
"""Verify that the service catalog endpoint data""" """Verify that the service catalog endpoint data"""
u.log.debug('Checking keystone service catalog...') u.log.debug('Checking keystone service catalog...')
endpoint_vol = {'adminURL': u.valid_url, endpoint_vol = {'adminURL': u.valid_url,
@ -356,6 +394,13 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
endpoint_vol['id'] = u.not_null endpoint_vol['id'] = u.not_null
endpoint_id['id'] = u.not_null endpoint_id['id'] = u.not_null
if self._get_openstack_release() >= self.xenial_pike:
# Pike and later
expected = {'image': [endpoint_id],
'identity': [endpoint_id],
'volumev2': [endpoint_id]}
else:
# Ocata and prior
expected = {'image': [endpoint_id], expected = {'image': [endpoint_id],
'identity': [endpoint_id], 'identity': [endpoint_id],
'volume': [endpoint_id]} 'volume': [endpoint_id]}
@ -365,7 +410,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
if ret: if ret:
amulet.raise_status(amulet.FAIL, msg=ret) amulet.raise_status(amulet.FAIL, msg=ret)
def test_114_cinder_endpoint(self): def HOLDtest_114_cinder_endpoint(self):
"""Verify the cinder endpoint data.""" """Verify the cinder endpoint data."""
u.log.debug('Checking cinder endpoint...') u.log.debug('Checking cinder endpoint...')
endpoints = self.keystone.endpoints.list() endpoints = self.keystone.endpoints.list()
@ -383,7 +428,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
amulet.raise_status(amulet.FAIL, amulet.raise_status(amulet.FAIL,
msg='cinder endpoint: {}'.format(ret)) msg='cinder endpoint: {}'.format(ret))
def test_202_cinder_glance_image_service_relation(self): def HOLDtest_202_cinder_glance_image_service_relation(self):
"""Verify the cinder:glance image-service relation data""" """Verify the cinder:glance image-service relation data"""
u.log.debug('Checking cinder:glance image-service relation data...') u.log.debug('Checking cinder:glance image-service relation data...')
unit = self.cinder_sentry unit = self.cinder_sentry
@ -394,7 +439,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
msg = u.relation_error('cinder image-service', ret) msg = u.relation_error('cinder image-service', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_203_glance_cinder_image_service_relation(self): def HOLDtest_203_glance_cinder_image_service_relation(self):
"""Verify the glance:cinder image-service relation data""" """Verify the glance:cinder image-service relation data"""
u.log.debug('Checking glance:cinder image-service relation data...') u.log.debug('Checking glance:cinder image-service relation data...')
unit = self.glance_sentry unit = self.glance_sentry
@ -408,7 +453,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
msg = u.relation_error('glance image-service', ret) msg = u.relation_error('glance image-service', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_204_mysql_cinder_db_relation(self): def HOLDtest_204_mysql_cinder_db_relation(self):
"""Verify the mysql:glance shared-db relation data""" """Verify the mysql:glance shared-db relation data"""
u.log.debug('Checking mysql:cinder db relation data...') u.log.debug('Checking mysql:cinder db relation data...')
unit = self.pxc_sentry unit = self.pxc_sentry
@ -422,7 +467,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
msg = u.relation_error('mysql shared-db', ret) msg = u.relation_error('mysql shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_205_cinder_mysql_db_relation(self): def HOLDtest_205_cinder_mysql_db_relation(self):
"""Verify the cinder:mysql shared-db relation data""" """Verify the cinder:mysql shared-db relation data"""
u.log.debug('Checking cinder:mysql db relation data...') u.log.debug('Checking cinder:mysql db relation data...')
unit = self.cinder_sentry unit = self.cinder_sentry
@ -438,7 +483,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
msg = u.relation_error('cinder shared-db', ret) msg = u.relation_error('cinder shared-db', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_206_keystone_cinder_id_relation(self): def HOLDtest_206_keystone_cinder_id_relation(self):
"""Verify the keystone:cinder identity-service relation data""" """Verify the keystone:cinder identity-service relation data"""
u.log.debug('Checking keystone:cinder id relation data...') u.log.debug('Checking keystone:cinder id relation data...')
unit = self.keystone_sentry unit = self.keystone_sentry
@ -454,27 +499,29 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
'auth_protocol': 'http', 'auth_protocol': 'http',
'private-address': u.valid_ip, 'private-address': u.valid_ip,
'auth_host': u.valid_ip, 'auth_host': u.valid_ip,
'service_username': 'cinder_cinderv2',
'service_tenant_id': u.not_null, 'service_tenant_id': u.not_null,
'service_host': u.valid_ip 'service_host': u.valid_ip
} }
if self._get_openstack_release() < self.xenial_pike:
# Ocata and earlier
expected['service_username'] = 'cinder_cinderv2'
else:
# Pike and later
expected['service_username'] = 'cinderv3_cinderv2'
ret = u.validate_relation_data(unit, relation, expected) ret = u.validate_relation_data(unit, relation, expected)
if ret: if ret:
msg = u.relation_error('identity-service cinder', ret) msg = u.relation_error('identity-service cinder', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_207_cinder_keystone_id_relation(self): def HOLDtest_207_cinder_keystone_id_relation(self):
"""Verify the cinder:keystone identity-service relation data""" """Verify the cinder:keystone identity-service relation data"""
u.log.debug('Checking cinder:keystone id relation data...') u.log.debug('Checking cinder:keystone id relation data...')
unit = self.cinder_sentry unit = self.cinder_sentry
relation = ['identity-service', relation = ['identity-service',
'keystone:identity-service'] 'keystone:identity-service']
expected = { expected = {
'cinder_service': 'cinder',
'cinder_region': 'RegionOne',
'cinder_public_url': u.valid_url,
'cinder_internal_url': u.valid_url,
'cinder_admin_url': u.valid_url,
'private-address': u.valid_ip 'private-address': u.valid_ip
} }
ret = u.validate_relation_data(unit, relation, expected) ret = u.validate_relation_data(unit, relation, expected)
@ -482,7 +529,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
msg = u.relation_error('cinder identity-service', ret) msg = u.relation_error('cinder identity-service', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_208_rabbitmq_cinder_amqp_relation(self): def HOLDtest_208_rabbitmq_cinder_amqp_relation(self):
"""Verify the rabbitmq-server:cinder amqp relation data""" """Verify the rabbitmq-server:cinder amqp relation data"""
u.log.debug('Checking rmq:cinder amqp relation data...') u.log.debug('Checking rmq:cinder amqp relation data...')
unit = self.rabbitmq_sentry unit = self.rabbitmq_sentry
@ -497,7 +544,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
msg = u.relation_error('amqp cinder', ret) msg = u.relation_error('amqp cinder', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_209_cinder_rabbitmq_amqp_relation(self): def HOLDtest_209_cinder_rabbitmq_amqp_relation(self):
"""Verify the cinder:rabbitmq-server amqp relation data""" """Verify the cinder:rabbitmq-server amqp relation data"""
u.log.debug('Checking cinder:rmq amqp relation data...') u.log.debug('Checking cinder:rmq amqp relation data...')
unit = self.cinder_sentry unit = self.cinder_sentry
@ -512,7 +559,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
msg = u.relation_error('cinder amqp', ret) msg = u.relation_error('cinder amqp', ret)
amulet.raise_status(amulet.FAIL, msg=msg) amulet.raise_status(amulet.FAIL, msg=msg)
def test_300_cinder_config(self): def HOLDtest_300_cinder_config(self):
"""Verify the data in the cinder.conf file.""" """Verify the data in the cinder.conf file."""
u.log.debug('Checking cinder config file data...') u.log.debug('Checking cinder config file data...')
unit = self.cinder_sentry unit = self.cinder_sentry
@ -605,7 +652,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
message = "cinder config error: {}".format(ret) message = "cinder config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message) amulet.raise_status(amulet.FAIL, msg=message)
def test_301_cinder_logging_config(self): def HOLDtest_301_cinder_logging_config(self):
"""Verify the data in the cinder logging conf file.""" """Verify the data in the cinder logging conf file."""
u.log.debug('Checking cinder logging config file data...') u.log.debug('Checking cinder logging config file data...')
unit = self.cinder_sentry unit = self.cinder_sentry
@ -632,7 +679,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
message = "cinder logging config error: {}".format(ret) message = "cinder logging config error: {}".format(ret)
amulet.raise_status(amulet.FAIL, msg=message) amulet.raise_status(amulet.FAIL, msg=message)
def test_303_cinder_rootwrap_config(self): def HOLDtest_303_cinder_rootwrap_config(self):
"""Inspect select config pairs in rootwrap.conf.""" """Inspect select config pairs in rootwrap.conf."""
u.log.debug('Checking cinder rootwrap config file data...') u.log.debug('Checking cinder rootwrap config file data...')
unit = self.cinder_sentry unit = self.cinder_sentry
@ -656,14 +703,14 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
u.log.debug('Cinder api check (volumes.list): {}'.format(check)) u.log.debug('Cinder api check (volumes.list): {}'.format(check))
assert(check == []) assert(check == [])
def test_401_create_delete_volume(self): def HOLDtest_401_create_delete_volume(self):
"""Create a cinder volume and delete it.""" """Create a cinder volume and delete it."""
u.log.debug('Creating, checking and deleting cinder volume...') u.log.debug('Creating, checking and deleting cinder volume...')
vol_new = u.create_cinder_volume(self.cinder) vol_new = u.create_cinder_volume(self.cinder)
vol_id = vol_new.id vol_id = vol_new.id
u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume") u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume")
def test_402_create_delete_volume_from_image(self): def HOLDtest_402_create_delete_volume_from_image(self):
"""Create a cinder volume from a glance image, and delete it.""" """Create a cinder volume from a glance image, and delete it."""
u.log.debug('Creating, checking and deleting cinder volume' u.log.debug('Creating, checking and deleting cinder volume'
'from glance image...') 'from glance image...')
@ -676,7 +723,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
u.delete_resource(self.glance.images, img_id, msg="glance image") u.delete_resource(self.glance.images, img_id, msg="glance image")
u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume") u.delete_resource(self.cinder.volumes, vol_id, msg="cinder volume")
def test_403_volume_snap_clone_extend_inspect(self): def HOLDtest_403_volume_snap_clone_extend_inspect(self):
"""Create a cinder volume, clone it, extend its size, create a """Create a cinder volume, clone it, extend its size, create a
snapshot of the volume, create a volume from a snapshot, check snapshot of the volume, create a volume from a snapshot, check
status of each, inspect underlying lvm, then delete the resources.""" status of each, inspect underlying lvm, then delete the resources."""
@ -725,7 +772,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
u.log.debug('Deleting volume {}...'.format(vol.id)) u.log.debug('Deleting volume {}...'.format(vol.id))
u.delete_resource(self.cinder.volumes, vol.id, msg="cinder volume") u.delete_resource(self.cinder.volumes, vol.id, msg="cinder volume")
def test_900_restart_on_config_change(self): def HOLDtest_900_restart_on_config_change(self):
"""Verify that the specified services are restarted when the """Verify that the specified services are restarted when the
config is changed.""" config is changed."""
@ -769,7 +816,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
self.d.configure(juju_service, set_default) self.d.configure(juju_service, set_default)
def test_910_pause_and_resume(self): def HOLDtest_910_pause_and_resume(self):
"""The services can be paused and resumed. """ """The services can be paused and resumed. """
u.log.debug('Checking pause and resume actions...') u.log.debug('Checking pause and resume actions...')
unit = self.d.sentry['cinder'][0] unit = self.d.sentry['cinder'][0]

View File

@ -303,20 +303,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
test scenario, based on OpenStack release and whether ceph radosgw test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not.""" is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo: if self._get_openstack_release() <= self.trusty_juno:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier # Juno or earlier
pools = [ pools = [
'data', 'data',
'metadata', 'metadata',
'rbd', 'rbd',
'cinder', 'cinder-ceph',
'glance'
]
elif (self.trust_kilo <= self._get_openstack_release() <=
self.zesty_ocata):
# Kilo through Ocata
pools = [
'rbd',
'cinder-ceph',
'glance'
]
else:
# Pike and later
pools = [
'cinder-ceph',
'glance' 'glance'
] ]

View File

@ -23,6 +23,7 @@ import urllib
import urlparse import urlparse
import cinderclient.v1.client as cinder_client import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client from keystoneclient.v2_0 import client as keystone_client
@ -351,12 +352,15 @@ class OpenStackAmuletUtils(AmuletUtils):
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username, def authenticate_cinder_admin(self, keystone_sentry, username,
password, tenant): password, tenant, api_version=2):
"""Authenticates admin user with cinder.""" """Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens. # NOTE(beisner): cinder python client doesn't accept tokens.
keystone_ip = keystone_sentry.info['public-address'] keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8')) ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
return cinder_client.Client(username, password, tenant, ept) _clients = {
1: cinder_client.Client,
2: cinder_clientv2.Client}
return _clients[api_version](username, password, tenant, ept)
def authenticate_keystone(self, keystone_ip, username, password, def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False, api_version=False, admin_port=False,
@ -617,7 +621,7 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Keypair ({}) already exists, ' self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name)) 'using it.'.format(keypair_name))
return _keypair return _keypair
except: except Exception:
self.log.debug('Keypair ({}) does not exist, ' self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name)) 'creating it.'.format(keypair_name))

View File

@ -218,6 +218,8 @@ def principal_unit():
for rid in relation_ids(reltype): for rid in relation_ids(reltype):
for unit in related_units(rid): for unit in related_units(rid):
md = _metadata_unit(unit) md = _metadata_unit(unit)
if not md:
continue
subordinate = md.pop('subordinate', None) subordinate = md.pop('subordinate', None)
if not subordinate: if not subordinate:
return unit return unit
@ -511,7 +513,10 @@ def _metadata_unit(unit):
""" """
basedir = os.sep.join(charm_dir().split(os.sep)[:-2]) basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
unitdir = 'unit-{}'.format(unit.replace(os.sep, '-')) unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md: joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
if not os.path.exists(joineddir):
return None
with open(joineddir) as md:
return yaml.safe_load(md) return yaml.safe_load(md)
@ -639,18 +644,31 @@ def is_relation_made(relation, keys='private-address'):
return False return False
def _port_op(op_name, port, protocol="TCP"):
"""Open or close a service network port"""
_args = [op_name]
icmp = protocol.upper() == "ICMP"
if icmp:
_args.append(protocol)
else:
_args.append('{}/{}'.format(port, protocol))
try:
subprocess.check_call(_args)
except subprocess.CalledProcessError:
# Older Juju pre 2.3 doesn't support ICMP
# so treat it as a no-op if it fails.
if not icmp:
raise
def open_port(port, protocol="TCP"): def open_port(port, protocol="TCP"):
"""Open a service network port""" """Open a service network port"""
_args = ['open-port'] _port_op('open-port', port, protocol)
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def close_port(port, protocol="TCP"): def close_port(port, protocol="TCP"):
"""Close a service network port""" """Close a service network port"""
_args = ['close-port'] _port_op('close-port', port, protocol)
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def open_ports(start, end, protocol="TCP"): def open_ports(start, end, protocol="TCP"):
@ -667,6 +685,17 @@ def close_ports(start, end, protocol="TCP"):
subprocess.check_call(_args) subprocess.check_call(_args)
def opened_ports():
"""Get the opened ports
*Note that this will only show ports opened in a previous hook*
:returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
"""
_args = ['opened-ports', '--format=json']
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
@cached @cached
def unit_get(attribute): def unit_get(attribute):
"""Get the unit ID for the remote unit""" """Get the unit ID for the remote unit"""
@ -1077,6 +1106,35 @@ def network_get_primary_address(binding):
return subprocess.check_output(cmd).decode('UTF-8').strip() return subprocess.check_output(cmd).decode('UTF-8').strip()
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None):
    """
    Retrieve the network details for a relation endpoint

    :param endpoint: string. The name of a relation endpoint
    :param relation_id: int. The ID of the relation for the current context.
    :return: dict. The loaded YAML output of the network-get query.
    :raise: NotImplementedError if run on Juju < 2.1
    """
    cmd = ['network-get', endpoint, '--format', 'yaml']
    if relation_id:
        cmd.append('-r')
        # Coerce to str: subprocess argv entries must be strings, and the
        # documented type for relation_id is int.
        cmd.append(str(relation_id))
    try:
        response = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT).decode('UTF-8').strip()
    except CalledProcessError as e:
        # Early versions of Juju 2.0.x required the --primary-address argument.
        # We catch that condition here and raise NotImplementedError since
        # the requested semantics are not available - the caller can then
        # use the network_get_primary_address() method instead.
        if '--primary-address is currently required' in e.output.decode('UTF-8'):
            raise NotImplementedError
        raise
    return yaml.safe_load(response)
def add_metric(*args, **kwargs): def add_metric(*args, **kwargs):
"""Add metric values. Values may be expressed with keyword arguments. For """Add metric values. Values may be expressed with keyword arguments. For
metric names containing dashes, these may be expressed as one or more metric names containing dashes, these may be expressed as one or more

View File

@ -34,7 +34,7 @@ import six
from contextlib import contextmanager from contextlib import contextmanager
from collections import OrderedDict from collections import OrderedDict
from .hookenv import log, DEBUG from .hookenv import log, DEBUG, local_unit
from .fstab import Fstab from .fstab import Fstab
from charmhelpers.osplatform import get_platform from charmhelpers.osplatform import get_platform
@ -441,6 +441,49 @@ def add_user_to_group(username, group):
subprocess.check_call(cmd) subprocess.check_call(cmd)
def chage(username, lastday=None, expiredate=None, inactive=None,
          mindays=None, maxdays=None, root=None, warndays=None):
    """Change user password expiry information

    :param str username: User to update
    :param str lastday: Set when password was changed in YYYY-MM-DD format
    :param str expiredate: Set when user's account will no longer be
                           accessible in YYYY-MM-DD format.
                           -1 will remove an account expiration date.
    :param str inactive: Set the number of days of inactivity after a password
                         has expired before the account is locked.
                         -1 will remove an account's inactivity.
    :param str mindays: Set the minimum number of days between password
                        changes to MIN_DAYS.
                        0 indicates the password can be changed anytime.
    :param str maxdays: Set the maximum number of days during which a
                        password is valid.
                        -1 as MAX_DAYS will remove checking maxdays
    :param str root: Apply changes in the CHROOT_DIR directory
    :param str warndays: Set the number of days of warning before a password
                         change is required
    :raises subprocess.CalledProcessError: if call to chage fails
    """
    # Assemble the command table-style; only flags with a value are emitted,
    # in the same order the chage(1) invocation has always used here.
    flag_values = (
        ('--root', root),
        ('--lastday', lastday),
        ('--expiredate', expiredate),
        ('--inactive', inactive),
        ('--mindays', mindays),
        ('--maxdays', maxdays),
        ('--warndays', warndays),
    )
    cmd = ['chage']
    for flag, value in flag_values:
        if value:
            cmd.extend([flag, value])
    cmd.append(username)
    subprocess.check_call(cmd)

remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
def rsync(from_path, to_path, flags='-r', options=None, timeout=None): def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path""" """Replicate the contents of a path"""
options = options or ['--delete', '--executability'] options = options or ['--delete', '--executability']
@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path):
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths)) lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines) output = "\n".join(lines)
return output return output
def modulo_distribution(modulo=3, wait=30):
    """ Modulo distribution

    This helper uses the unit number, a modulo value and a constant wait time
    to produce a calculated wait time distribution. This is useful in large
    scale deployments to distribute load during an expensive operation such as
    service restarts.

    If you have 1000 nodes that need to restart 100 at a time 1 minute at a
    time:

      time.wait(modulo_distribution(modulo=100, wait=60))
      restart()

    If you need restarts to happen serially set modulo to the exact number of
    nodes and set a high constant wait time:

      time.wait(modulo_distribution(modulo=10, wait=120))
      restart()

    @param modulo: int The modulo number creates the group distribution
    @param wait: int The constant time wait value
    @return: int Calculated time to wait for unit operation
    """
    # Unit names look like 'app/N'; the numeric suffix picks the group.
    unit_number = int(local_unit().split('/')[1])
    group = unit_number % modulo
    return group * wait

View File

@ -358,7 +358,7 @@ class Storage(object):
try: try:
yield self.revision yield self.revision
self.revision = None self.revision = None
except: except Exception:
self.flush(False) self.flush(False)
self.revision = None self.revision = None
raise raise

View File

@ -21,3 +21,6 @@ from basic_deployment import CinderBasicDeployment
if __name__ == '__main__': if __name__ == '__main__':
deployment = CinderBasicDeployment(series='artful') deployment = CinderBasicDeployment(series='artful')
deployment.run_tests() deployment.run_tests()
# NOTE(beisner): Artful target disabled, pending bug:
# https://bugs.launchpad.net/charm-percona-cluster/+bug/1728132

0
tests/gate-basic-xenial-pike Normal file → Executable file
View File

View File

@ -60,7 +60,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
commands = commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
[testenv:func27-dfs] [testenv:func27-dfs]
# Charm Functional Test # Charm Functional Test