Enable xenial-pike amulet test

Make default func27-smoke xenial-pike
Charm-helpers sync

Change-Id: I289d38e4170d204fbf9b0281b28be28c9e847e65
Author: David Ames
Date: 2017-11-15 16:09:49 -08:00
Parent: 7c065062d2
Commit: 7ecfa30b00
27 changed files with 379 additions and 115 deletions

@@ -30,6 +30,7 @@ import yaml
 from charmhelpers.core.hookenv import (
     config,
+    hook_name,
     local_unit,
     log,
     relation_ids,
@@ -285,7 +286,7 @@ class NRPE(object):
         try:
             nagios_uid = pwd.getpwnam('nagios').pw_uid
             nagios_gid = grp.getgrnam('nagios').gr_gid
-        except:
+        except Exception:
             log("Nagios user not set up, nrpe checks not updated")
             return
@@ -302,7 +303,12 @@ class NRPE(object):
                 "command": nrpecheck.command,
             }

-        service('restart', 'nagios-nrpe-server')
+        # update-status hooks are configured to firing every 5 minutes by
+        # default. When nagios-nrpe-server is restarted, the nagios server
+        # reports checks failing causing unneccessary alerts. Let's not restart
+        # on update-status hooks.
+        if not hook_name() == 'update-status':
+            service('restart', 'nagios-nrpe-server')

         monitor_ids = relation_ids("local-monitors") + \
             relation_ids("nrpe-external-master")
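The new guard depends on charmhelpers.core.hookenv.hook_name(), which (roughly) reports the name of the hook Juju is currently running. A standalone sketch of the same pattern, with a stand-in hook_name() and a hypothetical restart callback:

    import os
    import sys


    def hook_name():
        # Rough stand-in for charmhelpers.core.hookenv.hook_name(): Juju invokes
        # hooks by path, so the basename of the running script is the hook name.
        return os.path.basename(sys.argv[0])


    def maybe_restart_nrpe(restart_service):
        # Skip the restart during the frequent update-status hook so the nagios
        # server does not see transient check failures; restart on any other hook.
        if hook_name() != 'update-status':
            restart_service('nagios-nrpe-server')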

@@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None):
     if not ip_addr:
         try:
             ip_addr = socket.gethostbyname(hostname)
-        except:
+        except Exception:
             log("Failed to resolve hostname '%s'" % (hostname),
                 level=WARNING)
             return fallback
@@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True):
         if not result:
             try:
                 result = socket.gethostbyaddr(address)[0]
-            except:
+            except Exception:
                 return None
     else:
         result = address
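Many hunks in this sync replace bare "except:" clauses with "except Exception:". The difference matters because a bare except also swallows BaseException subclasses such as KeyboardInterrupt and SystemExit. A small self-contained illustration (helper name is made up):

    def safe_call(func, *args, **kwargs):
        # 'except Exception' lets KeyboardInterrupt and SystemExit propagate,
        # while still catching ordinary runtime failures.
        fallback = kwargs.pop('fallback', None)
        try:
            return func(*args)
        except Exception:
            return fallback


    print(safe_call(int, '42'))                # 42
    print(safe_call(int, 'oops', fallback=0))  # 0 (ValueError swallowed)
    # A bare 'except:' here would also swallow KeyboardInterrupt/SystemExit.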

@@ -250,7 +250,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         self.log.debug('Waiting up to {}s for extended status on services: '
                        '{}'.format(timeout, services))
         service_messages = {service: message for service in services}
+
+        # Check for idleness
+        self.d.sentry.wait()
+        # Check for error states and bail early
+        self.d.sentry.wait_for_status(self.d.juju_env, services)
+        # Check for ready messages
         self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+
         self.log.info('OK')

     def _get_openstack_release(self):
@@ -303,20 +310,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         test scenario, based on OpenStack release and whether ceph radosgw
         is flagged as present or not."""
-        if self._get_openstack_release() >= self.trusty_kilo:
-            # Kilo or later
-            pools = [
-                'rbd',
-                'cinder',
-                'glance'
-            ]
-        else:
-            # Juno or earlier
-            pools = [
-                'data',
-                'metadata',
-                'rbd',
-                'cinder',
-                'glance'
-            ]
+        if self._get_openstack_release() <= self.trusty_juno:
+            # Juno or earlier
+            pools = [
+                'data',
+                'metadata',
+                'rbd',
+                'cinder-ceph',
+                'glance'
+            ]
+        elif (self.trust_kilo <= self._get_openstack_release() <=
+              self.zesty_ocata):
+            # Kilo through Ocata
+            pools = [
+                'rbd',
+                'cinder-ceph',
+                'glance'
+            ]
+        else:
+            # Pike and later
+            pools = [
+                'cinder-ceph',
+                'glance'
+            ]
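The three-way branch works because the amulet deployment class exposes each release as an ordinal that increases with release age, so release names can be compared with <= directly. A rough, self-contained sketch of the idea (made-up release list and ordinals, not the amulet implementation):

    # Hypothetical ordinals in release order; the real attributes are set up
    # by OpenStackAmuletDeployment._get_openstack_release().
    RELEASES = ['trusty_icehouse', 'trusty_juno', 'trusty_kilo',
                'xenial_mitaka', 'zesty_ocata', 'xenial_pike']
    ORDINAL = {name: i for i, name in enumerate(RELEASES)}


    def expected_pools(release):
        r = ORDINAL[release]
        if r <= ORDINAL['trusty_juno']:
            return ['data', 'metadata', 'rbd', 'cinder-ceph', 'glance']
        elif r <= ORDINAL['zesty_ocata']:
            return ['rbd', 'cinder-ceph', 'glance']
        return ['cinder-ceph', 'glance']   # Pike and later


    print(expected_pools('xenial_pike'))   # ['cinder-ceph', 'glance']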

@@ -23,6 +23,7 @@ import urllib
 import urlparse

 import cinderclient.v1.client as cinder_client
+import cinderclient.v2.client as cinder_clientv2
 import glanceclient.v1.client as glance_client
 import heatclient.v1.client as heat_client
 from keystoneclient.v2_0 import client as keystone_client
@@ -42,7 +43,6 @@ import swiftclient
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
-from charmhelpers.core.decorators import retry_on_exception
 from charmhelpers.core.host import CompareHostReleases

 DEBUG = logging.DEBUG
@@ -310,7 +310,6 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]

-    @retry_on_exception(5, base_delay=10)
     def keystone_wait_for_propagation(self, sentry_relation_pairs,
                                       api_version):
         """Iterate over list of sentry and relation tuples and verify that
@@ -326,7 +325,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             rel = sentry.relation('identity-service',
                                   relation_name)
             self.log.debug('keystone relation data: {}'.format(rel))
-            if rel['api_version'] != str(api_version):
+            if rel.get('api_version') != str(api_version):
                 raise Exception("api_version not propagated through relation"
                                 " data yet ('{}' != '{}')."
                                 "".format(rel['api_version'], api_version))
@@ -348,15 +347,19 @@ class OpenStackAmuletUtils(AmuletUtils):
         config = {'preferred-api-version': api_version}
         deployment.d.configure('keystone', config)
+        deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)

     def authenticate_cinder_admin(self, keystone_sentry, username,
-                                  password, tenant):
+                                  password, tenant, api_version=2):
         """Authenticates admin user with cinder."""
         # NOTE(beisner): cinder python client doesn't accept tokens.
         keystone_ip = keystone_sentry.info['public-address']
         ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
-        return cinder_client.Client(username, password, tenant, ept)
+        _clients = {
+            1: cinder_client.Client,
+            2: cinder_clientv2.Client}
+        return _clients[api_version](username, password, tenant, ept)

     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -617,13 +620,25 @@ class OpenStackAmuletUtils(AmuletUtils):
                 self.log.debug('Keypair ({}) already exists, '
                                'using it.'.format(keypair_name))
                 return _keypair
-        except:
+        except Exception:
             self.log.debug('Keypair ({}) does not exist, '
                            'creating it.'.format(keypair_name))

         _keypair = nova.keypairs.create(name=keypair_name)
         return _keypair

+    def _get_cinder_obj_name(self, cinder_object):
+        """Retrieve name of cinder object.
+
+        :param cinder_object: cinder snapshot or volume object
+        :returns: str cinder object name
+        """
+        # v1 objects store name in 'display_name' attr but v2+ use 'name'
+        try:
+            return cinder_object.display_name
+        except AttributeError:
+            return cinder_object.name
+
     def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                              img_id=None, src_vol_id=None, snap_id=None):
         """Create cinder volume, optionally from a glance image, OR
@@ -674,6 +689,13 @@ class OpenStackAmuletUtils(AmuletUtils):
                                             source_volid=src_vol_id,
                                             snapshot_id=snap_id)
             vol_id = vol_new.id
+        except TypeError:
+            vol_new = cinder.volumes.create(name=vol_name,
+                                            imageRef=img_id,
+                                            size=vol_size,
+                                            source_volid=src_vol_id,
+                                            snapshot_id=snap_id)
+            vol_id = vol_new.id
         except Exception as e:
             msg = 'Failed to create volume: {}'.format(e)
             amulet.raise_status(amulet.FAIL, msg=msg)
@@ -688,7 +710,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         # Re-validate new volume
         self.log.debug('Validating volume attributes...')
-        val_vol_name = cinder.volumes.get(vol_id).display_name
+        val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
         val_vol_boot = cinder.volumes.get(vol_id).bootable
         val_vol_stat = cinder.volumes.get(vol_id).status
         val_vol_size = cinder.volumes.get(vol_id).size
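Together, these two helpers let the same test code drive both cinder API versions: the client class is chosen by api_version, and the display_name/name attribute difference is hidden behind one accessor. A self-contained sketch of the dispatch pattern (the namedtuple stand-ins replace the real cinderclient classes so it runs without a cloud):

    import collections

    # Stand-ins for cinderclient.v1.client.Client / v2.client.Client.
    ClientV1 = collections.namedtuple('ClientV1', 'username password tenant ept')
    ClientV2 = collections.namedtuple('ClientV2', 'username password tenant ept')

    _clients = {1: ClientV1, 2: ClientV2}


    def authenticate_cinder_admin(username, password, tenant, ept, api_version=2):
        # Same shape as the helper above: pick the client class by API version.
        return _clients[api_version](username, password, tenant, ept)


    def obj_name(cinder_object):
        # v1 volume/snapshot objects expose 'display_name'; v2+ expose 'name'.
        try:
            return cinder_object.display_name
        except AttributeError:
            return cinder_object.name


    client = authenticate_cinder_admin('admin', 'pw', 'admin',
                                       'http://10.0.0.1:5000/v2.0',
                                       api_version=1)
    print(type(client).__name__)   # ClientV1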

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import collections
 import glob
 import json
 import math
@@ -578,11 +579,14 @@ class HAProxyContext(OSContextGenerator):
             laddr = get_address_in_network(config(cfg_opt))
             if laddr:
                 netmask = get_netmask_for_address(laddr)
-                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
-                                                                  netmask),
-                                        'backends': {l_unit: laddr}}
+                cluster_hosts[laddr] = {
+                    'network': "{}/{}".format(laddr,
+                                              netmask),
+                    'backends': collections.OrderedDict([(l_unit,
+                                                          laddr)])
+                }
                 for rid in relation_ids('cluster'):
-                    for unit in related_units(rid):
+                    for unit in sorted(related_units(rid)):
                         _laddr = relation_get('{}-address'.format(addr_type),
                                               rid=rid, unit=unit)
                         if _laddr:
@@ -594,10 +598,13 @@ class HAProxyContext(OSContextGenerator):
         # match in the frontend
         cluster_hosts[addr] = {}
         netmask = get_netmask_for_address(addr)
-        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
-                               'backends': {l_unit: addr}}
+        cluster_hosts[addr] = {
+            'network': "{}/{}".format(addr, netmask),
+            'backends': collections.OrderedDict([(l_unit,
+                                                  addr)])
+        }
         for rid in relation_ids('cluster'):
-            for unit in related_units(rid):
+            for unit in sorted(related_units(rid)):
                 _laddr = relation_get('private-address',
                                       rid=rid, unit=unit)
                 if _laddr:
@@ -628,6 +635,8 @@ class HAProxyContext(OSContextGenerator):
         ctxt['local_host'] = '127.0.0.1'
         ctxt['haproxy_host'] = '0.0.0.0'

+        ctxt['ipv6_enabled'] = not is_ipv6_disabled()
+
         ctxt['stat_port'] = '8888'

         db = kv()
@@ -844,15 +853,6 @@ class NeutronContext(OSContextGenerator):
         for pkgs in self.packages:
             ensure_packages(pkgs)

-    def _save_flag_file(self):
-        if self.network_manager == 'quantum':
-            _file = '/etc/nova/quantum_plugin.conf'
-        else:
-            _file = '/etc/nova/neutron_plugin.conf'
-
-        with open(_file, 'wb') as out:
-            out.write(self.plugin + '\n')
-
     def ovs_ctxt(self):
         driver = neutron_plugin_attribute(self.plugin, 'driver',
                                           self.network_manager)
@@ -997,7 +997,6 @@ class NeutronContext(OSContextGenerator):
             flags = config_flags_parser(alchemy_flags)
             ctxt['neutron_alchemy_flags'] = flags

-        self._save_flag_file()
         return ctxt
@@ -1177,7 +1176,7 @@ class SubordinateConfigContext(OSContextGenerator):
             if sub_config and sub_config != '':
                 try:
                     sub_config = json.loads(sub_config)
-                except:
+                except Exception:
                     log('Could not parse JSON from '
                         'subordinate_configuration setting from %s'
                         % rid, level=ERROR)
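The switch to sorted(related_units(rid)) plus collections.OrderedDict makes the rendered haproxy backend ordering deterministic, so repeated renders do not differ merely in dict iteration order (which would otherwise churn the config and restart haproxy for no reason). A small self-contained illustration:

    import collections

    units = ['keystone/2', 'keystone/0', 'keystone/1']
    addresses = {'keystone/2': '10.0.0.12',
                 'keystone/0': '10.0.0.10',
                 'keystone/1': '10.0.0.11'}

    # Sorting the units first and preserving insertion order with OrderedDict
    # gives the same backend ordering on every render and on every unit.
    backends = collections.OrderedDict(
        (unit, addresses[unit]) for unit in sorted(units))

    print(list(backends))   # ['keystone/0', 'keystone/1', 'keystone/2']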

@@ -18,7 +18,7 @@ rbd default features = {{ rbd_features }}
 [client]
 {% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.iteritems() -%}
+{% for key, value in rbd_client_cache_settings.items() -%}
 {{ key }} = {{ value }}
 {% endfor -%}
 {%- endif %}

@@ -48,7 +48,9 @@ listen stats
 {% for service, ports in service_ports.items() -%}
 frontend tcp-in_{{ service }}
     bind *:{{ ports[0] }}
+    {% if ipv6_enabled -%}
     bind :::{{ ports[0] }}
+    {% endif -%}
     {% for frontend in frontends -%}
     acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
     use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
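With the new ipv6_enabled context key, the IPv6 bind line is only emitted when the host actually has IPv6 enabled. A quick Jinja2 sketch of the conditional (a simplified template, not the full haproxy.cfg, and it assumes jinja2 is installed):

    import jinja2

    template = jinja2.Template(
        "frontend tcp-in_{{ service }}\n"
        "    bind *:{{ port }}\n"
        "{% if ipv6_enabled %}"
        "    bind :::{{ port }}\n"
        "{% endif %}")

    # ipv6_enabled comes from HAProxyContext as: not is_ipv6_disabled()
    print(template.render(service='keystone', port=5000, ipv6_enabled=True))
    print(template.render(service='keystone', port=5000, ipv6_enabled=False))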

@@ -272,6 +272,8 @@ class OSConfigRenderer(object):
             raise OSConfigException

         _out = self.render(config_file)
+        if six.PY3:
+            _out = _out.encode('UTF-8')

         with open(config_file, 'wb') as out:
             out.write(_out)

@@ -426,7 +426,7 @@ def get_os_codename_package(package, fatal=True):
     try:
         pkg = cache[package]
-    except:
+    except Exception:
         if not fatal:
             return None
         # the package is unknown to the current apt cache.
@@ -618,7 +618,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
     juju_rc_path = "%s/%s" % (charm_dir(), script_path)
     if not os.path.exists(os.path.dirname(juju_rc_path)):
         os.mkdir(os.path.dirname(juju_rc_path))
-    with open(juju_rc_path, 'wb') as rc_script:
+    with open(juju_rc_path, 'wt') as rc_script:
         rc_script.write(
             "#!/bin/bash\n")
         [rc_script.write('export %s=%s\n' % (u, p))
@@ -797,7 +797,7 @@ def git_default_repos(projects_yaml):
     service = service_name()
     core_project = service

-    for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
+    for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
         if projects_yaml == default:

             # add the requirements repo first
@@ -1618,7 +1618,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
             upgrade_callback(configs=configs)
             action_set({'outcome': 'success, upgrade completed.'})
             ret = True
-        except:
+        except Exception:
             action_set({'outcome': 'upgrade failed, see traceback.'})
             action_set({'traceback': traceback.format_exc()})
             action_fail('do_openstack_upgrade resulted in an '
@@ -1723,7 +1723,7 @@ def is_unit_paused_set():
         kv = t[0]
         # transform something truth-y into a Boolean.
         return not(not(kv.get('unit-paused')))
-    except:
+    except Exception:
         return False
@@ -2051,7 +2051,7 @@ def update_json_file(filename, items):
 def snap_install_requested():
     """ Determine if installing from snaps

-    If openstack-origin is of the form snap:track/channel
+    If openstack-origin is of the form snap:track/channel[/branch]
     and channel is in SNAPS_CHANNELS return True.
     """
     origin = config('openstack-origin') or ""
@@ -2060,9 +2060,9 @@ def snap_install_requested():
     _src = origin[5:]
     if '/' in _src:
-        _track, channel = _src.split('/')
+        channel = _src.split('/')[1]
     else:
-        # Hanlde snap:track with no channel
+        # Handle snap:track with no channel
         channel = 'stable'
     return valid_snap_channel(channel)
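The simplified parsing takes only the second path component as the channel, so an optional /branch suffix no longer breaks detection. A small sketch of the same logic, with valid_snap_channel replaced by an assumed channel list:

    # Assumed stand-in for charm-helpers' valid_snap_channel().
    SNAP_CHANNELS = ('edge', 'beta', 'candidate', 'stable')


    def snap_channel_from_origin(origin):
        if not origin.startswith('snap:'):
            return None
        _src = origin[5:]
        if '/' in _src:
            # snap:track/channel or snap:track/channel/branch - the channel
            # is always the second component.
            channel = _src.split('/')[1]
        else:
            # Handle snap:track with no channel
            channel = 'stable'
        return channel if channel in SNAP_CHANNELS else None


    print(snap_channel_from_origin('snap:ocata/stable'))          # stable
    print(snap_channel_from_origin('snap:ocata/beta/fix-1234'))   # beta
    print(snap_channel_from_origin('snap:ocata'))                 # stable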

@@ -49,6 +49,6 @@ def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
         open_port(port)
         debugger = Rpdb(addr=addr, port=port)
         debugger.set_trace(sys._getframe().f_back)
-    except:
+    except Exception:
         _error("Cannot start a remote debug session on %s:%s" % (addr,
                                                                  port))

@@ -370,9 +370,10 @@ def get_mon_map(service):
     Also raises CalledProcessError if our ceph command fails
     """
     try:
-        mon_status = check_output(
-            ['ceph', '--id', service,
-             'mon_status', '--format=json'])
+        mon_status = check_output(['ceph', '--id', service,
+                                   'mon_status', '--format=json'])
+        if six.PY3:
+            mon_status = mon_status.decode('UTF-8')
         try:
             return json.loads(mon_status)
         except ValueError as v:
@@ -457,7 +458,7 @@ def monitor_key_get(service, key):
     try:
         output = check_output(
             ['ceph', '--id', service,
-             'config-key', 'get', str(key)])
+             'config-key', 'get', str(key)]).decode('UTF-8')
         return output
     except CalledProcessError as e:
         log("Monitor config-key get failed with message: {}".format(
@@ -500,6 +501,8 @@ def get_erasure_profile(service, name):
         out = check_output(['ceph', '--id', service,
                             'osd', 'erasure-code-profile', 'get',
                             name, '--format=json'])
+        if six.PY3:
+            out = out.decode('UTF-8')
         return json.loads(out)
     except (CalledProcessError, OSError, ValueError):
         return None
@@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name):
     """
     validator(value=service, valid_type=six.string_types)
     validator(value=pool_name, valid_type=six.string_types)
-    out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
+    out = check_output(['ceph', '--id', service,
+                        'osd', 'dump', '--format=json'])
+    if six.PY3:
+        out = out.decode('UTF-8')
     try:
         osd_json = json.loads(out)
         for pool in osd_json['pools']:
@@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name):
 def pool_exists(service, name):
     """Check to see if a RADOS pool already exists."""
     try:
-        out = check_output(['rados', '--id', service,
-                            'lspools']).decode('UTF-8')
+        out = check_output(['rados', '--id', service, 'lspools'])
+        if six.PY3:
+            out = out.decode('UTF-8')
     except CalledProcessError:
         return False
@@ -714,9 +721,12 @@ def get_osds(service):
     """
     version = ceph_version()
     if version and version >= '0.56':
-        return json.loads(check_output(['ceph', '--id', service,
-                                        'osd', 'ls',
-                                        '--format=json']).decode('UTF-8'))
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'ls',
+                            '--format=json'])
+        if six.PY3:
+            out = out.decode('UTF-8')
+        return json.loads(out)

     return None
@@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img):
     """Check to see if a RADOS block device exists."""
     try:
         out = check_output(['rbd', 'list', '--id',
-                            service, '--pool', pool]).decode('UTF-8')
+                            service, '--pool', pool])
+        if six.PY3:
+            out = out.decode('UTF-8')
     except CalledProcessError:
         return False
@@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog):
 def image_mapped(name):
     """Determine whether a RADOS block device is mapped locally."""
     try:
-        out = check_output(['rbd', 'showmapped']).decode('UTF-8')
+        out = check_output(['rbd', 'showmapped'])
+        if six.PY3:
+            out = out.decode('UTF-8')
     except CalledProcessError:
         return False
@@ -1018,7 +1032,9 @@ def ceph_version():
     """Retrieve the local version of ceph."""
     if os.path.exists('/usr/bin/ceph'):
         cmd = ['ceph', '-v']
-        output = check_output(cmd).decode('US-ASCII')
+        output = check_output(cmd)
+        if six.PY3:
+            output = output.decode('UTF-8')
         output = output.split()
         if len(output) > 3:
             return output[2]
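The repeated pattern in this file exists because subprocess.check_output returns bytes on Python 3 but str on Python 2; decoding only under six.PY3 keeps the helpers working on both. A minimal self-contained version of the pattern (the helper name is made up, and it only does something useful on a host with the ceph CLI):

    import json
    import six
    from subprocess import check_output


    def ceph_json(*args):
        # check_output returns bytes on py3; the downstream string handling
        # (and json.loads on older py3 releases) expects text, so decode.
        out = check_output(list(args) + ['--format=json'])
        if six.PY3:
            out = out.decode('UTF-8')
        return json.loads(out)

    # e.g. ceph_json('ceph', '--id', 'admin', 'osd', 'dump') on a ceph node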

@@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device):
     '''
     vg = None
     pvd = check_output(['pvdisplay', block_device]).splitlines()
-    for l in pvd:
-        l = l.decode('UTF-8')
-        if l.strip().startswith('VG Name'):
-            vg = ' '.join(l.strip().split()[2:])
+    for lvm in pvd:
+        lvm = lvm.decode('UTF-8')
+        if lvm.strip().startswith('VG Name'):
+            vg = ' '.join(lvm.strip().split()[2:])
     return vg

@@ -64,6 +64,6 @@ def is_device_mounted(device):
     '''
     try:
         out = check_output(['lsblk', '-P', device]).decode('UTF-8')
-    except:
+    except Exception:
         return False
     return bool(re.search(r'MOUNTPOINT=".+"', out))

@@ -283,7 +283,7 @@ def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None,
     try:
         log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
         run_as_user(user, cmd, gid)
-    except:
+    except Exception:
         log('Error syncing remote files')
         if fatal:
             raise

@@ -22,6 +22,7 @@ from __future__ import print_function
 import copy
 from distutils.version import LooseVersion
 from functools import wraps
+from collections import namedtuple
 import glob
 import os
 import json
@@ -644,18 +645,31 @@ def is_relation_made(relation, keys='private-address'):
     return False


+def _port_op(op_name, port, protocol="TCP"):
+    """Open or close a service network port"""
+    _args = [op_name]
+    icmp = protocol.upper() == "ICMP"
+    if icmp:
+        _args.append(protocol)
+    else:
+        _args.append('{}/{}'.format(port, protocol))
+    try:
+        subprocess.check_call(_args)
+    except subprocess.CalledProcessError:
+        # Older Juju pre 2.3 doesn't support ICMP
+        # so treat it as a no-op if it fails.
+        if not icmp:
+            raise
+
+
 def open_port(port, protocol="TCP"):
     """Open a service network port"""
-    _args = ['open-port']
-    _args.append('{}/{}'.format(port, protocol))
-    subprocess.check_call(_args)
+    _port_op('open-port', port, protocol)


 def close_port(port, protocol="TCP"):
     """Close a service network port"""
-    _args = ['close-port']
-    _args.append('{}/{}'.format(port, protocol))
-    subprocess.check_call(_args)
+    _port_op('close-port', port, protocol)


 def open_ports(start, end, protocol="TCP"):
@@ -1101,13 +1115,24 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if run on Juju < 2.0
+    :raise: NotImplementedError if run on Juju < 2.1
     """
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    response = subprocess.check_output(cmd).decode('UTF-8').strip()
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        # Early versions of Juju 2.0.x required the --primary-address argument.
+        # We catch that condition here and raise NotImplementedError since
+        # the requested semantics are not available - the caller can then
+        # use the network_get_primary_address() method instead.
+        if '--primary-address is currently required' in e.output.decode('UTF-8'):
+            raise NotImplementedError
+        raise
     return yaml.safe_load(response)
@@ -1140,3 +1165,42 @@ def meter_info():
     """Get the meter status information, if running in the meter-status-changed
     hook."""
     return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+    """Iterate through all units in a relation
+
+    Generator that iterates through all the units in a relation and yields
+    a named tuple with rid and unit field names.
+
+    Usage:
+    data = [(u.rid, u.unit)
+            for u in iter_units_for_relation_name(relation_name)]
+
+    :param relation_name: string relation name
+    :yield: Named Tuple with rid and unit field names
+    """
+    RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+    for rid in relation_ids(relation_name):
+        for unit in related_units(rid):
+            yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+    """
+    Retrieve the ingress-address from a relation when available. Otherwise,
+    return the private-address. This function is to be used on the consuming
+    side of the relation.
+
+    Usage:
+    addresses = [ingress_address(rid=u.rid, unit=u.unit)
+                 for u in iter_units_for_relation_name(relation_name)]
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: string IP address
+    """
+    settings = relation_get(rid=rid, unit=unit)
+    return (settings.get('ingress-address') or
+            settings.get('private-address'))
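With the new error handling, network_get() raises NotImplementedError when the running Juju cannot answer the query, so callers on older Juju 2.0.x can fall back to network_get_primary_address(). A hedged caller-side sketch (the 'ingress-addresses' key shape is an assumption about newer Juju output; binding_address itself is a made-up helper):

    from charmhelpers.core.hookenv import (
        network_get,
        network_get_primary_address,
    )


    def binding_address(endpoint):
        try:
            info = network_get(endpoint)
            # Newer Juju is assumed to return 'ingress-addresses'; fall back
            # if the key is absent or empty.
            addresses = info.get('ingress-addresses') or []
            if addresses:
                return addresses[0]
        except NotImplementedError:
            # Juju 2.0.x without full network-get support.
            pass
        return network_get_primary_address(endpoint)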

@@ -61,13 +61,19 @@ def bytes_from_string(value):
     if isinstance(value, six.string_types):
         value = six.text_type(value)
     else:
-        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
         raise ValueError(msg)
     matches = re.match("([0-9]+)([a-zA-Z]+)", value)
-    if not matches:
-        msg = "Unable to interpret string value '%s' as bytes" % (value)
-        raise ValueError(msg)
-    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+    if matches:
+        size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+    else:
+        # Assume that value passed in is bytes
+        try:
+            size = int(value)
+        except ValueError:
+            msg = "Unable to interpret string value '%s' as bytes" % (value)
+            raise ValueError(msg)
+    return size


 class BasicStringComparator(object):
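With the fallback branch, a plain numeric string is now accepted as a raw byte count instead of raising. A short illustration of the resulting behaviour (the BYTE_POWER exponent table here is an assumed equivalent of charm-helpers' own):

    import re

    # Assumed exponent table: K/KB -> 1, M/MB -> 2, G/GB -> 3, ...
    BYTE_POWER = {'K': 1, 'KB': 1, 'M': 2, 'MB': 2, 'G': 3, 'GB': 3}


    def bytes_from_string(value):
        matches = re.match(r"([0-9]+)([a-zA-Z]+)", value)
        if matches:
            return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
        # Assume a bare number is already a byte count.
        return int(value)


    print(bytes_from_string('3G'))     # 3221225472
    print(bytes_from_string('1024'))   # 1024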

@@ -358,7 +358,7 @@ class Storage(object):
         try:
             yield self.revision
             self.revision = None
-        except:
+        except Exception:
             self.flush(False)
             self.revision = None
             raise

@@ -572,7 +572,7 @@ def get_upstream_version(package):
     cache = apt_cache()
     try:
         pkg = cache[package]
-    except:
+    except Exception:
         # the package is unknown to the current apt cache.
         return None

@@ -422,6 +422,11 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
     def validate_keystone_users(self, client):
         """Verify all existing roles."""
         u.log.debug('Checking keystone users...')
+        if self._get_openstack_release() < self.xenial_pike:
+            cinder_user = 'cinder_cinderv2'
+        else:
+            cinder_user = 'cinderv3_cinderv2'
         base = [
             {'name': 'demoUser',
              'enabled': True,
@@ -431,7 +436,7 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
              'enabled': True,
              'id': u.not_null,
              'email': 'juju@localhost'},
-            {'name': 'cinder_cinderv2',
+            {'name': cinder_user,
             'enabled': True,
             'id': u.not_null,
             'email': u'juju@localhost'}
@@ -609,6 +614,9 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'volume': [endpoint_check],
             'identity': [endpoint_check]
         }
+        if self._get_openstack_release() >= self.xenial_pike:
+            expected.pop('volume')
+            expected['volumev2'] = [endpoint_check]
         actual = self.keystone_v2.service_catalog.get_endpoints()

         ret = u.validate_svc_catalog_endpoint_data(expected, actual)
@@ -704,6 +712,8 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'service_tenant_id': u.not_null,
             'service_host': u.valid_ip
         }
+        if self._get_openstack_release() >= self.xenial_pike:
+            expected['service_username'] = 'cinderv3_cinderv2'
         for unit in self.keystone_sentries:
             ret = u.validate_relation_data(unit, relation, expected)
             if ret:
@@ -728,6 +738,22 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
             'cinderv2_admin_url': u.valid_url,
             'private-address': u.valid_ip,
         }
+        if self._get_openstack_release() >= self.xenial_pike:
+            expected.pop('cinder_region')
+            expected.pop('cinder_service')
+            expected.pop('cinder_public_url')
+            expected.pop('cinder_admin_url')
+            expected.pop('cinder_internal_url')
+            expected.update({
+                'cinderv2_region': 'RegionOne',
+                'cinderv3_region': 'RegionOne',
+                'cinderv3_service': 'cinderv3',
+                'cinderv3_region': 'RegionOne',
+                'cinderv3_public_url': u.valid_url,
+                'cinderv3_internal_url': u.valid_url,
+                'cinderv3_admin_url': u.valid_url})
+
         ret = u.validate_relation_data(unit, relation, expected)
         if ret:
             message = u.relation_error('cinder identity-service', ret)

tests/gate-basic-xenial-pike: normal file → executable file (0 lines changed)
@@ -60,7 +60,7 @@ basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 commands =
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
+    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy

 [testenv:func27-dfs]
 # Charm Functional Test

@@ -186,6 +186,7 @@ class TestKeystoneContexts(CharmTestCase):
                 'service_ports': {'admin-port': ['12', '34'],
                                   'public-port': ['12', '34']},
                 'default_backend': '1.2.3.4',
+                'ipv6_enabled': True,
                 'frontends': {'1.2.3.4': {
                     'network': '1.2.3.4/255.255.255.0',
                     'backends': {