Sync libraries & common files prior to freeze
* charm-helpers sync for classic charms
* charms.ceph sync for ceph charms
* rebuild for reactive charms
* sync tox.ini files as needed
* sync requirements.txt files to the standard set

Change-Id: I7c3e0a7e77e16b9bf318ca22c39f8220c5144ac7
commit 8f45645505 (parent a32af1b98b)
@@ -45,5 +45,6 @@ def openstack_upgrade():
                                     CONFIGS)):
         config_changed()
 
+
 if __name__ == '__main__':
     openstack_upgrade()
@@ -63,5 +63,6 @@ def main():
     config['neutron_config'] = dict(conf)
     return audits.action_parse_results(audits.run(config))
 
+
 if __name__ == "__main__":
     sys.exit(main())
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 ''' Helpers for interacting with OpenvSwitch '''
+import collections
 import hashlib
 import os
 import re
@@ -20,9 +21,9 @@ import six
 import subprocess
 
 from charmhelpers import deprecate
+from charmhelpers.contrib.network.ovs import ovsdb as ch_ovsdb
 from charmhelpers.fetch import apt_install
 
-
 from charmhelpers.core.hookenv import (
     log, WARNING, INFO, DEBUG
 )
@@ -592,3 +593,76 @@ def ovs_appctl(target, args):
     cmd = ['ovs-appctl', '-t', target]
     cmd.extend(args)
     return subprocess.check_output(cmd, universal_newlines=True)
+
+
+def uuid_for_port(port_name):
+    """Get UUID of named port.
+
+    :param port_name: Name of port.
+    :type port_name: str
+    :returns: Port UUID.
+    :rtype: Optional[uuid.UUID]
+    """
+    for port in ch_ovsdb.SimpleOVSDB(
+            'ovs-vsctl').port.find('name={}'.format(port_name)):
+        return port['_uuid']
+
+
+def bridge_for_port(port_uuid):
+    """Find which bridge a port is on.
+
+    :param port_uuid: UUID of port.
+    :type port_uuid: uuid.UUID
+    :returns: Name of bridge or None.
+    :rtype: Optional[str]
+    """
+    for bridge in ch_ovsdb.SimpleOVSDB(
+            'ovs-vsctl').bridge:
+        # If there is a single port on a bridge the ports property will not be
+        # a list. ref: juju/charm-helpers#510
+        if (isinstance(bridge['ports'], list) and
+                port_uuid in bridge['ports'] or
+                port_uuid == bridge['ports']):
+            return bridge['name']
+
+
+PatchPort = collections.namedtuple('PatchPort', ('bridge', 'port'))
+Patch = collections.namedtuple('Patch', ('this_end', 'other_end'))
+
+
+def patch_ports_on_bridge(bridge):
+    """Find patch ports on a bridge.
+
+    :param bridge: Name of bridge
+    :type bridge: str
+    :returns: Iterator with bridge and port name for both ends of a patch.
+    :rtype: Iterator[Patch[PatchPort[str,str],PatchPort[str,str]]]
+    :raises: ValueError
+    """
+    # On any given vSwitch there will be a small number of patch ports, so we
+    # start by iterating over ports with type `patch` then look up which
+    # bridge they belong to and act on any ports that match the criteria.
+    for interface in ch_ovsdb.SimpleOVSDB(
+            'ovs-vsctl').interface.find('type=patch'):
+        for port in ch_ovsdb.SimpleOVSDB(
+                'ovs-vsctl').port.find('name={}'.format(interface['name'])):
+            if bridge_for_port(port['_uuid']) == bridge:
+                this_end = PatchPort(bridge, port['name'])
+                other_end = PatchPort(bridge_for_port(
+                    uuid_for_port(
+                        interface['options']['peer'])),
+                    interface['options']['peer'])
+                yield(Patch(this_end, other_end))
+            # We expect one result and it is ok if it turns out to be a port
+            # for a different bridge. However we need a break here to satisfy
+            # the for/else check which is in place to detect an interface
+            # referring to a non-existent port.
+            break
+        else:
+            raise ValueError('Port for interface named "{}" does unexpectedly '
+                             'not exist.'.format(interface['name']))
+    else:
+        # Allow our caller to handle no patch ports found gracefully, in
+        # reference to PEP479 just doing a return will provide an empty
+        # iterator and not None.
        return
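The new helpers compose: patch_ports_on_bridge uses uuid_for_port and bridge_for_port to resolve both ends of each patch. A minimal usage sketch, assuming a host where ovs-vsctl is available; the bridge name br-int is illustrative:

    # List both ends of every patch port on a bridge using the new helpers.
    from charmhelpers.contrib.network.ovs import patch_ports_on_bridge

    for patch in patch_ports_on_bridge('br-int'):
        print('{}:{} <-> {}:{}'.format(
            patch.this_end.bridge, patch.this_end.port,
            patch.other_end.bridge, patch.other_end.port))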
@@ -36,6 +36,11 @@ class SimpleOVSDB(object):
         for br in ovsdb.bridge:
             if br['name'] == 'br-test':
                 ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed')
+
+    WARNING: If a list type field only has one item `ovs-vsctl` will present
+    it as a single item. Since we do not know the schema we have no way of
+    knowing what fields should be de-serialized as lists, so the caller has
+    to be careful to check the type of values returned from this library.
     """
 
     # For validation we keep a complete map of currently known good tool and
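In practice that warning means callers normalize possibly-collapsed set fields before iterating. A minimal sketch, assuming a host with Open vSwitch and charm-helpers installed; as_list is a hypothetical helper, not part of the library:

    from charmhelpers.contrib.network.ovs.ovsdb import SimpleOVSDB

    def as_list(value):
        """Normalize a possibly-collapsed OVSDB set field to a list."""
        return value if isinstance(value, list) else [value]

    ovsdb = SimpleOVSDB('ovs-vsctl')
    for br in ovsdb.bridge:
        # br['ports'] may be a list or a bare scalar; normalize either way.
        for port in as_list(br['ports']):
            print(br['name'], port)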
@@ -157,6 +162,51 @@ class SimpleOVSDB(object):
         self._tool = tool
         self._table = table
 
+    def _deserialize_ovsdb(self, data):
+        """Deserialize OVSDB RFC7047 section 5.1 data.
+
+        :param data: Multidimensional list where first row contains RFC7047
+                     type information
+        :type data: List[str,any]
+        :returns: Deserialized data.
+        :rtype: any
+        """
+        # When using json formatted output to OVS commands Internal OVSDB
+        # notation may occur that requires further deserializing.
+        # Reference: https://tools.ietf.org/html/rfc7047#section-5.1
+        ovs_type_cb_map = {
+            'uuid': uuid.UUID,
+            # NOTE: OVSDB sets have overloaded type
+            # see special handling below
+            'set': list,
+            'map': dict,
+        }
+        assert len(data) > 1, ('Invalid data provided, expecting list '
+                               'with at least two elements.')
+        if data[0] == 'set':
+            # special handling for set
+            #
+            # it is either a list of strings or a list of typed lists.
+            # taste first element to see which it is
+            for el in data[1]:
+                # NOTE: We lock this handling down to the `uuid` type as
+                # that is the only one we have a practical example of.
+                # We could potentially just handle this generally based on
+                # the types listed in `ovs_type_cb_map`, but let's open that
+                # up as soon as we have a concrete example to validate on.
+                if isinstance(
+                        el, list) and len(el) and el[0] == 'uuid':
+                    decoded_set = []
+                    for el in data[1]:
+                        decoded_set.append(self._deserialize_ovsdb(el))
+                    return(decoded_set)
+                # fall back to normal processing below
+                break
+
+        # Use map to deserialize data with fallback to `str`
+        f = ovs_type_cb_map.get(data[0], str)
+        return f(data[1])
+
     def _find_tbl(self, condition=None):
         """Run and parse output of OVSDB `find` command.
 
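The decoding rules are small enough to show standalone. A sketch of the same mapping for RFC 7047 atoms; decode_atom and the sample values are illustrative, and the in-tree method additionally recurses into sets of typed elements:

    import uuid

    def decode_atom(data):
        # ['uuid', ...] -> uuid.UUID, ['map', ...] -> dict,
        # ['set', ...] -> list; anything else falls back to str,
        # mirroring ovs_type_cb_map above.
        return {'uuid': uuid.UUID,
                'map': dict,
                'set': list}.get(data[0], str)(data[1])

    print(decode_atom(['uuid', '5b34ae24-e2c2-4b32-9c5a-5d2b5e4d2e5c']))
    print(decode_atom(['map', [['charm', 'managed']]]))  # -> {'charm': 'managed'}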
@@ -165,15 +215,6 @@ class SimpleOVSDB(object):
         :returns: Dictionary with data
         :rtype: Dict[str, any]
         """
-        # When using json formatted output to OVS commands Internal OVSDB
-        # notation may occur that require further deserializing.
-        # Reference: https://tools.ietf.org/html/rfc7047#section-5.1
-        ovs_type_cb_map = {
-            'uuid': uuid.UUID,
-            # FIXME sets also appear to sometimes contain type/value tuples
-            'set': list,
-            'map': dict,
-        }
         cmd = [self._tool, '-f', 'json', 'find', self._table]
         if condition:
             cmd.append(condition)
@@ -182,9 +223,8 @@ class SimpleOVSDB(object):
         for row in data['data']:
             values = []
             for col in row:
-                if isinstance(col, list):
-                    f = ovs_type_cb_map.get(col[0], str)
-                    values.append(f(col[1]))
+                if isinstance(col, list) and len(col) > 1:
+                    values.append(self._deserialize_ovsdb(col))
                 else:
                     values.append(col)
             yield dict(zip(data['headings'], values))
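For reference, _find_tbl consumes `ovs-vsctl -f json find` output shaped roughly like the JSON below and zips each row against the headings; the row content here is illustrative:

    import json

    raw = ('{"headings": ["_uuid", "name"],'
           ' "data": [[["uuid", "5b34ae24-e2c2-4b32-9c5a-5d2b5e4d2e5c"],'
           ' "patch-provider"]]}')
    data = json.loads(raw)
    for row in data['data']:
        # Typed columns such as the ["uuid", ...] pair are what
        # _deserialize_ovsdb decodes before the zip in the real method.
        print(dict(zip(data['headings'], row)))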
@@ -29,6 +29,8 @@ from subprocess import check_call, CalledProcessError
 
 import six
 
+import charmhelpers.contrib.storage.linux.ceph as ch_ceph
+
 from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
     _config_ini as config_ini
 )
@@ -56,6 +58,7 @@ from charmhelpers.core.hookenv import (
     status_set,
     network_get_primary_address,
     WARNING,
+    service_name,
 )
 
 from charmhelpers.core.sysctl import create as sysctl_create
@@ -808,6 +811,12 @@ class CephContext(OSContextGenerator):
 
         ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
 
+        if config('pool-type') and config('pool-type') == 'erasure-coded':
+            base_pool_name = config('rbd-pool') or config('rbd-pool-name')
+            if not base_pool_name:
+                base_pool_name = service_name()
+            ctxt['rbd_default_data_pool'] = base_pool_name
+
         if not os.path.isdir('/etc/ceph'):
             os.mkdir('/etc/ceph')
 
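The pool-name fallback chain above is: rbd-pool, then rbd-pool-name, then the application name. As a standalone sketch with illustrative config values:

    def base_pool_name(cfg, service):
        # Mirrors the fallback in CephContext for erasure-coded pool types.
        return cfg.get('rbd-pool') or cfg.get('rbd-pool-name') or service

    print(base_pool_name({}, 'glance'))                    # -> 'glance'
    print(base_pool_name({'rbd-pool': 'data'}, 'glance'))  # -> 'data'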
@@ -3175,3 +3184,90 @@ class SRIOVContext(OSContextGenerator):
         :rtype: Dict[str,int]
         """
         return self._map
+
+
+class CephBlueStoreCompressionContext(OSContextGenerator):
+    """Ceph BlueStore compression options."""
+
+    # Tuple of tuples that map configuration option name to CephBrokerRq op
+    # property name
+    options = (
+        ('bluestore-compression-algorithm',
+         'compression-algorithm'),
+        ('bluestore-compression-mode',
+         'compression-mode'),
+        ('bluestore-compression-required-ratio',
+         'compression-required-ratio'),
+        ('bluestore-compression-min-blob-size',
+         'compression-min-blob-size'),
+        ('bluestore-compression-min-blob-size-hdd',
+         'compression-min-blob-size-hdd'),
+        ('bluestore-compression-min-blob-size-ssd',
+         'compression-min-blob-size-ssd'),
+        ('bluestore-compression-max-blob-size',
+         'compression-max-blob-size'),
+        ('bluestore-compression-max-blob-size-hdd',
+         'compression-max-blob-size-hdd'),
+        ('bluestore-compression-max-blob-size-ssd',
+         'compression-max-blob-size-ssd'),
+    )
+
+    def __init__(self):
+        """Initialize context by loading values from charm config.
+
+        We keep two maps, one suitable for use with CephBrokerRq's and one
+        suitable for template generation.
+        """
+        charm_config = config()
+
+        # CephBrokerRq op map
+        self.op = {}
+        # Context exposed for template generation
+        self.ctxt = {}
+        for config_key, op_key in self.options:
+            value = charm_config.get(config_key)
+            self.ctxt.update({config_key.replace('-', '_'): value})
+            self.op.update({op_key: value})
+
+    def __call__(self):
+        """Get context.
+
+        :returns: Context
+        :rtype: Dict[str,any]
+        """
+        return self.ctxt
+
+    def get_op(self):
+        """Get values for use in CephBrokerRq op.
+
+        :returns: Context values with CephBrokerRq op property name as key.
+        :rtype: Dict[str,any]
+        """
+        return self.op
+
+    def get_kwargs(self):
+        """Get values for use as keyword arguments.
+
+        :returns: Context values with key suitable for use as kwargs to
+                  CephBrokerRq add_op_create_*_pool methods.
+        :rtype: Dict[str,any]
+        """
+        return {
+            k.replace('-', '_'): v
+            for k, v in self.op.items()
+        }
+
+    def validate(self):
+        """Validate options.
+
+        :raises: AssertionError
+        """
+        # We slip in a dummy name on class instantiation to allow validation
+        # of the other options. It will not affect further use.
+        #
+        # NOTE: once we retire Python 3.5 we can fold this into an in-line
+        # dictionary comprehension in the call to the initializer.
+        dummy_op = {'name': 'dummy-name'}
+        dummy_op.update(self.op)
+        pool = ch_ceph.BasePool('dummy-service', op=dummy_op)
+        pool.validate()
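How a charm might consume the new context when building a Ceph broker request, per the get_kwargs docstring. A sketch only: it assumes it runs in a hook context where config() works, and the pool name is illustrative:

    from charmhelpers.contrib.openstack.context import (
        CephBlueStoreCompressionContext,
    )
    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    compression = CephBlueStoreCompressionContext()
    compression.validate()  # raises AssertionError on invalid combinations
    rq = CephBrokerRq()
    rq.add_op_create_replicated_pool(name='mypool',
                                     **compression.get_kwargs())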
@@ -22,3 +22,7 @@ rbd default features = {{ rbd_features }}
 {{ key }} = {{ value }}
 {% endfor -%}
 {%- endif %}
+
+{% if rbd_default_data_pool -%}
+rbd default data pool = {{ rbd_default_data_pool }}
+{% endif %}
@ -0,0 +1,28 @@
|
|||||||
|
{# section header omitted as options can belong to multiple sections #}
|
||||||
|
{% if bluestore_compression_algorithm -%}
|
||||||
|
bluestore compression algorithm = {{ bluestore_compression_algorithm }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_mode -%}
|
||||||
|
bluestore compression mode = {{ bluestore_compression_mode }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_required_ratio -%}
|
||||||
|
bluestore compression required ratio = {{ bluestore_compression_required_ratio }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_min_blob_size -%}
|
||||||
|
bluestore compression min blob size = {{ bluestore_compression_min_blob_size }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_min_blob_size_hdd -%}
|
||||||
|
bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_min_blob_size_ssd -%}
|
||||||
|
bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_max_blob_size -%}
|
||||||
|
bluestore compression max blob size = {{ bluestore_compression_max_blob_size }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_max_blob_size_hdd -%}
|
||||||
|
bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }}
|
||||||
|
{% endif -%}
|
||||||
|
{% if bluestore_compression_max_blob_size_ssd -%}
|
||||||
|
bluestore compression max blob size ssd = {{ bluestore_compression_max_blob_size_ssd }}
|
||||||
|
{% endif -%}
|
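Both new template fragments render the same way, driven by the context keys produced above. A sketch using plain Jinja2 with one compression option set; the value is illustrative:

    from jinja2 import Template

    fragment = ('{% if bluestore_compression_algorithm -%}\n'
                'bluestore compression algorithm = '
                '{{ bluestore_compression_algorithm }}\n'
                '{% endif -%}\n')
    # Unset options render to nothing; set options emit one ceph.conf line.
    print(Template(fragment).render(bluestore_compression_algorithm='zstd'))
    # -> bluestore compression algorithm = zstd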
[diff for one file suppressed because it is too large]
@@ -876,12 +876,12 @@ class NeutronApiApiPasteContext(context.OSContextGenerator):
             t, n, c = [m.get(v) for v in ['type', 'name', 'config']]
             # note that dict has to be non-empty
             if not types_valid(t, n, c):
                 raise ValueError('Extra middleware key type(s) are'
                                  ' invalid: {}'.format(repr(m)))
             if not mtype_valid(t):
                 raise ValueError('Extra middleware type key is not'
                                  ' a valid PasteDeploy middleware '
                                  'type {}'.format(repr(t)))
             if not c:
                 raise ValueError('Extra middleware config dictionary'
                                  ' is empty')
@@ -893,7 +893,7 @@ class NeutronApiApiPasteContext(context.OSContextGenerator):
         if rdata_middleware:
             try:
                 middleware = ast.literal_eval(rdata_middleware)
-            except:
+            except Exception:
                 import traceback
                 log(traceback.format_exc())
                 raise ValueError('Invalid extra middleware data'
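The several `except:` to `except Exception:` changes in this sync matter because a bare `except` also catches BaseException subclasses such as SystemExit and KeyboardInterrupt. The hardened pattern, sketched standalone with an illustrative function name:

    import ast

    def parse_middleware(raw):
        try:
            return ast.literal_eval(raw)
        except Exception:
            # Real parse errors become ValueError; SystemExit and
            # KeyboardInterrupt still propagate because they are not
            # Exception subclasses.
            raise ValueError('Invalid extra middleware data: {!r}'.format(raw))

    print(parse_middleware("[{'type': 'filter'}]"))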
@@ -877,8 +877,10 @@ def router_feature_present(feature):
             return True
     return False
 
+
 l3ha_router_present = partial(router_feature_present, feature='ha')
 
+
 dvr_router_present = partial(router_feature_present, feature='distributed')
 
 
@@ -892,7 +894,7 @@ def neutron_ready():
         neutron_client.list_routers()
         log('neutron client ready')
         return True
-    except:
+    except Exception:
         log('neutron query failed, neutron not ready ')
         return False
 
@@ -953,7 +955,7 @@ def check_optional_relations(configs):
     if relation_ids('ha'):
         try:
             get_hacluster_config()
-        except:
+        except Exception:
             return ('blocked',
                     'hacluster missing configuration: '
                     'vip, vip_iface, vip_cidr')
@@ -7,6 +7,7 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 pbr>=1.8.0,<1.9.0
 simplejson>=2.2.0
 netifaces>=0.10.4
@@ -7,10 +7,11 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 charm-tools>=2.4.4
 requests>=2.18.4
 mock>=1.2
-flake8>=2.2.4,<=2.4.1
+flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev  # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
tox.ini
@@ -116,5 +116,5 @@ commands =
     functest-run-suite --keep-model --bundle {posargs}
 
 [flake8]
-ignore = E402,E226
+ignore = E402,E226,W503,W504
 exclude = */charmhelpers
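W503 and W504 are pycodestyle's contradictory pair: one flags a line break before a binary operator, the other a break after, so ignoring both accepts either wrapping style:

    # Passes with W503,W504 ignored regardless of where the operator sits.
    total = (1 +
             2)   # break after the operator would trip W504 only
    total = (1
             + 2)  # break before the operator would trip W503 only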
@@ -101,9 +101,9 @@ class TestConfig(object):
         return self.config
 
     def set(self, attr, value):
         if attr not in self.config:
             raise KeyError
         self.config[attr] = value
 
 
 class TestRelation(object):