Bump hacking
hacking 3.0.x is quite old. Bump it to the current latest version.

Change-Id: I8d87fed6afe5988678c64090af261266d1ca20e6
parent a9dc3794a6
commit 566a830f64
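Most of the code hunks below follow from the version bump: the newer hacking release (and the pycodestyle/flake8 it pulls in) flags eager string formatting inside log calls, so str.format() interpolation is replaced with lazy %-style arguments. A minimal before/after sketch of that pattern with the standard logging module; the volume_id value is made up for illustration and is not code from this change:

    import logging

    LOG = logging.getLogger(__name__)
    volume_id = 'vol-1234'  # hypothetical value, for illustration only

    # Before: the message is formatted even when DEBUG output is disabled.
    LOG.debug('Waiting the migration of {0}'.format(volume_id))

    # After: formatting is deferred until the record is actually emitted.
    LOG.debug('Waiting the migration of %s', volume_id)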
@@ -1,6 +1,3 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
 openstackdocstheme>=2.2.1 # Apache-2.0
 sphinx>=2.0.0,!=2.1.0 # BSD
 sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0
@@ -1,7 +1,6 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
+# Requirements lower bounds listed here are our best effort to keep them up to
+# date but we do not test them so no guarantee of having them all correct. If
+# you find any incorrect lower bounds, let us know or propose a fix.
 apscheduler>=3.5.1 # MIT License
 jsonpatch>=1.21 # BSD
 keystoneauth1>=3.4.0 # Apache-2.0
@@ -1,11 +1,7 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
 coverage>=4.5.1 # Apache-2.0
 doc8>=0.8.0 # Apache-2.0
 freezegun>=0.3.10 # Apache-2.0
-hacking>=3.0.1,<3.1.0 # Apache-2.0
+hacking>=7.0.0,<7.1.0 # Apache-2.0
 oslotest>=3.3.0 # Apache-2.0
 testscenarios>=0.5.0 # Apache-2.0/BSD
 testtools>=2.3.0 # MIT
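The pin jumps straight from the 3.0.x series to 7.0.x; the remaining hunks are the style fixes the newer linters require. A quick way to confirm which hacking release a test environment actually resolved, using only the standard library (illustrative, not part of the change):

    from importlib.metadata import version

    # Expect a 7.0.x release in an environment built after this bump.
    print(version('hacking'))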
@@ -153,7 +153,7 @@ class CinderHelper(object):
 final_status = ('success', 'error')
 while getattr(volume, 'migration_status') not in final_status:
 volume = self.get_volume(volume.id)
-LOG.debug('Waiting the migration of {0}'.format(volume))
+LOG.debug('Waiting the migration of %s', volume)
 time.sleep(retry_interval)
 if getattr(volume, 'migration_status') == 'error':
 host_name = getattr(volume, 'os-vol-host-attr:host')
@@ -230,7 +230,7 @@ class CinderHelper(object):
 availability_zone=getattr(volume, 'availability_zone'))
 while getattr(new_volume, 'status') != 'available' and retry:
 new_volume = cinder.volumes.get(new_volume.id)
-LOG.debug('Waiting volume creation of {0}'.format(new_volume))
+LOG.debug('Waiting volume creation of %s', new_volume)
 time.sleep(retry_interval)
 retry -= 1
 LOG.debug("retry count: %s", retry)
@@ -292,9 +292,7 @@ class NovaHelper(object):
 'OS-EXT-STS:vm_state') != 'resized' \
 and retry:
 instance = self.nova.servers.get(instance.id)
-LOG.debug(
-'Waiting the resize of {0} to {1}'.format(
-instance, flavor_id))
+LOG.debug('Waiting the resize of %s to %s', instance, flavor_id)
 time.sleep(1)
 retry -= 1

@@ -349,8 +347,7 @@ class NovaHelper(object):
 if dest_hostname is None:
 while (instance.status not in ['ACTIVE', 'ERROR'] and retry):
 instance = self.nova.servers.get(instance.id)
-LOG.debug(
-'Waiting the migration of {0}'.format(instance.id))
+LOG.debug('Waiting the migration of %s', instance.id)
 time.sleep(1)
 retry -= 1
 new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
@@ -371,11 +368,9 @@ class NovaHelper(object):
 if not getattr(instance, 'OS-EXT-STS:task_state'):
 LOG.debug("Instance task state: %s is null", instance_id)
 break
-LOG.debug(
-'Waiting the migration of {0} to {1}'.format(
-instance,
-getattr(instance,
-'OS-EXT-SRV-ATTR:host')))
+LOG.debug('Waiting the migration of %s to %s',
+instance,
+getattr(instance, 'OS-EXT-SRV-ATTR:host'))
 time.sleep(1)
 retry -= 1

@@ -725,7 +720,7 @@ class NovaHelper(object):
 instance_id, old_volume.id, new_volume.id)
 while getattr(new_volume, 'status') != 'in-use' and retry:
 new_volume = self.cinder.volumes.get(new_volume.id)
-LOG.debug('Waiting volume update to {0}'.format(new_volume))
+LOG.debug('Waiting volume update to %s', new_volume)
 time.sleep(retry_interval)
 retry -= 1
 LOG.debug("retry count: %s", retry)
@@ -91,8 +91,8 @@ class DataSourceBase(object):
 except Exception as e:
 LOG.exception(e)
 self.query_retry_reset(e)
-LOG.warning("Retry {0} of {1} while retrieving metrics retry "
-"in {2} seconds".format(i+1, num_retries, timeout))
+LOG.warning("Retry %d of %d while retrieving metrics retry "
+"in %d seconds", i+1, num_retries, timeout)
 time.sleep(timeout)

 @abc.abstractmethod
@@ -90,8 +90,8 @@ class GnocchiHelper(base.DataSourceBase):
 **kwargs)

 if not resources:
-LOG.warning("The {0} resource {1} could not be "
-"found".format(self.NAME, resource_id))
+LOG.warning("The %s resource %s could not be found",
+self.NAME, resource_id)
 return

 resource_id = resources[0]['id']
@@ -99,7 +99,7 @@ class GnocchiHelper(base.DataSourceBase):
 if meter_name == "instance_cpu_usage":
 if resource_type != "instance":
 LOG.warning("Unsupported resource type for metric "
-"'instance_cpu_usage': ", resource_type)
+"'instance_cpu_usage': %s", resource_type)
 return

 # The "cpu_util" gauge (percentage) metric has been removed.
@@ -172,8 +172,8 @@ class GnocchiHelper(base.DataSourceBase):
 **kwargs)

 if not resources:
-LOG.warning("The {0} resource {1} could not be "
-"found".format(self.NAME, resource_id))
+LOG.warning("The %s resource %s could not be found",
+self.NAME, resource_id)
 return

 resource_id = resources[0]['id']
@@ -158,8 +158,9 @@ class GrafanaHelper(base.DataSourceBase):
 try:
 self.METRIC_MAP[meter_name]
 except KeyError:
-LOG.error("Metric: {0} does not appear in the current Grafana "
-"metric map".format(meter_name))
+LOG.error(
+"Metric: %s does not appear in the current Grafana metric map",
+meter_name)
 raise exception.MetricNotAvailable(metric=meter_name)

 db = self.METRIC_MAP[meter_name]['db']
@@ -184,7 +185,7 @@ class GrafanaHelper(base.DataSourceBase):

 resp = self.query_retry(self._request, **kwargs)
 if not resp:
-LOG.warning("Datasource {0} is not available.".format(self.NAME))
+LOG.warning("Datasource %s is not available.", self.NAME)
 return

 result = translator.extract_result(resp.content)
@@ -57,8 +57,8 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
 resource = self._extract_attribute(
 data['resource'], data['attribute'])
 except AttributeError:
-LOG.error("Resource: {0} does not contain attribute {1}".format(
-data['resource'], data['attribute']))
+LOG.error("Resource: %s does not contain attribute %s",
+data['resource'], data['attribute'])
 raise

 # Granularity is optional if it is None the minimal value for InfluxDB
@@ -82,7 +82,7 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
 index_aggregate = result['columns'].index(self._data['aggregate'])
 return result['values'][0][index_aggregate]
 except KeyError:
-LOG.error("Could not extract {0} for the resource: {1}".format(
-self._data['metric'], self._data['resource']))
+LOG.error("Could not extract %s for the resource: %s",
+self._data['metric'], self._data['resource'])
 raise exception.NoSuchMetricForHost(
 metric=self._data['metric'], host=self._data['resource'])
@@ -37,8 +37,8 @@ class DataSourceManager(object):
 (mon.MonascaHelper.NAME, mon.MonascaHelper.METRIC_MAP),
 (graf.GrafanaHelper.NAME, graf.GrafanaHelper.METRIC_MAP),
 ])
-"""Dictionary with all possible datasources, dictionary order is the default
-order for attempting to use datasources
+"""Dictionary with all possible datasources, dictionary order is
+the default order for attempting to use datasources
 """

 def __init__(self, config=None, osc=None):
@@ -127,8 +127,9 @@ class DataSourceManager(object):
 if (metric not in self.metric_map[datasource] or
 self.metric_map[datasource].get(metric) is None):
 no_metric = True
-LOG.warning("Datasource: {0} could not be used due to "
-"metric: {1}".format(datasource, metric))
+LOG.warning(
+"Datasource: %s could not be used due to metric: %s",
+datasource, metric)
 break
 if not no_metric:
 # Try to use a specific datasource but attempt additional
@@ -216,9 +216,9 @@ class BaseModelBuilder(object):
 except Exception as e:
 LOG.exception(e)
 self.call_retry_reset(e)
-LOG.warning("Retry {0} of {1}, error while calling service "
-"retry in {2} seconds".format(i+1, num_retries,
-timeout))
+LOG.warning("Retry %d of %d, error while calling service "
+"retry in %s seconds",
+i+1, num_retries, timeout)
 time.sleep(timeout)
 raise

@@ -274,7 +274,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
 instances = getattr(node_info, "servers", None)
 # Do not submit job if there are no instances on compute node
 if instances is None:
-LOG.info("No instances on compute_node: {0}".format(node_info))
+LOG.info("No instances on compute_node: %s", node_info)
 return
 future_instances.append(
 self.executor.submit(
@@ -330,7 +330,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
 self.nova_helper.get_compute_node_by_name,
 node, servers=True, detailed=True)
 for node in compute_nodes]
-LOG.debug("submitted {0} jobs".format(len(compute_nodes)))
+LOG.debug("submitted %d jobs", len(compute_nodes))

 # Futures will concurrently be added, only safe with CPython GIL
 future_instances = []
@@ -427,7 +427,7 @@ class NovaModelBuilder(base.BaseModelBuilder):

 def add_instance_node(self, node, instances):
 if instances is None:
-LOG.info("no instances on compute_node: {0}".format(node))
+LOG.info("no instances on compute_node: %s", node)
 return
 host = node.service["host"]
 compute_node = self.model.get_node_by_uuid(node.id)
@@ -180,8 +180,8 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):

 for potential_noisy_instance in (
 instance_priority_list_reverse):
-if(potential_noisy_instance ==
+if (potential_noisy_instance ==
 potential_priority_instance):
 loop_break_flag = True
 break

@@ -205,7 +205,7 @@ class UniformAirflow(base.BaseStrategy):
 host = nodemap['node']
 if 'cores_used' not in nodemap:
 # calculate the available resources
-nodemap['cores_used'], nodemap['mem_used'],\
+nodemap['cores_used'], nodemap['mem_used'], \
 nodemap['disk_used'] = self.calculate_used_resource(
 host)
 cores_available = (host.vcpus -
@@ -98,7 +98,7 @@ def no_translate_debug_logs(logical_line, filename):
 """
 for hint in _all_hints:
 if logical_line.startswith("LOG.debug(%s(" % hint):
-yield(0, "N319 Don't translate debug level logs")
+yield (0, "N319 Don't translate debug level logs")


 @flake8ext
@@ -128,21 +128,21 @@ def check_assert_called_once_with(logical_line, filename):
 @flake8ext
 def check_python3_xrange(logical_line):
 if re.search(r"\bxrange\s*\(", logical_line):
-yield(0, "N325: Do not use xrange. Use range for large loops.")
+yield (0, "N325: Do not use xrange. Use range for large loops.")


 @flake8ext
 def check_no_basestring(logical_line):
 if re.search(r"\bbasestring\b", logical_line):
 msg = ("N326: basestring is not Python3-compatible, use str instead.")
-yield(0, msg)
+yield (0, msg)


 @flake8ext
 def check_python3_no_iteritems(logical_line):
 if re.search(r".*\.iteritems\(\)", logical_line):
 msg = ("N327: Use dict.items() instead of dict.iteritems().")
-yield(0, msg)
+yield (0, msg)


 @flake8ext
@@ -282,7 +282,7 @@ def no_redundant_import_alias(logical_line):
 N342
 """
 if re.match(re_redundant_import_alias, logical_line):
-yield(0, "N342: No redundant import alias.")
+yield (0, "N342: No redundant import alias.")


 @flake8ext
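The yield(...) to yield (...) edits in the local checks above satisfy the whitespace-after-keyword rule enforced by the newer pycodestyle that ships with hacking 7.x. A standalone sketch of a check written in the same shape; the N399 code and the check itself are made up for illustration and are not part of Watcher:

    import re


    def check_no_print(logical_line):
        # Local flake8/hacking checks yield (offset, message) tuples;
        # note the space after the yield keyword.
        if re.search(r"\bprint\s*\(", logical_line):
            yield (0, "N399: do not use print(); use a logger instead.")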
@@ -169,7 +169,7 @@ class ActionPlan(base.WatcherPersistentObject, base.WatcherObject,
 :param action_plan_id: the id *or* uuid of a action_plan.
 :param eager: Load object fields if True (Default: False)
 :returns: a :class:`Action` object.
-"""
+""" # noqa: E501
 if utils.is_int_like(action_plan_id):
 return cls.get_by_id(context, action_plan_id, eager=eager)
 elif utils.is_uuid_like(action_plan_id):
@@ -184,7 +184,7 @@ class ActionPlan(base.WatcherPersistentObject, base.WatcherObject,
 :param action_plan_id: the id of a action_plan.
 :param eager: Load object fields if True (Default: False)
 :returns: a :class:`ActionPlan` object.
-"""
+""" # noqa: E501
 db_action_plan = cls.dbapi.get_action_plan_by_id(
 context, action_plan_id, eager=eager)
 action_plan = cls._from_db_object(
@@ -199,7 +199,7 @@ class ActionPlan(base.WatcherPersistentObject, base.WatcherObject,
 :param context: Security context
 :param eager: Load object fields if True (Default: False)
 :returns: a :class:`ActionPlan` object.
-"""
+""" # noqa: E501
 db_action_plan = cls.dbapi.get_action_plan_by_uuid(
 context, uuid, eager=eager)
 action_plan = cls._from_db_object(
@@ -435,9 +435,10 @@ class TestListAction(api_base.FunctionalTest):
 self.assertIn('links', response.keys())
 self.assertEqual(2, len(response['links']))
 self.assertIn(uuid, response['links'][0]['href'])
-for l in response['links']:
-bookmark = l['rel'] == 'bookmark'
-self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+for link in response['links']:
+bookmark = link['rel'] == 'bookmark'
+self.assertTrue(self.validate_link(
+link['href'], bookmark=bookmark))

 def test_collection_links(self):
 parents = None
@@ -270,9 +270,10 @@ class TestListActionPlan(api_base.FunctionalTest):
 self.assertIn('links', response.keys())
 self.assertEqual(2, len(response['links']))
 self.assertIn(uuid, response['links'][0]['href'])
-for l in response['links']:
-bookmark = l['rel'] == 'bookmark'
-self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+for link in response['links']:
+bookmark = link['rel'] == 'bookmark'
+self.assertTrue(self.validate_link(
+link['href'], bookmark=bookmark))

 def test_collection_links(self):
 for id_ in range(5):
@@ -216,9 +216,10 @@ class TestListAuditTemplate(FunctionalTestWithSetup):
 self.assertIn('links', response.keys())
 self.assertEqual(2, len(response['links']))
 self.assertIn(uuid, response['links'][0]['href'])
-for l in response['links']:
-bookmark = l['rel'] == 'bookmark'
-self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+for link in response['links']:
+bookmark = link['rel'] == 'bookmark'
+self.assertTrue(
+self.validate_link(link['href'], bookmark=bookmark))

 def test_collection_links(self):
 for id_ in range(5):
@@ -237,9 +237,10 @@ class TestListAudit(api_base.FunctionalTest):
 self.assertIn('links', response.keys())
 self.assertEqual(2, len(response['links']))
 self.assertIn(uuid, response['links'][0]['href'])
-for l in response['links']:
-bookmark = l['rel'] == 'bookmark'
-self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+for link in response['links']:
+bookmark = link['rel'] == 'bookmark'
+self.assertTrue(
+self.validate_link(link['href'], bookmark=bookmark))

 def test_collection_links(self):
 for id_ in range(5):
@@ -70,7 +70,7 @@ def create_test_audit_template(**kwargs):
 :param kwargs: kwargsargs with overriding values for audit template's
 attributes.
 :returns: Test AuditTemplate DB object.
-"""
+""" # noqa: E501
 audit_template = get_test_audit_template(**kwargs)
 # Let DB generate ID if it isn't specified explicitly
 if 'id' not in kwargs:
@@ -67,7 +67,7 @@ class TestNoisyNeighbor(TestBaseStrategy):
 self.m_c_model.return_value = model
 node_uuid = 'Node_1'
 n1, n2 = self.strategy.group_hosts()
-self.assertTrue(node_uuid in n1)
+self.assertIn(node_uuid, n1)
 self.assertEqual(n1[node_uuid]['priority_vm'].uuid, 'INSTANCE_3')
 self.assertEqual(n1[node_uuid]['noisy_vm'].uuid, 'INSTANCE_4')
 self.assertEqual('Node_0', n2[0].uuid)
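The assertTrue(x in y) call above is rewritten as assertIn(x, y); besides satisfying the hacking check that prefers assertIn for membership tests, assertIn reports the container contents on failure instead of just "False is not true". A minimal, self-contained unittest sketch of the difference (names and data are made up for illustration):

    import unittest


    class MembershipExample(unittest.TestCase):
        def test_membership(self):
            hosts = {'Node_1': {'priority_vm': 'INSTANCE_3'}}
            # Preferred: a failure message includes the dictionary contents.
            self.assertIn('Node_1', hosts)
            # Discouraged: a failure message is only "False is not true".
            self.assertTrue('Node_1' in hosts)


    if __name__ == '__main__':
        unittest.main()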
@@ -464,11 +464,11 @@ class TestZoneMigration(TestBaseStrategy):
 }
 filters = self.strategy.get_priority_filter_list()
 self.assertIn(strategies.zone_migration.ComputeHostSortFilter,
-map(lambda l: l.__class__, filters))
+map(lambda l: l.__class__, filters)) # noqa: E741
 self.assertIn(strategies.zone_migration.StorageHostSortFilter,
-map(lambda l: l.__class__, filters))
+map(lambda l: l.__class__, filters)) # noqa: E741
 self.assertIn(strategies.zone_migration.ProjectSortFilter,
-map(lambda l: l.__class__, filters))
+map(lambda l: l.__class__, filters)) # noqa: E741

 # ComputeHostSortFilter #
