Bump hacking
hacking 3.0.x is quite old. Bump it to the current latest version.

Change-Id: I8d87fed6afe5988678c64090af261266d1ca20e6
parent a9dc3794a6
commit 566a830f64
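Most of the churn in this diff is mechanical: log calls move from str.format() to lazy %-interpolation, which the newer hacking/flake8 toolchain flags. This matches hacking's delayed-string-interpolation guidance (H904); the exact check that fired is not stated in the commit, so treat that attribution as a best guess. A minimal sketch of the pattern, using a hypothetical `volume` stand-in:

    import logging

    LOG = logging.getLogger(__name__)
    volume = "vol-0001"  # hypothetical stand-in for a Cinder volume object

    # Old style: the message is formatted even when DEBUG records are discarded.
    LOG.debug('Waiting the migration of {0}'.format(volume))

    # New style: interpolation is deferred until the record is actually
    # emitted, so disabled DEBUG logging costs almost nothing.
    LOG.debug('Waiting the migration of %s', volume)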
@@ -1,6 +1,3 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
 openstackdocstheme>=2.2.1 # Apache-2.0
 sphinx>=2.0.0,!=2.1.0 # BSD
 sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0

@@ -1,7 +1,6 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
+# Requirements lower bounds listed here are our best effort to keep them up to
+# date but we do not test them so no guarantee of having them all correct. If
+# you find any incorrect lower bounds, let us know or propose a fix.
 apscheduler>=3.5.1 # MIT License
 jsonpatch>=1.21 # BSD
 keystoneauth1>=3.4.0 # Apache-2.0

@@ -1,15 +1,11 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
 coverage>=4.5.1 # Apache-2.0
 doc8>=0.8.0 # Apache-2.0
 freezegun>=0.3.10 # Apache-2.0
-hacking>=3.0.1,<3.1.0 # Apache-2.0
+hacking>=7.0.0,<7.1.0 # Apache-2.0
 oslotest>=3.3.0 # Apache-2.0
 testscenarios>=0.5.0 # Apache-2.0/BSD
 testtools>=2.3.0 # MIT
 stestr>=2.0.0 # Apache-2.0
 os-api-ref>=1.4.0 # Apache-2.0
 bandit>=1.6.0 # Apache-2.0
 WebTest>=2.0.27 # MIT

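The new pin keeps the usual convention of capping hacking to a single minor series, because a new hacking minor can ship additional checks that would suddenly break the pep8 gate. A quick way to sanity-check what the specifier admits, sketched with the `packaging` library (not part of this commit):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    spec = SpecifierSet(">=7.0.0,<7.1.0")  # the new hacking pin
    print(Version("7.0.5") in spec)  # True: bugfix releases are accepted
    print(Version("7.1.0") in spec)  # False: a new minor may add checks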
@@ -153,7 +153,7 @@ class CinderHelper(object):
         final_status = ('success', 'error')
         while getattr(volume, 'migration_status') not in final_status:
             volume = self.get_volume(volume.id)
-            LOG.debug('Waiting the migration of {0}'.format(volume))
+            LOG.debug('Waiting the migration of %s', volume)
             time.sleep(retry_interval)
             if getattr(volume, 'migration_status') == 'error':
                 host_name = getattr(volume, 'os-vol-host-attr:host')
@@ -230,7 +230,7 @@ class CinderHelper(object):
             availability_zone=getattr(volume, 'availability_zone'))
         while getattr(new_volume, 'status') != 'available' and retry:
             new_volume = cinder.volumes.get(new_volume.id)
-            LOG.debug('Waiting volume creation of {0}'.format(new_volume))
+            LOG.debug('Waiting volume creation of %s', new_volume)
             time.sleep(retry_interval)
             retry -= 1
         LOG.debug("retry count: %s", retry)
@@ -292,9 +292,7 @@ class NovaHelper(object):
                        'OS-EXT-STS:vm_state') != 'resized' \
                 and retry:
             instance = self.nova.servers.get(instance.id)
-            LOG.debug(
-                'Waiting the resize of {0} to {1}'.format(
-                    instance, flavor_id))
+            LOG.debug('Waiting the resize of %s to %s', instance, flavor_id)
             time.sleep(1)
             retry -= 1

@@ -349,8 +347,7 @@ class NovaHelper(object):
         if dest_hostname is None:
             while (instance.status not in ['ACTIVE', 'ERROR'] and retry):
                 instance = self.nova.servers.get(instance.id)
-                LOG.debug(
-                    'Waiting the migration of {0}'.format(instance.id))
+                LOG.debug('Waiting the migration of %s', instance.id)
                 time.sleep(1)
                 retry -= 1
             new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host')
@@ -371,11 +368,9 @@ class NovaHelper(object):
                 if not getattr(instance, 'OS-EXT-STS:task_state'):
                     LOG.debug("Instance task state: %s is null", instance_id)
                     break
-                LOG.debug(
-                    'Waiting the migration of {0} to {1}'.format(
-                        instance,
-                        getattr(instance,
-                                'OS-EXT-SRV-ATTR:host')))
+                LOG.debug('Waiting the migration of %s to %s',
+                          instance,
+                          getattr(instance, 'OS-EXT-SRV-ATTR:host'))
                 time.sleep(1)
                 retry -= 1

@@ -725,7 +720,7 @@ class NovaHelper(object):
             instance_id, old_volume.id, new_volume.id)
         while getattr(new_volume, 'status') != 'in-use' and retry:
             new_volume = self.cinder.volumes.get(new_volume.id)
-            LOG.debug('Waiting volume update to {0}'.format(new_volume))
+            LOG.debug('Waiting volume update to %s', new_volume)
             time.sleep(retry_interval)
             retry -= 1
         LOG.debug("retry count: %s", retry)
@@ -91,8 +91,8 @@ class DataSourceBase(object):
             except Exception as e:
                 LOG.exception(e)
                 self.query_retry_reset(e)
-                LOG.warning("Retry {0} of {1} while retrieving metrics retry "
-                            "in {2} seconds".format(i+1, num_retries, timeout))
+                LOG.warning("Retry %d of %d while retrieving metrics retry "
+                            "in %d seconds", i+1, num_retries, timeout)
                 time.sleep(timeout)

     @abc.abstractmethod
@@ -90,8 +90,8 @@ class GnocchiHelper(base.DataSourceBase):
             **kwargs)

         if not resources:
-            LOG.warning("The {0} resource {1} could not be "
-                        "found".format(self.NAME, resource_id))
+            LOG.warning("The %s resource %s could not be found",
+                        self.NAME, resource_id)
             return

         resource_id = resources[0]['id']
@@ -99,7 +99,7 @@ class GnocchiHelper(base.DataSourceBase):
         if meter_name == "instance_cpu_usage":
             if resource_type != "instance":
                 LOG.warning("Unsupported resource type for metric "
-                            "'instance_cpu_usage': ", resource_type)
+                            "'instance_cpu_usage': %s", resource_type)
                 return

         # The "cpu_util" gauge (percentage) metric has been removed.
@@ -172,8 +172,8 @@ class GnocchiHelper(base.DataSourceBase):
             **kwargs)

         if not resources:
-            LOG.warning("The {0} resource {1} could not be "
-                        "found".format(self.NAME, resource_id))
+            LOG.warning("The %s resource %s could not be found",
+                        self.NAME, resource_id)
             return

         resource_id = resources[0]['id']
@@ -158,8 +158,9 @@ class GrafanaHelper(base.DataSourceBase):
         try:
             self.METRIC_MAP[meter_name]
         except KeyError:
-            LOG.error("Metric: {0} does not appear in the current Grafana "
-                      "metric map".format(meter_name))
+            LOG.error(
+                "Metric: %s does not appear in the current Grafana metric map",
+                meter_name)
             raise exception.MetricNotAvailable(metric=meter_name)

         db = self.METRIC_MAP[meter_name]['db']
@@ -184,7 +185,7 @@ class GrafanaHelper(base.DataSourceBase):

         resp = self.query_retry(self._request, **kwargs)
         if not resp:
-            LOG.warning("Datasource {0} is not available.".format(self.NAME))
+            LOG.warning("Datasource %s is not available.", self.NAME)
             return

         result = translator.extract_result(resp.content)
@@ -57,8 +57,8 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
             resource = self._extract_attribute(
                 data['resource'], data['attribute'])
         except AttributeError:
-            LOG.error("Resource: {0} does not contain attribute {1}".format(
-                data['resource'], data['attribute']))
+            LOG.error("Resource: %s does not contain attribute %s",
+                      data['resource'], data['attribute'])
             raise

         # Granularity is optional if it is None the minimal value for InfluxDB
@@ -82,7 +82,7 @@ class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
             index_aggregate = result['columns'].index(self._data['aggregate'])
             return result['values'][0][index_aggregate]
         except KeyError:
-            LOG.error("Could not extract {0} for the resource: {1}".format(
-                self._data['metric'], self._data['resource']))
+            LOG.error("Could not extract %s for the resource: %s",
+                      self._data['metric'], self._data['resource'])
             raise exception.NoSuchMetricForHost(
                 metric=self._data['metric'], host=self._data['resource'])
@@ -37,8 +37,8 @@ class DataSourceManager(object):
         (mon.MonascaHelper.NAME, mon.MonascaHelper.METRIC_MAP),
         (graf.GrafanaHelper.NAME, graf.GrafanaHelper.METRIC_MAP),
     ])
-    """Dictionary with all possible datasources, dictionary order is the default
-    order for attempting to use datasources
+    """Dictionary with all possible datasources, dictionary order is
+    the default order for attempting to use datasources
     """

     def __init__(self, config=None, osc=None):
@@ -127,8 +127,9 @@ class DataSourceManager(object):
                 if (metric not in self.metric_map[datasource] or
                         self.metric_map[datasource].get(metric) is None):
                     no_metric = True
-                    LOG.warning("Datasource: {0} could not be used due to "
-                                "metric: {1}".format(datasource, metric))
+                    LOG.warning(
+                        "Datasource: %s could not be used due to metric: %s",
+                        datasource, metric)
                     break
             if not no_metric:
                 # Try to use a specific datasource but attempt additional
@@ -216,9 +216,9 @@ class BaseModelBuilder(object):
             except Exception as e:
                 LOG.exception(e)
                 self.call_retry_reset(e)
-                LOG.warning("Retry {0} of {1}, error while calling service "
-                            "retry in {2} seconds".format(i+1, num_retries,
-                                                          timeout))
+                LOG.warning("Retry %d of %d, error while calling service "
+                            "retry in %s seconds",
+                            i+1, num_retries, timeout)
                 time.sleep(timeout)
         raise

@@ -274,7 +274,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
             instances = getattr(node_info, "servers", None)
             # Do not submit job if there are no instances on compute node
             if instances is None:
-                LOG.info("No instances on compute_node: {0}".format(node_info))
+                LOG.info("No instances on compute_node: %s", node_info)
                 return
             future_instances.append(
                 self.executor.submit(
@@ -330,7 +330,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
                 self.nova_helper.get_compute_node_by_name,
                 node, servers=True, detailed=True)
             for node in compute_nodes]
-        LOG.debug("submitted {0} jobs".format(len(compute_nodes)))
+        LOG.debug("submitted %d jobs", len(compute_nodes))

         # Futures will concurrently be added, only safe with CPython GIL
         future_instances = []
@@ -427,7 +427,7 @@ class NovaModelBuilder(base.BaseModelBuilder):

     def add_instance_node(self, node, instances):
         if instances is None:
-            LOG.info("no instances on compute_node: {0}".format(node))
+            LOG.info("no instances on compute_node: %s", node)
             return
         host = node.service["host"]
         compute_node = self.model.get_node_by_uuid(node.id)
@@ -180,8 +180,8 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):

                 for potential_noisy_instance in (
                         instance_priority_list_reverse):
-                    if(potential_noisy_instance ==
-                            potential_priority_instance):
+                    if (potential_noisy_instance ==
+                            potential_priority_instance):
                         loop_break_flag = True
                         break

@@ -205,7 +205,7 @@ class UniformAirflow(base.BaseStrategy):
             host = nodemap['node']
             if 'cores_used' not in nodemap:
                 # calculate the available resources
-                nodemap['cores_used'], nodemap['mem_used'],\
+                nodemap['cores_used'], nodemap['mem_used'], \
                     nodemap['disk_used'] = self.calculate_used_resource(
                         host)
             cores_available = (host.vcpus -
@@ -98,7 +98,7 @@ def no_translate_debug_logs(logical_line, filename):
     """
     for hint in _all_hints:
         if logical_line.startswith("LOG.debug(%s(" % hint):
-            yield(0, "N319 Don't translate debug level logs")
+            yield (0, "N319 Don't translate debug level logs")


 @flake8ext
@@ -128,21 +128,21 @@ def check_assert_called_once_with(logical_line, filename):
 @flake8ext
 def check_python3_xrange(logical_line):
     if re.search(r"\bxrange\s*\(", logical_line):
-        yield(0, "N325: Do not use xrange. Use range for large loops.")
+        yield (0, "N325: Do not use xrange. Use range for large loops.")


 @flake8ext
 def check_no_basestring(logical_line):
     if re.search(r"\bbasestring\b", logical_line):
         msg = ("N326: basestring is not Python3-compatible, use str instead.")
-        yield(0, msg)
+        yield (0, msg)


 @flake8ext
 def check_python3_no_iteritems(logical_line):
     if re.search(r".*\.iteritems\(\)", logical_line):
         msg = ("N327: Use dict.items() instead of dict.iteritems().")
-        yield(0, msg)
+        yield (0, msg)


 @flake8ext
@@ -282,7 +282,7 @@ def no_redundant_import_alias(logical_line):
     N342
     """
     if re.match(re_redundant_import_alias, logical_line):
-        yield(0, "N342: No redundant import alias.")
+        yield (0, "N342: No redundant import alias.")


 @flake8ext
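The yield changes above are purely cosmetic: `yield` is a keyword, not a function, so `yield(0, msg)` already yields the tuple `(0, msg)`. The added space is what the newer pycodestyle bundled with hacking 7.x wants (E275, missing whitespace after keyword, is the likely trigger; the commit itself does not name the check). A self-contained sketch:

    def checks():
        # yield(0, "...") and yield (0, "...") are semantically identical;
        # only the spacing differs, which is what the style check enforces.
        yield (0, "N325: Do not use xrange. Use range for large loops.")

    print(next(checks()))  # (0, 'N325: Do not use xrange. ...')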
@@ -169,7 +169,7 @@ class ActionPlan(base.WatcherPersistentObject, base.WatcherObject,
         :param action_plan_id: the id *or* uuid of a action_plan.
         :param eager: Load object fields if True (Default: False)
         :returns: a :class:`Action` object.
-        """
+        """  # noqa: E501
         if utils.is_int_like(action_plan_id):
             return cls.get_by_id(context, action_plan_id, eager=eager)
         elif utils.is_uuid_like(action_plan_id):
@@ -184,7 +184,7 @@ class ActionPlan(base.WatcherPersistentObject, base.WatcherObject,
         :param action_plan_id: the id of a action_plan.
         :param eager: Load object fields if True (Default: False)
         :returns: a :class:`ActionPlan` object.
-        """
+        """  # noqa: E501
         db_action_plan = cls.dbapi.get_action_plan_by_id(
             context, action_plan_id, eager=eager)
         action_plan = cls._from_db_object(
@@ -199,7 +199,7 @@ class ActionPlan(base.WatcherPersistentObject, base.WatcherObject,
         :param context: Security context
         :param eager: Load object fields if True (Default: False)
         :returns: a :class:`ActionPlan` object.
-        """
+        """  # noqa: E501
         db_action_plan = cls.dbapi.get_action_plan_by_uuid(
             context, uuid, eager=eager)
         action_plan = cls._from_db_object(
@@ -435,9 +435,10 @@ class TestListAction(api_base.FunctionalTest):
         self.assertIn('links', response.keys())
         self.assertEqual(2, len(response['links']))
         self.assertIn(uuid, response['links'][0]['href'])
-        for l in response['links']:
-            bookmark = l['rel'] == 'bookmark'
-            self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+        for link in response['links']:
+            bookmark = link['rel'] == 'bookmark'
+            self.assertTrue(self.validate_link(
+                link['href'], bookmark=bookmark))

     def test_collection_links(self):
         parents = None
@@ -270,9 +270,10 @@ class TestListActionPlan(api_base.FunctionalTest):
         self.assertIn('links', response.keys())
         self.assertEqual(2, len(response['links']))
         self.assertIn(uuid, response['links'][0]['href'])
-        for l in response['links']:
-            bookmark = l['rel'] == 'bookmark'
-            self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+        for link in response['links']:
+            bookmark = link['rel'] == 'bookmark'
+            self.assertTrue(self.validate_link(
+                link['href'], bookmark=bookmark))

     def test_collection_links(self):
         for id_ in range(5):
@@ -216,9 +216,10 @@ class TestListAuditTemplate(FunctionalTestWithSetup):
         self.assertIn('links', response.keys())
         self.assertEqual(2, len(response['links']))
         self.assertIn(uuid, response['links'][0]['href'])
-        for l in response['links']:
-            bookmark = l['rel'] == 'bookmark'
-            self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+        for link in response['links']:
+            bookmark = link['rel'] == 'bookmark'
+            self.assertTrue(
+                self.validate_link(link['href'], bookmark=bookmark))

     def test_collection_links(self):
         for id_ in range(5):
@@ -237,9 +237,10 @@ class TestListAudit(api_base.FunctionalTest):
         self.assertIn('links', response.keys())
         self.assertEqual(2, len(response['links']))
         self.assertIn(uuid, response['links'][0]['href'])
-        for l in response['links']:
-            bookmark = l['rel'] == 'bookmark'
-            self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+        for link in response['links']:
+            bookmark = link['rel'] == 'bookmark'
+            self.assertTrue(
+                self.validate_link(link['href'], bookmark=bookmark))

     def test_collection_links(self):
         for id_ in range(5):
@@ -70,7 +70,7 @@ def create_test_audit_template(**kwargs):
     :param kwargs: kwargsargs with overriding values for audit template's
                    attributes.
     :returns: Test AuditTemplate DB object.
-    """
+    """  # noqa: E501
     audit_template = get_test_audit_template(**kwargs)
     # Let DB generate ID if it isn't specified explicitly
     if 'id' not in kwargs:
@@ -67,7 +67,7 @@ class TestNoisyNeighbor(TestBaseStrategy):
         self.m_c_model.return_value = model
         node_uuid = 'Node_1'
         n1, n2 = self.strategy.group_hosts()
-        self.assertTrue(node_uuid in n1)
+        self.assertIn(node_uuid, n1)
         self.assertEqual(n1[node_uuid]['priority_vm'].uuid, 'INSTANCE_3')
         self.assertEqual(n1[node_uuid]['noisy_vm'].uuid, 'INSTANCE_4')
         self.assertEqual('Node_0', n2[0].uuid)
@@ -464,11 +464,11 @@ class TestZoneMigration(TestBaseStrategy):
         }
         filters = self.strategy.get_priority_filter_list()
         self.assertIn(strategies.zone_migration.ComputeHostSortFilter,
-                      map(lambda l: l.__class__, filters))
+                      map(lambda l: l.__class__, filters))  # noqa: E741
         self.assertIn(strategies.zone_migration.StorageHostSortFilter,
-                      map(lambda l: l.__class__, filters))
+                      map(lambda l: l.__class__, filters))  # noqa: E741
         self.assertIn(strategies.zone_migration.ProjectSortFilter,
-                      map(lambda l: l.__class__, filters))
+                      map(lambda l: l.__class__, filters))  # noqa: E741

         # ComputeHostSortFilter #
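Two remedies for pycodestyle's E741 (ambiguous variable name 'l') appear in this commit: the API tests rename the loop variable, while the zone-migration test keeps the one-letter lambda parameter and suppresses the check inline. A small sketch of both, using hypothetical data:

    links = [{'rel': 'bookmark', 'href': 'http://example.test/1'}]

    # Renaming, as in the API tests above:
    for link in links:
        bookmark = link['rel'] == 'bookmark'

    # Inline suppression, as in the zone-migration test, when the short
    # name never escapes a throwaway lambda:
    classes = list(map(lambda l: l.__class__, links))  # noqa: E741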