Add hacking checks to watcher

The hacking checks run functional validations of the code during the
pep8 job, enforcing deeper checks and code consistency.  This change set
adds the hacking checks to the watcher project.  These checks were
seeded from the neutron project, which has a good set of base defaults.

This change set also updates the watcher project to be compliant with
these new hacking checks.

Change-Id: I6f4566d384a7400bddf228aa127a53e6ecc82c2e
Drew Thorstensen 2016-07-20 16:05:01 -07:00
parent 65d3a4d75e
commit 7d704dbeec
31 changed files with 398 additions and 75 deletions
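
Most of the hunks below are mechanical fixes for the new checks. As a rough
illustration (the check numbers come from the new watcher/hacking/checks.py
further down; the payload/body names are made up), the log-translation check
(N320) and the jsonutils check (N321) turn lines like the first pair into
the second:

    # Flagged by the new checks:
    LOG.info("[options] dry_run = %s", dry_run)       # N320: needs a translation hint
    body = json.dumps(payload)                        # N321: use jsonutils

    # Compliant equivalents:
    LOG.info(_LI("[options] dry_run = %s"), dry_run)
    body = jsonutils.dumps(payload)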

View File

@ -17,6 +17,7 @@ oslo.log>=1.14.0 # Apache-2.0
oslo.messaging>=5.2.0 # Apache-2.0
oslo.policy>=1.9.0 # Apache-2.0
oslo.reports>=0.6.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.10.0 # Apache-2.0
oslo.utils>=3.15.0 # Apache-2.0
PasteDeploy>=1.5.0 # MIT

View File

@ -54,6 +54,7 @@ commands = python setup.py bdist_wheel
[hacking]
import_exceptions = watcher._i18n
local-check-factory = watcher.hacking.checks.factory
[doc8]
extension=.rst
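
The [hacking] section above is what wires the new module in:
local-check-factory points flake8 at watcher.hacking.checks.factory, while
import_exceptions tells hacking's import-style checks to tolerate importing
names (rather than whole modules) from watcher._i18n, assuming the standard
behaviour of those two options. For example:

    # Permitted by "import_exceptions = watcher._i18n", even though names
    # rather than a module are being imported:
    from watcher._i18n import _, _LI, _LW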

View File

@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_serialization import jsonutils
from oslo_utils import strutils
import six
import wsme
@ -118,7 +118,7 @@ class JsonType(wtypes.UserType):
@staticmethod
def validate(value):
try:
json.dumps(value)
jsonutils.dumps(value, default=None)
except TypeError:
raise exception.Invalid(_('%s is not JSON serializable') % value)
else:
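
The explicit default=None keeps the validator strict: jsonutils.dumps
normally falls back to oslo's to_primitive handler, which serializes values
that plain json.dumps rejects, so the TypeError branch would never fire. A
small illustration, assuming the stock oslo.serialization behaviour:

    import datetime
    from oslo_serialization import jsonutils

    now = datetime.datetime.utcnow()
    jsonutils.dumps(now)                # to_primitive converts the datetime; no error
    jsonutils.dumps(now, default=None)  # strict, like json.dumps: raises TypeError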

View File

@ -20,10 +20,10 @@ response with one formatted so the client can parse it.
Based on pecan.middleware.errordocument
"""
import json
from xml import etree as et
from oslo_log import log
from oslo_serialization import jsonutils
import six
import webob
@ -84,7 +84,8 @@ class ParsableErrorMiddleware(object):
else:
if six.PY3:
app_iter = [i.decode('utf-8') for i in app_iter]
body = [json.dumps({'error_message': '\n'.join(app_iter)})]
body = [jsonutils.dumps(
{'error_message': '\n'.join(app_iter)})]
if six.PY3:
body = [item.encode('utf-8') for item in body]
state['headers'].append(('Content-Type', 'application/json'))

View File

@ -91,7 +91,8 @@ class WatcherException(Exception):
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.items():
LOG.error("%s: %s", name, value)
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
raise e

View File

@ -438,7 +438,7 @@ class PurgeCommand(object):
print(_("Here below is a table containing the objects "
"that can be purged%s:") % _orphans_note)
LOG.info("\n%s", self._objects_map.get_count_table())
LOG.info(_LI("\n%s"), self._objects_map.get_count_table())
print(self._objects_map.get_count_table())
LOG.info(_LI("Purge process completed"))
@ -465,11 +465,11 @@ def purge(age_in_days, max_number, audit_template, exclude_orphans, dry_run):
if max_number and max_number < 0:
raise exception.NegativeLimitError
LOG.info("[options] age_in_days = %s", age_in_days)
LOG.info("[options] max_number = %s", max_number)
LOG.info("[options] audit_template = %s", audit_template)
LOG.info("[options] exclude_orphans = %s", exclude_orphans)
LOG.info("[options] dry_run = %s", dry_run)
LOG.info(_LI("[options] age_in_days = %s"), age_in_days)
LOG.info(_LI("[options] max_number = %s"), max_number)
LOG.info(_LI("[options] audit_template = %s"), audit_template)
LOG.info(_LI("[options] exclude_orphans = %s"), exclude_orphans)
LOG.info(_LI("[options] dry_run = %s"), dry_run)
uuid = PurgeCommand.get_audit_template_uuid(audit_template)

View File

@ -16,11 +16,10 @@
SQLAlchemy models for watcher service
"""
import json
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
from oslo_serialization import jsonutils
import six.moves.urllib.parse as urlparse
from sqlalchemy import Column
from sqlalchemy import DateTime
@ -70,12 +69,12 @@ class JsonEncodedType(TypeDecorator):
% (self.__class__.__name__,
self.type.__name__,
type(value).__name__))
serialized_value = json.dumps(value)
serialized_value = jsonutils.dumps(value)
return serialized_value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
value = jsonutils.loads(value)
return value
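
For the plain dicts and lists these columns actually store, the swap is
behaviour preserving; jsonutils round-trips them exactly like the stdlib
json module (quick illustration):

    from oslo_serialization import jsonutils

    value = {'foo': ['bar', 1]}
    assert jsonutils.loads(jsonutils.dumps(value)) == value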

View File

@ -24,7 +24,7 @@ calculating its :ref:`global efficacy <efficacy_definition>`.
"""
import abc
import json
from oslo_serialization import jsonutils
import six
import voluptuous
@ -81,4 +81,4 @@ class EfficacySpecification(object):
for indicator in self.indicators_specs]
def serialize_indicators_specs(self):
return json.dumps(self.get_indicators_specs_dicts())
return jsonutils.dumps(self.get_indicators_specs_dicts())

View File

@ -13,9 +13,12 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
import threading
from watcher._i18n import _LW
LOG = log.getLogger(__name__)
@ -72,10 +75,10 @@ class Mapping(object):
# remove vm
self.mapping_vm.pop(vm_uuid)
else:
LOG.warning(
"trying to delete the virtual machine {0} but it was not "
"found on hypervisor {1}".format(
vm_uuid, node_uuid))
LOG.warning(_LW(
"trying to delete the virtual machine %(vm)s but it was "
"not found on hypervisor %(hyp)s"),
{'vm': vm_uuid, 'hyp': node_uuid})
finally:
self.lock.release()

View File

@ -30,7 +30,7 @@ telemetries to measure thermal/workload status of server.
from oslo_log import log
from watcher._i18n import _, _LE, _LI
from watcher._i18n import _, _LI, _LW
from watcher.common import exception as wexc
from watcher.decision_engine.model import resource
from watcher.decision_engine.model import vm_state
@ -159,7 +159,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
aggregate='avg')
# some hosts may not have outlet temp meters, remove from target
if outlet_temp is None:
LOG.warning(_LE("%s: no outlet temp data"), resource_id)
LOG.warning(_LW("%s: no outlet temp data"), resource_id)
continue
LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
@ -183,7 +183,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
# select the first active VM to migrate
vm = self.model.get_vm_from_id(vm_id)
if vm.state != vm_state.VMState.ACTIVE.value:
LOG.info(_LE("VM not active, skipped: %s"),
LOG.info(_LI("VM not active, skipped: %s"),
vm.uuid)
continue
return mig_src_hypervisor, vm
@ -243,7 +243,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
return self.solution
if len(hosts_target) == 0:
LOG.warning(_LE("No hosts under outlet temp threshold found"))
LOG.warning(_LW("No hosts under outlet temp threshold found"))
return self.solution
# choose the server with highest outlet t
@ -263,7 +263,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
if len(dest_servers) == 0:
# TODO(zhenzanz): maybe to warn that there's no resource
# for instance.
LOG.info(_LE("No proper target host could be found"))
LOG.info(_LI("No proper target host could be found"))
return self.solution
dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"]))
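
The hint changes in this strategy are driven by the N320 regex, which flags
not only untranslated strings but also a marker that does not match the log
level (illustrative, message text shortened):

    LOG.warning("no outlet temp data")       # flagged: no translation hint
    LOG.warning(_LE("no outlet temp data"))  # flagged: _LE hint on a warning
    LOG.warning(_LW("no outlet temp data"))  # compliant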

View File

@ -179,7 +179,7 @@ class UniformAirflow(base.BaseStrategy):
try:
vm = self.model.get_vm_from_id(vm_id)
if vm.state != vm_state.VMState.ACTIVE.value:
LOG.info(_LE("VM not active; skipped: %s"),
LOG.info(_LI("VM not active; skipped: %s"),
vm.uuid)
continue
vms_tobe_migrate.append(vm)
@ -255,7 +255,7 @@ class UniformAirflow(base.BaseStrategy):
aggregate='avg')
# some hosts may not have airflow meter, remove from target
if airflow is None:
LOG.warning(_LE("%s: no airflow data"), resource_id)
LOG.warning(_LW("%s: no airflow data"), resource_id)
continue
LOG.debug("%s: airflow %f" % (resource_id, airflow))

View File

@ -170,7 +170,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
:param vm_uuid: vm for which statistic is gathered.
:return: dict
"""
LOG.debug(_LI('get_vm_load started'))
LOG.debug('get_vm_load started')
vm_vcpus = self.model.get_resource_from_id(
resource.ResourceType.cpu_cores).get_capacity(
self.model.get_vm_from_id(vm_uuid))

View File

@ -18,7 +18,7 @@ import collections
from oslo_log import log
from watcher._i18n import _LE, _LI
from watcher._i18n import _LI, _LW
from watcher.common import context
from watcher.decision_engine.loading import default
from watcher import objects
@ -238,7 +238,7 @@ class Syncer(object):
invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters)
for at in invalid_ats:
LOG.warning(
_LE("Audit Template '%(audit_template)s' references a "
_LW("Audit Template '%(audit_template)s' references a "
"goal that does not exist"),
audit_template=at.uuid)
@ -275,7 +275,7 @@ class Syncer(object):
strategy_loader = default.DefaultStrategyLoader()
implemented_strategies = strategy_loader.list_available()
for _, goal_cls in implemented_goals.items():
for goal_cls in implemented_goals.values():
goals_map[goal_cls.get_name()] = GoalMapping(
name=goal_cls.get_name(),
display_name=goal_cls.get_translatable_display_name(),
@ -284,7 +284,7 @@ class Syncer(object):
for indicator in goal_cls.get_efficacy_specification(
).get_indicators_specifications()))
for _, strategy_cls in implemented_strategies.items():
for strategy_cls in implemented_strategies.values():
strategies_map[strategy_cls.get_name()] = StrategyMapping(
name=strategy_cls.get_name(),
goal_name=strategy_cls.get_goal_name(),
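
Dropping the throwaway '_' loop variable avoids the new builtins-gettext
check (N341), which treats any bare '_' token as a use of the translation
alias, and it also prevents accidentally rebinding that alias where it is
imported. A sketch of the failure mode (illustrative):

    from watcher._i18n import _

    for _, goal_cls in implemented_goals.items():   # rebinds the gettext alias
        pass
    raise Exception(_("goal not found"))            # '_' is now a plain string: TypeError

    # compliant and equivalent:
    for goal_cls in implemented_goals.values():
        pass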

View File

watcher/hacking/checks.py (new file, 309 lines added)
View File

@ -0,0 +1,309 @@
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import pep8
import six
def flake8ext(f):
"""Decorator to indicate flake8 extension.
This is borrowed from hacking.core.flake8ext(), but at now it is used
only for unit tests to know which are watcher flake8 extensions.
"""
f.name = __name__
return f
# Guidelines for writing new hacking checks
#
# - Use only for Watcher specific tests. OpenStack general tests
# should be submitted to the common 'hacking' module.
# - Pick numbers in the range N3xx. Find the current test with
# the highest allocated number and then pick the next value.
# - Keep the test method code in the source file ordered based
# on the N3xx value.
# - List the new rule in the top level HACKING.rst file
_all_log_levels = {
'reserved': '_', # this should never be used with a log unless
# it is a variable used for a log message and
# a exception
'error': '_LE',
'info': '_LI',
'warning': '_LW',
'critical': '_LC',
'exception': '_LE',
}
_all_hints = set(_all_log_levels.values())
def _regex_for_level(level, hint):
return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
'level': level,
'wrong_hints': '|'.join(_all_hints - set([hint])),
}
log_translation_hint = re.compile(
'|'.join('(?:%s)' % _regex_for_level(level, hint)
for level, hint in six.iteritems(_all_log_levels)))
log_warn = re.compile(
r"(.)*LOG\.(warn)\(\s*('|\"|_)")
unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b")
unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b")
@flake8ext
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
if "watcher/tests" in filename:
return
if pep8.noqa(physical_line):
return
msg = "N320: Log messages require translation hints!"
if log_translation_hint.match(logical_line):
yield (0, msg)
@flake8ext
def use_jsonutils(logical_line, filename):
msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
# Skip list is currently empty.
json_check_skipped_patterns = []
for pattern in json_check_skipped_patterns:
if pattern in filename:
return
if "json." in logical_line:
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
for f in json_funcs:
pos = logical_line.find('json.%s' % f)
if pos != -1:
yield (pos, msg % {'fun': f[:-1]})
@flake8ext
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_(' and 'LOG.debug(_Lx('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
N319
"""
for hint in _all_hints:
if logical_line.startswith("LOG.debug(%s(" % hint):
yield(0, "N319 Don't translate debug level logs")
@flake8ext
def check_assert_called_once_with(logical_line, filename):
# Try to detect unintended calls of nonexistent mock methods like:
# assert_called_once
# assertCalledOnceWith
# assert_has_called
# called_once_with
if 'watcher/tests/' in filename:
if '.assert_called_once_with(' in logical_line:
return
uncased_line = logical_line.lower().replace('_', '')
check_calls = ['.assertcalledonce', '.calledoncewith']
if any(x for x in check_calls if x in uncased_line):
msg = ("N322: Possible use of no-op mock method. "
"please use assert_called_once_with.")
yield (0, msg)
if '.asserthascalled' in uncased_line:
msg = ("N322: Possible use of no-op mock method. "
"please use assert_has_calls.")
yield (0, msg)
@flake8ext
def check_python3_xrange(logical_line):
if re.search(r"\bxrange\s*\(", logical_line):
yield(0, "N325: Do not use xrange. Use range, or six.moves.range for "
"large loops.")
@flake8ext
def check_no_basestring(logical_line):
if re.search(r"\bbasestring\b", logical_line):
msg = ("N326: basestring is not Python3-compatible, use "
"six.string_types instead.")
yield(0, msg)
@flake8ext
def check_python3_no_iteritems(logical_line):
if re.search(r".*\.iteritems\(\)", logical_line):
msg = ("N327: Use six.iteritems() instead of dict.iteritems().")
yield(0, msg)
@flake8ext
def check_asserttrue(logical_line, filename):
if 'watcher/tests/' in filename:
if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertTrue(observed) instead of "
"assertEqual(True, observed)")
yield (0, msg)
@flake8ext
def check_assertfalse(logical_line, filename):
if 'watcher/tests/' in filename:
if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line):
msg = ("N328: Use assertFalse(observed) instead of "
"assertEqual(False, observed)")
yield (0, msg)
@flake8ext
def check_assertempty(logical_line, filename):
if 'watcher/tests/' in filename:
msg = ("N330: Use assertEqual(*empty*, observed) instead of "
"assertEqual(observed, *empty*). *empty* contains "
"{}, [], (), set(), '', \"\"")
empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")"
reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties
if re.search(reg, logical_line):
yield (0, msg)
@flake8ext
def check_assertisinstance(logical_line, filename):
if 'watcher/tests/' in filename:
if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)",
logical_line):
msg = ("N331: Use assertIsInstance(observed, type) instead "
"of assertTrue(isinstance(observed, type))")
yield (0, msg)
@flake8ext
def check_assertequal_for_httpcode(logical_line, filename):
msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) "
"instead of assertEqual(observed_http_code, expected_http_code)")
if 'watcher/tests/' in filename:
if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)",
logical_line):
yield (0, msg)
@flake8ext
def check_log_warn_deprecated(logical_line, filename):
msg = "N333: Use LOG.warning due to compatibility with py3"
if log_warn.match(logical_line):
yield (0, msg)
@flake8ext
def check_oslo_i18n_wrapper(logical_line, filename, noqa):
"""Check for watcher.i18n usage.
N340(watcher/foo/bar.py): from watcher.i18n import _
Okay(watcher/foo/bar.py): from watcher.i18n import _ # noqa
"""
if noqa:
return
split_line = logical_line.split()
modulename = os.path.normpath(filename).split('/')[0]
bad_i18n_module = '%s.i18n' % modulename
if (len(split_line) > 1 and split_line[0] in ('import', 'from')):
if (split_line[1] == bad_i18n_module or
modulename != 'watcher' and split_line[1] in ('watcher.i18n',
'watcher._i18n')):
msg = ("N340: %(found)s is found. Use %(module)s._i18n instead."
% {'found': split_line[1], 'module': modulename})
yield (0, msg)
@flake8ext
def check_builtins_gettext(logical_line, tokens, filename, lines, noqa):
"""Check usage of builtins gettext _().
N341(watcher/foo.py): _('foo')
Okay(watcher/i18n.py): _('foo')
Okay(watcher/_i18n.py): _('foo')
Okay(watcher/foo.py): _('foo') # noqa
"""
if noqa:
return
modulename = os.path.normpath(filename).split('/')[0]
if '%s/tests' % modulename in filename:
return
if os.path.basename(filename) in ('i18n.py', '_i18n.py'):
return
token_values = [t[1] for t in tokens]
i18n_wrapper = '%s._i18n' % modulename
if '_' in token_values:
i18n_import_line_found = False
for line in lines:
split_line = [elm.rstrip(',') for elm in line.split()]
if (len(split_line) > 1 and split_line[0] == 'from' and
split_line[1] == i18n_wrapper and
'_' in split_line):
i18n_import_line_found = True
break
if not i18n_import_line_found:
msg = ("N341: _ from python builtins module is used. "
"Use _ from %s instead." % i18n_wrapper)
yield (0, msg)
def factory(register):
register(validate_log_translations)
register(use_jsonutils)
register(check_assert_called_once_with)
register(no_translate_debug_logs)
register(check_python3_xrange)
register(check_no_basestring)
register(check_python3_no_iteritems)
register(check_asserttrue)
register(check_assertfalse)
register(check_assertempty)
register(check_assertisinstance)
register(check_assertequal_for_httpcode)
register(check_log_warn_deprecated)
register(check_oslo_i18n_wrapper)
register(check_builtins_gettext)
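
The registered checks are plain generator functions over logical lines, so
they can be exercised directly from a Python shell before running the full
pep8 job (illustrative, not part of the change set):

    from watcher.hacking import checks

    list(checks.check_python3_xrange("for i in xrange(10):"))
    # [(0, 'N325: Do not use xrange. Use range, or six.moves.range for large loops.')]

    # the filename argument only matters for the (currently empty) skip list
    list(checks.use_jsonutils("return json.dumps(obj)", "watcher/common/utils.py"))
    # [(7, 'N321: jsonutils.dumps must be used instead of json.dumps')]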

View File

@ -349,6 +349,14 @@ class WatcherObject(object):
def iteritems(self):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
return self._iteritems()
# dictish syntactic sugar, internal to pass hacking checks
def _iteritems(self):
"""For backwards-compatibility with dict-based objects.
NOTE(danms): May be removed in the future.
"""
for name in list(self.fields.keys()) + self.obj_extra_fields:
@ -357,7 +365,7 @@ class WatcherObject(object):
yield name, getattr(self, name)
def items(self):
return list(self.iteritems())
return list(self._iteritems())
def __getitem__(self, name):
"""For backwards-compatibility with dict-based objects.

View File

@ -15,11 +15,10 @@
from __future__ import unicode_literals
import json
import mock
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from watcher.api.controllers import root
from watcher.api import hooks
@ -90,7 +89,8 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
response = self.get_json('/', path_prefix='', expect_errors=True)
actual_msg = json.loads(response.json['error_message'])['faultstring']
actual_msg = jsonutils.loads(
response.json['error_message'])['faultstring']
self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg)
def test_hook_remote_error_success(self):
@ -107,7 +107,8 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
# we don't care about this garbage.
expected_msg = ("Remote error: %s %s"
% (test_exc_type, self.MSG_WITHOUT_TRACE))
actual_msg = json.loads(response.json['error_message'])['faultstring']
actual_msg = jsonutils.loads(
response.json['error_message'])['faultstring']
self.assertEqual(expected_msg, actual_msg)
def test_hook_without_traceback(self):
@ -116,7 +117,8 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
response = self.get_json('/', path_prefix='', expect_errors=True)
actual_msg = json.loads(response.json['error_message'])['faultstring']
actual_msg = jsonutils.loads(
response.json['error_message'])['faultstring']
self.assertEqual(msg, actual_msg)
def test_hook_server_debug_on_serverfault(self):
@ -125,7 +127,7 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
response = self.get_json('/', path_prefix='', expect_errors=True)
actual_msg = json.loads(
actual_msg = jsonutils.loads(
response.json['error_message'])['faultstring']
self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg)
@ -137,6 +139,6 @@ class TestNoExceptionTracebackHook(api_base.FunctionalTest):
response = self.get_json('/', path_prefix='', expect_errors=True)
actual_msg = json.loads(
actual_msg = jsonutils.loads(
response.json['error_message'])['faultstring']
self.assertEqual(self.MSG_WITH_TRACE, actual_msg)

View File

@ -16,7 +16,7 @@ Utils for testing the API service.
"""
import datetime
import json
from oslo_serialization import jsonutils
from watcher.api.controllers.v1 import action as action_ctrl
from watcher.api.controllers.v1 import action_plan as action_plan_ctrl
@ -66,7 +66,7 @@ class FakeMemcache(object):
def get(self, key):
dt = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)
return json.dumps((self._cache.get(key), dt.isoformat()))
return jsonutils.dumps((self._cache.get(key), dt.isoformat()))
def set(self, key, value, time=0, min_compress_len=0):
self.set_value = value

View File

@ -11,10 +11,10 @@
# limitations under the License.
import datetime
import json
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from wsme import types as wtypes
from watcher.api.controllers.v1 import action as api_action
@ -497,7 +497,7 @@ class TestActionPolicyEnforcement(api_base.FunctionalTest):
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
json.loads(response.json['error_message'])['faultstring'])
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(

View File

@ -12,11 +12,11 @@
import datetime
import itertools
import json
import mock
import pecan
from oslo_config import cfg
from oslo_serialization import jsonutils
from wsme import types as wtypes
from watcher.api.controllers.v1 import action_plan as api_action_plan
@ -590,7 +590,7 @@ class TestActionPlanPolicyEnforcement(api_base.FunctionalTest):
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
json.loads(response.json['error_message'])['faultstring'])
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(

View File

@ -12,10 +12,10 @@
import datetime
import itertools
import json
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves.urllib import parse as urlparse
from wsme import types as wtypes
@ -669,7 +669,7 @@ class TestAuaditTemplatePolicyEnforcement(api_base.FunctionalTest):
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
json.loads(response.json['error_message'])['faultstring'])
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(

View File

@ -11,10 +11,10 @@
# limitations under the License.
import datetime
import json
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from wsme import types as wtypes
@ -759,7 +759,7 @@ class TestAuaditPolicyEnforcement(api_base.FunctionalTest):
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
json.loads(response.json['error_message'])['faultstring'])
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(

View File

@ -10,9 +10,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves.urllib import parse as urlparse
from watcher.common import utils
@ -134,7 +133,7 @@ class TestGoalPolicyEnforcement(api_base.FunctionalTest):
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
json.loads(response.json['error_message'])['faultstring'])
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(

View File

@ -10,9 +10,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves.urllib import parse as urlparse
from watcher.common import utils
@ -203,7 +202,7 @@ class TestStrategyPolicyEnforcement(api_base.FunctionalTest):
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
json.loads(response.json['error_message'])['faultstring'])
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(

View File

@ -62,13 +62,13 @@ class TestChangeNovaServiceState(base.TestCase):
self.action.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1",
self.action.STATE: hstate.HypervisorState.DISABLED.value}
self.assertEqual(True, self.action.validate_parameters())
self.assertTrue(self.action.validate_parameters())
def test_parameters_up(self):
self.action.input_parameters = {
baction.BaseAction.RESOURCE_ID: "compute-1",
self.action.STATE: hstate.HypervisorState.ENABLED.value}
self.assertEqual(True, self.action.validate_parameters())
self.assertTrue(self.action.validate_parameters())
def test_parameters_exception_wrong_state(self):
self.action.input_parameters = {

View File

@ -77,7 +77,7 @@ class TestMigration(base.TestCase):
self.action.DST_HYPERVISOR: 'compute-2',
self.action.SRC_HYPERVISOR: 'compute-3'}
self.action.input_parameters = params
self.assertEqual(True, self.action.validate_parameters())
self.assertTrue(self.action.validate_parameters())
def test_parameters_cold(self):
params = {baction.BaseAction.RESOURCE_ID:
@ -86,7 +86,7 @@ class TestMigration(base.TestCase):
self.action.DST_HYPERVISOR: 'compute-2',
self.action.SRC_HYPERVISOR: 'compute-3'}
self.action_cold.input_parameters = params
self.assertEqual(True, self.action_cold.validate_parameters())
self.assertTrue(self.action_cold.validate_parameters())
def test_parameters_exception_empty_fields(self):
parameters = {baction.BaseAction.RESOURCE_ID: None,

View File

@ -29,7 +29,7 @@ class TestSleep(base.TestCase):
def test_parameters_duration(self):
self.s.input_parameters = {self.s.DURATION: 1.0}
self.assertEqual(True, self.s.validate_parameters())
self.assertTrue(self.s.validate_parameters())
def test_parameters_duration_empty(self):
self.s.input_parameters = {self.s.DURATION: None}

View File

@ -51,7 +51,7 @@ class TestNovaHelper(base.TestCase):
nova_util.nova.servers.list.return_value = [server]
result = nova_util.stop_instance(instance_id)
self.assertEqual(True, result)
self.assertTrue(result)
def test_set_host_offline(self, mock_glance, mock_cinder, mock_neutron,
mock_nova):
@ -60,7 +60,7 @@ class TestNovaHelper(base.TestCase):
nova_util.nova.hosts = mock.MagicMock()
nova_util.nova.hosts.get.return_value = host
result = nova_util.set_host_offline("rennes")
self.assertEqual(True, result)
self.assertTrue(result)
@mock.patch.object(time, 'sleep', mock.Mock())
def test_live_migrate_instance(self, mock_glance, mock_cinder,
@ -85,7 +85,7 @@ class TestNovaHelper(base.TestCase):
self.instance_uuid,
self.destination_hypervisor)
self.assertEqual(False, is_success)
self.assertFalse(is_success)
@mock.patch.object(time, 'sleep', mock.Mock())
def test_watcher_non_live_migrate_instance_volume(

View File

@ -125,15 +125,15 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
h1 = model.get_hypervisor_from_id('Node_0')
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
self.assertEqual(False, res)
self.assertFalse(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
self.assertEqual(False, res)
self.assertFalse(res)
cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.is_overloaded(h1, model, cc)
self.assertEqual(True, res)
self.assertTrue(res)
def test_vm_fits(self):
model = self.fake_cluster.generate_scenario_1()
@ -143,11 +143,11 @@ class TestVMWorkloadConsolidation(base.BaseTestCase):
vm_uuid = 'VM_0'
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc)
self.assertEqual(True, res)
self.assertTrue(res)
cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0}
res = self.strategy.vm_fits(vm_uuid, h, model, cc)
self.assertEqual(False, res)
self.assertFalse(res)
def test_add_action_enable_hypervisor(self):
model = self.fake_cluster.generate_scenario_1()

View File

@ -139,7 +139,7 @@ class TestWorkloadBalance(base.BaseTestCase):
self.strategy.ceilometer = mock.MagicMock(
statistic_aggregation=self.fake_metrics.mock_get_statistics_wb)
solution = self.strategy.execute()
self.assertEqual(solution.actions, [])
self.assertEqual([], solution.actions)
def test_execute(self):
model = self.fake_cluster.generate_scenario_6_with_2_hypervisors()
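
The argument swap in this hunk satisfies the new N330 check, which wants the
empty literal in the expected (first) position (illustrative):

    self.assertEqual(solution.actions, [])   # flagged by N330
    self.assertEqual([], solution.actions)   # compliant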

View File

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_serialization import jsonutils
import uuid
from watcher_tempest_plugin.services.infra_optim import base
@ -27,11 +27,11 @@ class InfraOptimClientJSON(base.BaseInfraOptimClient):
def serialize(self, object_dict):
"""Serialize an Watcher object."""
return json.dumps(object_dict)
return jsonutils.dumps(object_dict)
def deserialize(self, object_str):
"""Deserialize an Watcher object."""
return json.loads(object_str.decode('utf-8'))
return jsonutils.loads(object_str.decode('utf-8'))
# ### AUDIT TEMPLATES ### #
@ -201,7 +201,7 @@ class InfraOptimClientJSON(base.BaseInfraOptimClient):
:param audit_uuid: The unique identifier of the related Audit
"""
_, action_plans = self.list_action_plans(audit_uuid=audit_uuid)
action_plans = self.list_action_plans(audit_uuid=audit_uuid)[1]
for action_plan in action_plans:
self.delete_action_plan(action_plan['uuid'])