Fix E265 violations and re-enable gating

E265 got stricter in hacking 0.9, so fix new violations and
re-enable gating.

Change-Id: I64159facb0698c19ea0689e9b1dd5266615ddb45
Igor Degtiarov 2014-06-25 13:18:42 +03:00
parent 831c303815
commit a4ad787b3f
20 changed files with 65 additions and 67 deletions
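
For context: E265 is the pep8/pycodestyle check "block comment should start with '# '", and the pep8 release pulled in by hacking 0.9 applies it more strictly. A minimal illustration of what the re-enabled gate rejects and accepts (a standalone example, not taken from the diff below):

    #no space after the hash: flagged as E265
    # a space after the hash satisfies the check
    x = 1  # inline comments fall under E262, not E265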


@@ -41,8 +41,8 @@ class CombinationEvaluator(evaluator.Evaluator):
"""Ensure there is sufficient data for evaluation,
transitioning to unknown otherwise.
"""
#note(sileht): alarm can be evaluated only with
#stable state of other alarm
# note(sileht): alarm can be evaluated only with
# stable state of other alarm
alarms_missing_states = [alarm_id for alarm_id, state in states
if not state or state == evaluator.UNKNOWN]
sufficient = len(alarms_missing_states) == 0


@@ -190,7 +190,7 @@ class Query(_Base):
field = wtypes.text
"The name of the field to test"
#op = wsme.wsattr(operation_kind, default='eq')
# op = wsme.wsattr(operation_kind, default='eq')
# this ^ doesn't seem to work.
op = wsme.wsproperty(operation_kind, get_op, set_op)
"The comparison operator. Defaults to 'eq'."
@@ -434,9 +434,9 @@ def _validate_timestamp_fields(query, field_name, operator_list,
for item in query:
if item.field == field_name:
#If *timestamp* or *search_offset* field was specified in the
#query, but timestamp is not supported on that resource, on
#which the query was invoked, then raise an exception.
# If *timestamp* or *search_offset* field was specified in the
# query, but timestamp is not supported on that resource, on
# which the query was invoked, then raise an exception.
if not allow_timestamps:
raise wsme.exc.UnknownArgument(field_name,
"not valid for " +
@@ -1006,7 +1006,7 @@ class MetersController(rest.RestController):
"""
q = q or []
#Timestamp field is not supported for Meter queries
# Timestamp field is not supported for Meter queries
kwargs = _query_to_kwargs(q, pecan.request.storage_conn.get_meters,
allow_timestamps=False)
return [Meter.from_db_model(m)
@@ -1503,8 +1503,8 @@ class AlarmThresholdRule(_Base):
meter_name = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the meter"
#FIXME(sileht): default doesn't work
#workaround: default is set in validate method
# FIXME(sileht): default doesn't work
# workaround: default is set in validate method
query = wsme.wsattr([Query], default=[])
"""The query to find the data for computing statistics.
Ownership settings are automatically included based on the Alarm owner.
@@ -1538,15 +1538,15 @@ class AlarmThresholdRule(_Base):
@staticmethod
def validate(threshold_rule):
#note(sileht): wsme default doesn't work in some case
#workaround for https://bugs.launchpad.net/wsme/+bug/1227039
# note(sileht): wsme default doesn't work in some case
# workaround for https://bugs.launchpad.net/wsme/+bug/1227039
if not threshold_rule.query:
threshold_rule.query = []
#Timestamp is not allowed for AlarmThresholdRule query, as the alarm
#evaluator will construct timestamp bounds for the sequence of
#statistics queries as the sliding evaluation window advances
#over time.
# Timestamp is not allowed for AlarmThresholdRule query, as the alarm
# evaluator will construct timestamp bounds for the sequence of
# statistics queries as the sliding evaluation window advances
# over time.
_validate_query(threshold_rule.query, storage.SampleFilter.__init__,
allow_timestamps=False)
return threshold_rule
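
The restriction is a consequence of how threshold evaluation works: the evaluator owns the query's time window and re-derives the bounds on every cycle, so user-supplied timestamps would fight with it. A rough sketch of that window derivation (a hypothetical helper, not the ceilometer evaluator API):

    import datetime

    def window_bounds(period, evaluation_periods, now=None):
        # Look back over the full evaluation window ending at "now"; these
        # bounds advance each time the alarm is re-evaluated.
        now = now or datetime.datetime.utcnow()
        start = now - datetime.timedelta(seconds=period * evaluation_periods)
        return start, now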
@@ -2139,7 +2139,7 @@ class AlarmsController(rest.RestController):
:param q: Filter rules for the alarms to be returned.
"""
q = q or []
#Timestamp is not supported field for Simple Alarm queries
# Timestamp is not supported field for Simple Alarm queries
kwargs = _query_to_kwargs(q,
pecan.request.storage_conn.get_alarms,
allow_timestamps=False)


@@ -25,7 +25,7 @@ import sys
import eventlet
# NOTE(jd) We need to monkey patch the socket and select module for,
# at least, oslo.messaging, otherwise everything's blocked on its
# first read() or select(); threads need to be patched too, because
# oslo.messaging uses threading.local
eventlet.monkey_patch(socket=True, select=True, thread=True)


@@ -51,20 +51,20 @@ def parse_snmp_return(ret):
class SNMPInspector(base.Inspector):
#CPU OIDs
# CPU OIDs
_cpu_1_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.1"
_cpu_5_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.2"
_cpu_15_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.3"
#Memory OIDs
# Memory OIDs
_memory_total_oid = "1.3.6.1.4.1.2021.4.5.0"
_memory_used_oid = "1.3.6.1.4.1.2021.4.6.0"
#Disk OIDs
# Disk OIDs
_disk_index_oid = "1.3.6.1.4.1.2021.9.1.1"
_disk_path_oid = "1.3.6.1.4.1.2021.9.1.2"
_disk_device_oid = "1.3.6.1.4.1.2021.9.1.3"
_disk_size_oid = "1.3.6.1.4.1.2021.9.1.6"
_disk_used_oid = "1.3.6.1.4.1.2021.9.1.8"
#Network Interface OIDs
# Network Interface OIDs
_interface_index_oid = "1.3.6.1.2.1.2.2.1.1"
_interface_name_oid = "1.3.6.1.2.1.2.2.1.2"
_interface_bandwidth_oid = "1.3.6.1.2.1.2.2.1.5"
@@ -73,7 +73,7 @@ class SNMPInspector(base.Inspector):
_interface_received_oid = "1.3.6.1.2.1.2.2.1.10"
_interface_transmitted_oid = "1.3.6.1.2.1.2.2.1.16"
_interface_error_oid = "1.3.6.1.2.1.2.2.1.20"
#Default port and security name
# Default port and security name
_port = 161
_security_name = 'public'
@@ -107,13 +107,13 @@ class SNMPInspector(base.Inspector):
return self._get_or_walk_oid(oid, host, False)
def inspect_cpu(self, host):
#get 1 minute load
# get 1 minute load
cpu_1_min_load = \
str(self._get_value_from_oid(self._cpu_1_min_load_oid, host))
#get 5 minute load
# get 5 minute load
cpu_5_min_load = \
str(self._get_value_from_oid(self._cpu_5_min_load_oid, host))
#get 15 minute load
# get 15 minute load
cpu_15_min_load = \
str(self._get_value_from_oid(self._cpu_15_min_load_oid, host))
@@ -122,9 +122,9 @@ class SNMPInspector(base.Inspector):
cpu_15_min=float(cpu_15_min_load))
def inspect_memory(self, host):
#get total memory
# get total memory
total = self._get_value_from_oid(self._memory_total_oid, host)
#get used memory
# get used memory
used = self._get_value_from_oid(self._memory_used_oid, host)
yield base.MemoryStats(total=int(total), used=int(used))
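
The three UCD-SNMP load OIDs above differ only in their last sub-identifier, so the lookups in inspect_cpu follow a single pattern. A standalone sketch (get_value stands in for the inspector's _get_value_from_oid):

    _CPU_LOAD_OIDS = {
        'cpu_1_min': '1.3.6.1.4.1.2021.10.1.3.1',
        'cpu_5_min': '1.3.6.1.4.1.2021.10.1.3.2',
        'cpu_15_min': '1.3.6.1.4.1.2021.10.1.3.3',
    }

    def inspect_cpu(get_value, host):
        # get_value(oid, host) is assumed to return the raw SNMP value,
        # as _get_value_from_oid does in the class above.
        return dict((name, float(str(get_value(oid, host))))
                    for name, oid in _CPU_LOAD_OIDS.items())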


@@ -45,11 +45,11 @@ class _Base(plugin.CentralPollster):
def _get_images(self, ksclient):
client = self.get_glance_client(ksclient)
#TODO(eglynn): use pagination to protect against unbounded
# TODO(eglynn): use pagination to protect against unbounded
# memory usage
rawImageList = list(itertools.chain(
client.images.list(filters={"is_public": True}),
#TODO(eglynn): extend glance API with all_tenants logic to
# TODO(eglynn): extend glance API with all_tenants logic to
# avoid second call to retrieve private images
client.images.list(filters={"is_public": False})))
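
One way to address the first TODO is to keep the chain lazy instead of wrapping it in list(), letting the client paginate underneath; a hypothetical sketch (the page_size keyword is an assumption about the glanceclient version in use):

    import itertools

    def iter_images(client, page_size=100):
        # Chaining lazily keeps memory bounded even for large image sets.
        return itertools.chain(
            client.images.list(filters={"is_public": True},
                               page_size=page_size),
            client.images.list(filters={"is_public": False},
                               page_size=page_size))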


@@ -60,7 +60,7 @@ class NetworkNotificationBase(plugin.NotificationBase):
# it may mean we miss charging for some amount of time,
# but it is better than throwing away the existing
# metadata for a resource when it is deleted.
##'%s.delete.start' % (self.resource_name),
# '%s.delete.start' % (self.resource_name),
]
@staticmethod


@@ -60,7 +60,7 @@ class NotificationBase(PluginBase):
:param conf: Configuration.
"""
#TODO(sileht): Backwards compatibility, remove in J+1
# TODO(sileht): Backwards compatibility, remove in J+1
if hasattr(self, 'get_exchange_topics'):
LOG.warn(_('get_exchange_topics API of NotificationPlugin is '
'deprecated, implement get_targets instead.'))
@@ -114,8 +114,8 @@ class NotificationBase(PluginBase):
"""
#TODO(sileht): this will be moved into oslo.messaging
#see oslo.messaging bp notification-dispatcher-filter
# TODO(sileht): this will be moved into oslo.messaging
# see oslo.messaging bp notification-dispatcher-filter
if not self._handle_event_type(notification['event_type'],
self.event_types):
return
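
A plausible reading of the _handle_event_type filter, assuming fnmatch-style wildcards in the plugin's event_types list (the matching semantics are an assumption, not something this diff confirms):

    import fnmatch

    def handle_event_type(event_type, event_type_patterns):
        # True if the notification's event_type matches any declared pattern,
        # e.g. 'compute.instance.*' matches 'compute.instance.create.end'.
        return any(fnmatch.fnmatch(event_type, pattern)
                   for pattern in event_type_patterns)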


@@ -62,7 +62,7 @@ def _handle_sort_key(model_name, sort_key=None):
if not sort_key:
return sort_keys
# NOTE(Fengqian): We need to put the sort key from user
#in the first place of sort keys list.
# in the first place of sort keys list.
try:
sort_keys.remove(sort_key)
except ValueError:
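
Pieced together, the reordering that the NOTE describes looks roughly like this (the default key list is invented for illustration):

    def handle_sort_key(sort_key, default_keys=('user_id', 'project_id')):
        sort_keys = list(default_keys)
        if not sort_key:
            return sort_keys
        try:
            # Drop the user's key if it is already present...
            sort_keys.remove(sort_key)
        except ValueError:
            pass
        # ...then put it in the first place of the sort keys list.
        sort_keys.insert(0, sort_key)
        return sort_keys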


@@ -333,7 +333,7 @@ class Connection(base.Connection):
data['counter_name'], data['counter_type'],
data['counter_unit'], rts, data['source'])
#TODO(nprivalova): try not to store resource_id
# TODO(nprivalova): try not to store resource_id
resource = hbase_utils.serialize_entry(**{
'source': data['source'],
'meter': {new_meter: data['timestamp']},
@@ -348,7 +348,7 @@ class Connection(base.Connection):
ts = int(time.mktime(data['timestamp'].timetuple()) * 1000)
resource_table.put(data['resource_id'], resource, ts)
#TODO(nprivalova): improve uniqueness
# TODO(nprivalova): improve uniqueness
# Rowkey consists of reversed timestamp, meter and an md5 of
# user+resource+project for purposes of uniqueness
m = hashlib.md5()
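
The rowkey scheme those comments describe, sketched end to end (field order follows the comment; the exact separators and timestamp width in the real implementation may differ):

    import hashlib

    def make_rowkey(timestamp_ms, meter, user_id, resource_id, project_id):
        # A reversed timestamp makes newer samples sort first in HBase scans.
        rts = 0x7fffffffffffffff - timestamp_ms
        m = hashlib.md5()
        m.update(('%s%s%s' % (user_id, resource_id, project_id)).encode('utf-8'))
        return '%d_%s_%s' % (rts, meter, m.hexdigest())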


@@ -18,8 +18,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend
"""
"""MongoDB storage backend"""
import calendar
import copy
@@ -607,14 +606,15 @@ class Connection(pymongo_base.Connection):
sort_criteria_list = []
for i in range(len(sort_keys)):
#NOTE(fengqian): Generate the query criteria recursively.
#sort_keys=[k1, k2, k3], maker_value=[v1, v2, v3]
#sort_flags = ['$lt', '$gt', 'lt'].
#The query criteria should be
#{'k3': {'$lt': 'v3'}, 'k2': {'eq': 'v2'}, 'k1': {'eq': 'v1'}},
#{'k2': {'$gt': 'v2'}, 'k1': {'eq': 'v1'}},
#{'k1': {'$lt': 'v1'}} with 'OR' operation.
#Each recurse will generate one items of three.
# NOTE(fengqian): Generate the query criteria recursively.
# sort_keys=[k1, k2, k3], maker_value=[v1, v2, v3]
# sort_flags = ['$lt', '$gt', 'lt'].
# The query criteria should be
# {'k3': {'$lt': 'v3'}, 'k2': {'eq': 'v2'}, 'k1':
# {'eq': 'v1'}},
# {'k2': {'$gt': 'v2'}, 'k1': {'eq': 'v1'}},
# {'k1': {'$lt': 'v1'}} with 'OR' operation.
# Each recursion step generates one of the three criteria.
sort_criteria_list.append(cls._recurse_sort_keys(
sort_keys[:(len(sort_keys) - i)],
marker, _op))
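
Flattened out, the recursion in that NOTE produces something like the sketch below; the clause shapes follow the comment literally (including its plain 'eq'), so treat it as illustrative rather than the stored implementation:

    def build_marker_criteria(sort_keys, marker, sort_flags):
        # For sort_keys=[k1, k2, k3] this yields the three OR'ed clauses
        # spelled out in the NOTE above, most specific first.
        criteria = []
        for i, flag in enumerate(sort_flags):
            keys = sort_keys[:len(sort_keys) - i]
            clause = dict((k, {'eq': marker[k]}) for k in keys[:-1])
            clause[keys[-1]] = {flag: marker[keys[-1]]}
            criteria.append(clause)
        return {'$or': criteria}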
@@ -673,9 +673,9 @@ class Connection(pymongo_base.Connection):
sort_dir)
q.update(query)
#NOTE(Fengqian):MongoDB collection.find can not handle limit
#when it equals None, it will raise TypeError, so we treate
#None as 0 for the value of limit.
# NOTE(Fengqian): MongoDB collection.find can not handle limit
# when it equals None, it will raise TypeError, so we treat
# None as 0 for the value of limit.
if limit is None:
limit = 0
return db_collection.find(q, limit=limit, sort=all_sort)


@@ -365,8 +365,8 @@ class Connection(base.Connection):
@staticmethod
def _decode_matching_metadata(matching_metadata):
if isinstance(matching_metadata, dict):
#note(sileht): keep compatibility with alarm
#with matching_metadata as a dict
# note(sileht): keep compatibility with alarm
# with matching_metadata as a dict
return matching_metadata
else:
new_matching_metadata = {}
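
A sketch of what the shim above presumably does with the non-dict form: pass dicts through and fold the newer list form back into a dict (the 'field'/'value' key names are an assumption):

    def decode_matching_metadata(matching_metadata):
        if isinstance(matching_metadata, dict):
            # Older alarms stored matching_metadata as a plain dict.
            return matching_metadata
        # Newer alarms are assumed to store a list of
        # {'field': ..., 'value': ...} entries.
        return dict((md['field'], md['value']) for md in matching_metadata)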


@@ -16,7 +16,7 @@ from sqlalchemy import Index, MetaData, Table
INDEXES = {
#`table_name`: ((`index_name`, `column`),)
# `table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),


@@ -81,7 +81,7 @@ def downgrade(migrate_engine):
for row in table.select().execute().fetchall():
if row.type != 'threshold':
#note: type insupported in previous version
# note: type unsupported in the previous version
table.delete().where(table.c.id == row.id).execute()
else:
rule = json.loads(row.rule)
@@ -93,7 +93,7 @@ def downgrade(migrate_engine):
'meter_name': rule['meter_name'],
'matching_metadata': {}}
#note: op are ignored because previous format don't support it
# note: ops are ignored because the previous format doesn't support them
for q in rule['query']:
values['matching_metadata'][q['field']] = q['value']
values['matching_metadata'] = json.dumps(


@@ -105,7 +105,7 @@ def upgrade(migrate_engine):
_alter_sourceassoc(meta, 'meter', 'idx_sm')
sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
sourceassoc.c.meter_id.alter(name='sample_id')
#re-bind metadata to pick up alter name change
# re-bind metadata to pick up alter name change
meta = sa.MetaData(bind=migrate_engine)
_alter_sourceassoc(meta, 'sample', 'idx_ss', True)


@@ -66,7 +66,7 @@ class TestCoordinate(tests_base.BaseTestCase):
self.mock_utcnow.return_value = self.override_start
self.partition_coordinator = coordination.PartitionCoordinator()
self.partition_coordinator.coordination_rpc = mock.Mock()
#add extra logger to check exception conditions and logged content
# add extra logger to check exception conditions and logged content
self.str_handler = MockLoggingHandler()
coordination.LOG.logger.addHandler(self.str_handler)


@@ -307,7 +307,7 @@ class TestTraitDefinition(ConverterBase):
jsonpath_rw.parse('(payload.test)|(payload.other)'))
def test_invalid_path_config(self):
#test invalid jsonpath...
# test invalid jsonpath...
cfg = dict(fields='payload.bogus(')
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,
@@ -316,7 +316,7 @@ class TestTraitDefinition(ConverterBase):
self.fake_plugin_mgr)
def test_invalid_plugin_config(self):
#test invalid jsonpath...
# test invalid jsonpath...
cfg = dict(fields='payload.test', plugin=dict(bogus="true"))
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,
@@ -325,7 +325,7 @@ class TestTraitDefinition(ConverterBase):
self.fake_plugin_mgr)
def test_unknown_plugin(self):
#test invalid jsonpath...
# test invalid jsonpath...
cfg = dict(fields='payload.test', plugin=dict(name='bogus'))
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,
@@ -351,7 +351,7 @@ class TestTraitDefinition(ConverterBase):
self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type)
def test_invalid_type_config(self):
#test invalid jsonpath...
# test invalid jsonpath...
cfg = dict(type='bogus', fields='payload.test')
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,


@@ -54,8 +54,8 @@ class MongoDBConnection(tests_db.TestBase):
@tests_db.run_with('mongodb')
class MongoDBTestMarkerBase(test_storage_scenarios.DBTestBase):
#NOTE(Fengqian): All these three test case are the same for resource
#and meter collection. As to alarm, we will set up in AlarmTestPagination.
# NOTE(Fengqian): All these three test cases are the same for resource
# and meter collections. Alarm cases are set up in AlarmTestPagination.
def test_get_marker(self):
marker_pairs = {'user_id': 'user-id-4'}
ret = impl_mongodb.Connection._get_marker(self.conn.db.resource,


@@ -2868,8 +2868,7 @@ class GetEventTest(EventTestBase):
def test_get_traits(self):
traits = self.conn.get_traits("Bar")
#format results in a way that makes them easier to
#work with
# format results in a way that makes them easier to work with
trait_dict = {}
for trait in traits:
trait_dict[trait.name] = trait.dtype


@@ -216,7 +216,7 @@ class AggregatorTransformer(ScalingTransformer):
def _get_unique_key(self, s):
non_aggregated_keys = "-".join([getattr(s, field)
for field in self.key_attributes])
#NOTE(sileht): it assumes, a meter always have the same unit/type
# NOTE(sileht): it assumes a meter always has the same unit/type
return "%s-%s-%s" % (s.name, s.resource_id, non_aggregated_keys)
def handle_sample(self, context, sample):


@@ -37,13 +37,12 @@ commands = python setup.py build_sphinx
commands = {posargs}
[flake8]
# E265 block comment should start with #
# F402 import module shadowed by loop variable
# H305 imports not grouped correctly
# H307 like imports should be grouped together
# H405 multi line docstring summary not separated with an empty line
# H904 Wrap long lines in parentheses instead of a backslash
ignore = E265,F402,H305,H307,H405,H904
ignore = F402,H305,H307,H405,H904
builtins = _
exclude=.venv,.git,.tox,dist,doc,./ceilometer/openstack/common,*lib/python*,*egg,tools,nova_tests,build
show-source = True