Enable flake8 checking of ironic/nova/*

The directory ironic/nova/* was being excluded from the flake8 check.

Make some minor whitespace and comment changes to
ironic/nova/compute/manager.py so that it passes the flake8 checks.

Change-Id: I5a76550db3b28a87964d1505441246cdb0354ee7
John L. Villalovos 2015-06-12 11:44:28 -07:00
parent 286f72e359
commit 9508d86dd0
2 changed files with 21 additions and 17 deletions

ironic/nova/compute/manager.py

@@ -23,12 +23,12 @@ work. The goal here is to generalise the areas where n-c talking to a clustered
 hypervisor has issues, and long term fold them into the main ComputeManager.
 """
 
-from oslo_concurrency import lockutils
 from nova.compute import manager
 import nova.context
+from oslo_concurrency import lockutils
 
-CCM_SEMAPHORE='clustered_compute_manager'
+CCM_SEMAPHORE = 'clustered_compute_manager'
 
 
 class ClusteredComputeManager(manager.ComputeManager):
@@ -48,22 +48,24 @@ class ClusteredComputeManager(manager.ComputeManager):
         self.init_virt_events()
 
         # try:
-            # evacuation is moot for a clustered hypervisor
-            # # checking that instance was not already evacuated to other host
-            # self._destroy_evacuated_instances(context)
-            # Don't run _init_instance until we solve the partitioning problem
-            # - with N n-cpu's all claiming the same hostname, running
-            #   _init_instance here would lead to race conditions where each runs
-            #   _init_instance concurrently.
-            # for instance in instances:
-            #     self._init_instance(context, instance)
+        # evacuation is moot for a clustered hypervisor
+        # # checking that instance was not already evacuated to other host
+        # self._destroy_evacuated_instances(context)
+        # Don't run _init_instance until we solve the partitioning problem
+        # - with N n-cpu's all claiming the same hostname, running
+        #   _init_instance here would lead to race conditions where each runs
+        #   _init_instance concurrently.
+        # for instance in instances:
+        #     self._init_instance(context, instance)
         # finally:
-            # defer_iptables_apply is moot for clusters - no local iptables
-            # if CONF.defer_iptables_apply:
-            #     self.driver.filter_defer_apply_off()
+        # defer_iptables_apply is moot for clusters - no local iptables
+        # if CONF.defer_iptables_apply:
+        #     self.driver.filter_defer_apply_off()
 
     def pre_start_hook(self):
-        """After the service is initialized, but before we fully bring
+        """Update our available resources
+
+        After the service is initialized, but before we fully bring
         the service up by listening on RPC queues, make sure to update
         our available resources (and indirectly our available nodes).
         """
@@ -80,7 +82,9 @@ class ClusteredComputeManager(manager.ComputeManager):
 
     @lockutils.synchronized(CCM_SEMAPHORE, 'ironic-')
     def _update_resources(self):
-        """Updates resources while protecting against a race on
+        """Update our resources
+
+        Updates the resources while protecting against a race on
         self._resource_tracker_dict.
         """
         self.update_available_resource(nova.context.get_admin_context())
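
All of the manager.py edits above are style-only: spaces around the assignment operator (pycodestyle E225), import ordering, comment indentation, and docstrings that open with a one-line summary followed by a blank line. The sketch below is a minimal standalone illustration of those conventions, not the ironic module itself; the function name demo_update() is made up for the example, while CCM_SEMAPHORE and the lockutils.synchronized decorator mirror the code shown in the diff.

# Minimal sketch of the style the diff moves toward, assuming only that
# oslo.concurrency is installed; demo_update() is a hypothetical function.
from oslo_concurrency import lockutils

CCM_SEMAPHORE = 'clustered_compute_manager'   # E225-clean: spaces around '='


@lockutils.synchronized(CCM_SEMAPHORE, 'ironic-')
def demo_update():
    """Update shared state under the module-wide lock

    Callers decorated with the same semaphore name are serialized, which is
    how ClusteredComputeManager._update_resources avoids racing on its
    resource tracker state.
    """
    return CCM_SEMAPHORE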

tox.ini

@@ -73,7 +73,7 @@ commands = {posargs}
 [flake8]
 # E711: ignored because it is normal to use "column == None" in sqlalchemy
 ignore = E129,E711
-exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools,*ironic/nova*
+exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
 max-complexity=17
 
 [hacking]
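
With the *ironic/nova* pattern dropped from exclude, that directory is linted by the same flake8 run as the rest of the repository (normally invoked through tox). As a rough way to spot-check a single file, the snippet below uses flake8's documented legacy Python API; note that this API ships with flake8 3.x and later, which is newer than this commit, and the path assumes the snippet is run from the top of an ironic source checkout.

# Hedged sketch using flake8's legacy Python API; in-tree the same check
# runs via the flake8 command line configured by the [flake8] section above.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide(ignore=['E129', 'E711'])
report = style_guide.check_files(['ironic/nova/compute/manager.py'])
print('flake8 violations found:', report.total_errors)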