Do pep8 housekeeping according to zuul rules

The pep8 rules used in nodepool are somewhat broken. In preparation for
using the pep8 ruleset from zuul, we need to fix the findings upfront.

Change-Id: I9fb2a80db7671c590cdb8effbd1a1102aaa3aff8
Tobias Henkel 2017-11-26 08:29:40 +01:00 committed by Tristan Cacqueray
parent be4b8f1416
commit 7d79770840
18 changed files with 114 additions and 92 deletions
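
The fixes below are mechanical style cleanups. As a rough guide, here is a
minimal, hedged sketch of the violation categories involved; the flake8/pep8
error codes are inferred from the diff, not named by the commit itself, and
the identifiers used are illustrative only:

    # Sketch of the pep8 fix categories in this change (codes inferred).
    import logging

    # E225: missing whitespace around operator (e.g. "b.state==zk.READY")
    state = 'ready'
    is_ready = (state == 'ready')   # was: state=='ready'

    # E203: whitespace before ':' or ',' (e.g. "{'id' : ...}", "o.hostname ,")
    obj = {'id': 'image-0001'}      # was: {'id' : 'image-0001'}

    # E265: block comment should start with '# '
    # nodepool.nodes.STATE          # was: #nodepool.nodes.STATE

    # E501: line too long; wrap inside the call parentheses instead
    log = logging.getLogger(
        "nodepool.builder.CleanupWorker.%s" % 'example')

    # E302/E303: exactly two blank lines around top-level definitions


    def example():
        pass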

View File

@@ -32,9 +32,11 @@ from nodepool import zk
 MINS = 60
 HOURS = 60 * MINS
 
-IMAGE_TIMEOUT = 6 * HOURS    # How long to wait for an image save
-SUSPEND_WAIT_TIME = 30       # How long to wait between checks for
-                             # ZooKeeper connectivity if it disappears.
+# How long to wait for an image save
+IMAGE_TIMEOUT = 6 * HOURS
+
+# How long to wait between checks for ZooKeeper connectivity if it disappears.
+SUSPEND_WAIT_TIME = 30
 
 # HP Cloud requires qemu compat with 0.10. That version works elsewhere,
 # so just hardcode it for all qcow2 building
@@ -151,7 +153,8 @@ class CleanupWorker(BaseWorker):
                  interval, zk):
         super(CleanupWorker, self).__init__(builder_id, config_path,
                                             secure_path, interval, zk)
-        self.log = logging.getLogger("nodepool.builder.CleanupWorker.%s" % name)
+        self.log = logging.getLogger(
+            "nodepool.builder.CleanupWorker.%s" % name)
         self.name = 'CleanupWorker.%s' % name
 
     def _buildUploadRecencyTable(self):
@@ -395,7 +398,8 @@ class CleanupWorker(BaseWorker):
                    self.log.info("Removing failed upload record: %s" % upload)
                    self._zk.deleteUpload(image, build_id, provider, upload.id)
             elif upload.state == zk.DELETING:
-                self.log.info("Removing deleted upload and record: %s" % upload)
+                self.log.info(
+                    "Removing deleted upload and record: %s" % upload)
                 self._deleteUpload(upload)
             elif upload.state == zk.FAILED:
                 self.log.info("Removing failed upload and record: %s" % upload)
@@ -410,7 +414,7 @@ class CleanupWorker(BaseWorker):
         all_builds = self._zk.getBuilds(image)
         builds_to_keep = set([b for b in sorted(all_builds, reverse=True,
                                                 key=lambda y: y.state_time)
-                              if b.state==zk.READY][:2])
+                              if b.state == zk.READY][:2])
         local_builds = set(self._filterLocalBuilds(image, all_builds))
         diskimage = self._config.diskimages.get(image)
         if not diskimage and not local_builds:
@@ -576,6 +580,7 @@ class BuildWorker(BaseWorker):
             or (now - builds[0].state_time) >= diskimage.rebuild_age
             or not set(builds[0].formats).issuperset(diskimage.image_types)
         ):
+
             try:
                 with self._zk.imageBuildLock(diskimage.name, blocking=False):
                     # To avoid locking each image repeatedly, we have an
@@ -584,7 +589,8 @@ class BuildWorker(BaseWorker):
                     # lock acquisition. If it's not the same build as
                     # identified in the first check above, assume another
                     # BuildWorker created the build for us and continue.
-                    builds2 = self._zk.getMostRecentBuilds(1, diskimage.name, zk.READY)
+                    builds2 = self._zk.getMostRecentBuilds(
+                        1, diskimage.name, zk.READY)
                     if builds2 and builds[0].id != builds2[0].id:
                         return
 
@@ -746,7 +752,8 @@ class BuildWorker(BaseWorker):
             self._zk.resetLostFlag()
             build_data.state = zk.FAILED
         elif p.returncode:
-            self.log.info("DIB failed creating %s (%s)" % (diskimage.name, p.returncode))
+            self.log.info(
+                "DIB failed creating %s (%s)" % (diskimage.name, p.returncode))
             build_data.state = zk.FAILED
         else:
             self.log.info("DIB image %s is built" % diskimage.name)
@@ -756,7 +763,8 @@ class BuildWorker(BaseWorker):
             if self._statsd:
                 # record stats on the size of each image we create
                 for ext in img_types.split(','):
-                    key = 'nodepool.dib_image_build.%s.%s.size' % (diskimage.name, ext)
+                    key = 'nodepool.dib_image_build.%s.%s.size' % (
+                        diskimage.name, ext)
                     # A bit tricky because these image files may be sparse
                     # files; we only want the true size of the file for
                     # purposes of watching if we've added too much stuff
@@ -1101,9 +1109,9 @@ class NodePoolBuilder(object):
         # startup process has completed.
         self._start_lock = threading.Lock()
 
-    #=======================================================================
+    # ======================================================================
     # Private methods
-    #=======================================================================
+    # ======================================================================
 
     def _getBuilderID(self, id_file):
         if not os.path.exists(id_file):
@@ -1126,9 +1134,9 @@ class NodePoolBuilder(object):
             raise RuntimeError('No images-dir specified in config.')
         return config
 
-    #=======================================================================
+    # ======================================================================
     # Public methods
-    #=======================================================================
+    # ======================================================================
 
     def start(self):
         '''

View File

@@ -289,7 +289,7 @@ class NodePoolCmd(NodepoolApp):
         validator = ConfigValidator(self.args.config)
         validator.validate()
         log.info("Configuration validation complete")
-        #TODO(asselin,yolanda): add validation of secure.conf
+        # TODO(asselin,yolanda): add validation of secure.conf
 
     def request_list(self):
         print(status.request_list(self.zk))

View File

@@ -106,9 +106,9 @@ class NodeRequestHandler(object):
             return 0
         return self.launch_manager.alive_thread_count
 
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
     # Public methods
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
 
     def unlockNodeSet(self, clear_allocation=False):
         '''

View File

@@ -140,7 +140,7 @@ class OpenStackProviderConfig(ProviderConfig):
                     any([len(k) > 255 or len(v) > 255
                          for k, v in i.meta.items()]):
                 # soft-fail
-                #self.log.error("Invalid metadata for %s; ignored"
+                # self.log.error("Invalid metadata for %s; ignored"
                 #               % i.name)
                 i.meta = {}
 
@@ -200,7 +200,6 @@ class OpenStackProviderConfig(ProviderConfig):
                 top_label = config.labels[pl.name]
                 top_label.pools.append(pp)
 
-
     def get_schema(self):
         provider_diskimage = {
             'name': str,
@@ -238,7 +237,8 @@ class OpenStackProviderConfig(ProviderConfig):
 
         label_diskimage = v.Schema({v.Required('diskimage'): str}, extra=True)
 
-        label_cloud_image = v.Schema({v.Required('cloud-image'): str}, extra=True)
+        label_cloud_image = v.Schema({v.Required('cloud-image'): str},
+                                     extra=True)
 
         pool_label = v.All(pool_label_main,
                            v.Any(label_min_ram, label_flavor_name),

View File

@@ -61,8 +61,8 @@ class NodeLauncher(threading.Thread, stats.StatsReporter):
         self._pool = self._label.pool
         self._provider_config = self._pool.provider
         if self._label.diskimage:
-            self._diskimage = \
-                self._provider_config.diskimages[self._label.diskimage.name]
+            self._diskimage = self._provider_config.diskimages[
+                self._label.diskimage.name]
         else:
             self._diskimage = None
 
@@ -542,8 +542,7 @@ class OpenStackNodeRequestHandler(NodeRequestHandler):
         elif not self._imagesAvailable():
             declined_reasons.append('images are not available')
         elif (self.pool.max_servers == 0 or
-              not self._hasProviderQuota(self.request.node_types)
-              ):
+              not self._hasProviderQuota(self.request.node_types)):
             declined_reasons.append('it would exceed quota')
         # TODO(tobiash): Maybe also calculate the quota prediction here and
         # backoff for some seconds if the used quota would be exceeded?

View File

@@ -64,8 +64,10 @@ class ServerDeleteException(TimeoutException):
 class ImageCreateException(TimeoutException):
     statsd_key = 'error.imagetimeout'
 
+
 class ZKException(Exception):
     pass
 
+
 class ZKLockException(ZKException):
     pass

View File

@@ -35,10 +35,14 @@ from nodepool.driver.openstack.handler import OpenStackNodeRequestHandler
 MINS = 60
 HOURS = 60 * MINS
 
-WATERMARK_SLEEP = 10      # Interval between checking if new servers needed
-LOCK_CLEANUP = 8 * HOURS  # When to delete node request lock znodes
-SUSPEND_WAIT_TIME = 30    # How long to wait between checks for ZooKeeper
-                          # connectivity if it disappears.
+# Interval between checking if new servers needed
+WATERMARK_SLEEP = 10
+
+# When to delete node request lock znodes
+LOCK_CLEANUP = 8 * HOURS
+
+# How long to wait between checks for ZooKeeper connectivity if it disappears.
+SUSPEND_WAIT_TIME = 30
 
 
 class NodeDeleter(threading.Thread, stats.StatsReporter):
@@ -137,9 +141,9 @@ class PoolWorker(threading.Thread):
                                          os.getpid(),
                                          self.name)
 
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
     # Private methods
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
 
     def _get_node_request_handler(self, provider, request):
         if provider.driver.name == 'fake':
@@ -177,8 +181,7 @@ class PoolWorker(threading.Thread):
 
             # Short-circuit for limited request handling
             if (provider.max_concurrency > 0 and
-                    active_threads >= provider.max_concurrency
-                ):
+                    active_threads >= provider.max_concurrency):
                 self.log.debug("Request handling limited: %s active threads ",
                                "with max concurrency of %s",
                                active_threads, provider.max_concurrency)
@@ -238,9 +241,9 @@ class PoolWorker(threading.Thread):
         active_reqs = [r.request.id for r in self.request_handlers]
         self.log.debug("Active requests: %s", active_reqs)
 
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
     # Public methods
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
 
     def activeThreads(self):
         '''
@@ -460,7 +463,7 @@ class CleanupWorker(BaseCleanupWorker):
         for lock_stat in zk.nodeRequestLockStatsIterator():
             if lock_stat.lock_id in requests:
                 continue
-            if (now - lock_stat.stat.mtime/1000) > LOCK_CLEANUP:
+            if (now - lock_stat.stat.mtime / 1000) > LOCK_CLEANUP:
                 zk.deleteNodeRequestLock(lock_stat.lock_id)
 
     def _cleanupLeakedInstances(self):
@@ -604,8 +607,7 @@ class DeletedNodeWorker(BaseCleanupWorker):
             # If a ready node has been allocated to a request, but that
             # request is now missing, deallocate it.
             if (node.state == zk.READY and node.allocated_to
-                and not zk_conn.getNodeRequest(node.allocated_to)
-                ):
+                    and not zk_conn.getNodeRequest(node.allocated_to)):
                 try:
                     zk_conn.lockNode(node, blocking=False)
                 except exceptions.ZKLockException:
@@ -936,13 +938,13 @@ class NodePool(threading.Thread):
                         key = provider.name + '-' + pool.name
                         if key not in self._pool_threads:
                             t = PoolWorker(self, provider.name, pool.name)
-                            self.log.info( "Starting %s" % t.name)
+                            self.log.info("Starting %s" % t.name)
                             t.start()
                             self._pool_threads[key] = t
                         elif not self._pool_threads[key].isAlive():
                             self._pool_threads[key].join()
                             t = PoolWorker(self, provider.name, pool.name)
-                            self.log.info( "Restarting %s" % t.name)
+                            self.log.info("Restarting %s" % t.name)
                             t.start()
                             self._pool_threads[key] = t
             except Exception:

View File

@@ -29,9 +29,8 @@ from nodepool import exceptions
 
 log = logging.getLogger("nodepool.utils")
 
-ITERATE_INTERVAL = 2    # How long to sleep while waiting for something
-                        # in a loop
+# How long to sleep while waiting for something in a loop
+ITERATE_INTERVAL = 2
 
 
 def iterate_timeout(max_seconds, exc, purpose):

View File

@@ -24,6 +24,7 @@ from nodepool import zk
 
 log = logging.getLogger("nodepool.stats")
 
+
 def get_client():
     """Return a statsd client object setup from environment variables; or
        None if they are not set
@@ -87,7 +88,6 @@ class StatsReporter(object):
             self._statsd.timing(key, dt)
             self._statsd.incr(key)
 
-
     def updateNodeStats(self, zk_conn, provider):
         '''
         Refresh statistics for all known nodes.
@@ -108,11 +108,11 @@ class StatsReporter(object):
             states[key] = 0
 
         for node in zk_conn.nodeIterator():
-            #nodepool.nodes.STATE
+            # nodepool.nodes.STATE
             key = 'nodepool.nodes.%s' % node.state
             states[key] += 1
 
-            #nodepool.label.LABEL.nodes.STATE
+            # nodepool.label.LABEL.nodes.STATE
             key = 'nodepool.label.%s.nodes.%s' % (node.type, node.state)
             # It's possible we could see node types that aren't in our config
             if key in states:
@@ -120,7 +120,7 @@ class StatsReporter(object):
             else:
                 states[key] = 1
 
-            #nodepool.provider.PROVIDER.nodes.STATE
+            # nodepool.provider.PROVIDER.nodes.STATE
             key = 'nodepool.provider.%s.nodes.%s' % (node.provider, node.state)
             # It's possible we could see providers that aren't in our config
             if key in states:
@@ -131,7 +131,7 @@ class StatsReporter(object):
         for key, count in states.items():
             self._statsd.gauge(key, count)
 
-        #nodepool.provider.PROVIDER.max_servers
+        # nodepool.provider.PROVIDER.max_servers
         key = 'nodepool.provider.%s.max_servers' % provider.name
         max_servers = sum([p.max_servers for p in provider.pools.values()
                            if p.max_servers])

View File

@@ -145,7 +145,7 @@ def dib_image_list_json(zk):
     for image_name in zk.getImageNames():
         for build_no in zk.getBuildNumbers(image_name):
             build = zk.getBuild(image_name, build_no)
-            objs.append({'id' : '-'.join([image_name, build_no]),
+            objs.append({'id': '-'.join([image_name, build_no]),
                          'image': image_name,
                          'builder': build.builder,
                          'formats': build.formats,
@@ -154,6 +154,7 @@ def dib_image_list_json(zk):
                          })
     return json.dumps(objs)
 
+
 def image_list(zk):
     t = PrettyTable(["Build ID", "Upload ID", "Provider", "Image",
                      "Provider Image Name", "Provider Image ID", "State",
@@ -173,6 +174,7 @@ def image_list(zk):
                    age(upload.state_time)])
     return str(t)
 
+
 def request_list(zk):
     t = PrettyTable(["Request ID", "State", "Requestor", "Node Types", "Nodes",
                      "Declined By"])

View File

@@ -26,6 +26,7 @@ import requests.exceptions
 
 from nodepool import stats
 
+
 class ManagerStoppedException(Exception):
     pass
 
@@ -106,7 +107,7 @@ class TaskManager(threading.Thread):
                 self.log.debug("Manager %s ran task %s in %ss" %
                                (self.name, type(task).__name__, dt))
                 if self.statsd:
-                    #nodepool.task.PROVIDER.subkey
+                    # nodepool.task.PROVIDER.subkey
                     subkey = type(task).__name__
                     key = 'nodepool.task.%s.%s' % (self.name, subkey)
                     self.statsd.timing(key, int(dt * 1000))

View File

@@ -163,10 +163,10 @@ class BaseTestCase(testtools.TestCase):
             logging.basicConfig(level=logging.DEBUG)
         l = logging.getLogger('kazoo')
         l.setLevel(logging.INFO)
-        l.propagate=False
+        l.propagate = False
         l = logging.getLogger('stevedore')
         l.setLevel(logging.INFO)
-        l.propagate=False
+        l.propagate = False
         self.useFixture(fixtures.NestedTempfile())
 
         self.subprocesses = []
@@ -500,8 +500,8 @@ class DBTestCase(BaseTestCase):
     def printZKTree(self, node):
        def join(a, b):
            if a.endswith('/'):
-                return a+b
-            return a+'/'+b
+                return a + b
+            return a + '/' + b
 
         data, stat = self.zk.client.get(node)
         self.log.debug("Node: %s" % (node,))

View File

@@ -86,6 +86,7 @@ class TestNodepoolBuilderDibImage(tests.BaseTestCase):
         image = builder.DibImageFile('myid1234')
         self.assertRaises(exceptions.BuilderError, image.to_path, '/imagedir/')
 
+
 class TestNodePoolBuilder(tests.DBTestCase):
 
     def test_start_stop(self):
@@ -155,7 +156,8 @@ class TestNodePoolBuilder(tests.DBTestCase):
         image = self.zk.getMostRecentImageUpload('fake-provider', 'fake-image')
         self.replace_config(configfile, 'node_two_provider_remove.yaml')
         self.waitForImageDeletion('fake-provider2', 'fake-image')
-        image2 = self.zk.getMostRecentImageUpload('fake-provider', 'fake-image')
+        image2 = self.zk.getMostRecentImageUpload('fake-provider',
                                                   'fake-image')
         self.assertEqual(image, image2)
 
     def test_image_addition(self):

View File

@@ -52,7 +52,8 @@ class TestNodepoolCMD(tests.DBTestCase):
         self.assertEquals(rows_with_val, count)
 
     def assert_alien_images_listed(self, configfile, image_cnt, image_id):
-        self.assert_listed(configfile, ['alien-image-list'], 2, image_id, image_cnt)
+        self.assert_listed(configfile, ['alien-image-list'], 2, image_id,
+                           image_cnt)
 
     def assert_alien_images_empty(self, configfile):
         self.assert_alien_images_listed(configfile, 0, 0)
@@ -245,7 +246,7 @@ class TestNodepoolCMD(tests.DBTestCase):
         pool = self.useNodepool(configfile, watermark_sleep=1)
         self._useBuilder(configfile)
         pool.start()
-        self.waitForImage( 'fake-provider', 'fake-image')
+        self.waitForImage('fake-provider', 'fake-image')
         nodes = self.waitForNodes('fake-label')
         self.assertEqual(len(nodes), 1)
@@ -282,7 +283,7 @@ class TestNodepoolCMD(tests.DBTestCase):
         pool = self.useNodepool(configfile, watermark_sleep=1)
         self._useBuilder(configfile)
         pool.start()
-        self.waitForImage( 'fake-provider', 'fake-image')
+        self.waitForImage('fake-provider', 'fake-image')
         nodes = self.waitForNodes('fake-label')
         self.assertEqual(len(nodes), 1)

View File

@@ -256,8 +256,7 @@ class TestLauncher(tests.DBTestCase):
         self._test_node_assignment_at_quota(config='node_quota_cloud.yaml',
                                             max_cores=math.inf,
                                             max_instances=math.inf,
-                                            max_ram=2*8192)
-
+                                            max_ram=2 * 8192)
 
     @mock.patch('nodepool.driver.fake.provider.get_fake_quota')
     def test_over_quota(self, mock_quota,
@@ -268,9 +267,9 @@ class TestLauncher(tests.DBTestCase):
         '''
 
         # Start with an instance quota of 2
-        max_cores=math.inf
-        max_instances=2
-        max_ram=math.inf
+        max_cores = math.inf
+        max_instances = 2
+        max_ram = math.inf
 
         # patch the cloud with requested quota
         mock_quota.return_value = (max_cores, max_instances, max_ram)

View File

@@ -104,7 +104,8 @@ class TestZooKeeper(tests.DBTestCase):
         with testtools.ExpectedException(
             npe.ZKLockException, "Did not get lock on .*"
         ):
-            with self.zk.imageUploadLock(image, bnum, prov, blocking=False):
+            with self.zk.imageUploadLock(image, bnum, prov,
                                          blocking=False):
                 pass
 
     def test_imageUploadLock_exception_blocking(self):
@@ -254,7 +255,8 @@ class TestZooKeeper(tests.DBTestCase):
         self.zk.storeImageUpload(image, bnum, provider, up3)
 
         # up2 should be the most recent 'ready' upload
-        data = self.zk.getMostRecentBuildImageUploads(1, image, bnum, provider, zk.READY)
+        data = self.zk.getMostRecentBuildImageUploads(
+            1, image, bnum, provider, zk.READY)
         self.assertNotEqual([], data)
         self.assertEqual(1, len(data))
         self.assertEqual(data[0].id, up2_id)
@@ -278,7 +280,8 @@ class TestZooKeeper(tests.DBTestCase):
         up3_id = self.zk.storeImageUpload(image, bnum, provider, up3)
 
         # up3 should be the most recent upload, regardless of state
-        data = self.zk.getMostRecentBuildImageUploads(1, image, bnum, provider, None)
+        data = self.zk.getMostRecentBuildImageUploads(
+            1, image, bnum, provider, None)
         self.assertNotEqual([], data)
         self.assertEqual(1, len(data))
         self.assertEqual(data[0].id, up3_id)
@@ -860,11 +863,11 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(o.image_id, d['image_id'])
         self.assertEqual(o.launcher, d['launcher'])
         self.assertEqual(o.external_id, d['external_id'])
-        self.assertEqual(o.hostname , d['hostname'])
-        self.assertEqual(o.comment , d['comment'])
+        self.assertEqual(o.hostname, d['hostname'])
+        self.assertEqual(o.comment, d['comment'])
         self.assertEqual(o.hold_job, d['hold_job'])
-        self.assertEqual(o.host_keys , d['host_keys'])
-        self.assertEqual(o.connection_port , d['connection_port'])
+        self.assertEqual(o.host_keys, d['host_keys'])
+        self.assertEqual(o.connection_port, d['connection_port'])
 
     def test_custom_connection_port(self):
         n = zk.Node('0001')
@@ -875,4 +878,5 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(n.connection_port, 22, "Default port not 22")
         n.connection_port = 22022
         d = n.toDict()
-        self.assertEqual(d["connection_port"], 22022, "Custom ssh port not set")
+        self.assertEqual(d["connection_port"], 22022,
+                         "Custom ssh port not set")

View File

@@ -583,9 +583,9 @@ class ZooKeeper(object):
         self.client = None
         self._became_lost = False
 
-    #========================================================================
+    # =======================================================================
     # Private Methods
-    #========================================================================
+    # =======================================================================
 
     def _imagePath(self, image):
         return "%s/%s" % (self.IMAGE_ROOT, image)
@@ -709,10 +709,9 @@ class ZooKeeper(object):
         else:
             self.log.debug("ZooKeeper connection: CONNECTED")
 
-    #========================================================================
+    # =======================================================================
     # Public Methods and Properties
-    #========================================================================
+    # =======================================================================
 
-
     @property
     def connected(self):
@@ -1065,9 +1064,11 @@ class ZooKeeper(object):
         except kze.NoNodeError:
             return None
 
-        d = ImageUpload.fromDict(
-            self._bytesToDict(data), build_number, provider, image, upload_number
-        )
+        d = ImageUpload.fromDict(self._bytesToDict(data),
                                  build_number,
                                  provider,
                                  image,
                                  upload_number)
         d.stat = stat
         return d
@@ -1158,7 +1159,8 @@ class ZooKeeper(object):
         for upload in uploads:
             if upload == 'lock':  # skip the upload lock node
                 continue
-            data = self.getImageUpload(image, build_number, provider, upload)
+            data = self.getImageUpload(
+                image, build_number, provider, upload)
             if not data or data.state != state:
                 continue
             elif (recent_data is None or
@@ -1201,7 +1203,8 @@ class ZooKeeper(object):
         # Generate a path for the upload. This doesn't have to exist yet
         # since we'll create new provider/upload ID znodes automatically.
         # Append trailing / so the sequence node is created as a child node.
-        upload_path = self._imageUploadPath(image, build_number, provider) + "/"
+        upload_path = self._imageUploadPath(
+            image, build_number, provider) + "/"
 
         if upload_number is None:
             path = self.client.create(
@@ -1486,7 +1489,8 @@ class ZooKeeper(object):
         :raises: ZKLockException if the request is not currently locked.
         '''
         if request.lock is None:
-            raise npe.ZKLockException("Request %s does not hold a lock" % request)
+            raise npe.ZKLockException(
+                "Request %s does not hold a lock" % request)
         request.lock.release()
         request.lock = None
@@ -1630,8 +1634,7 @@ class ZooKeeper(object):
         ret = {}
         for node in self.nodeIterator():
             if (node.state == READY and
-                not node.allocated_to and node.type in labels
-                ):
+                    not node.allocated_to and node.type in labels):
                 if node.type not in ret:
                     ret[node.type] = []
                 ret[node.type].append(node)