diff --git a/nodepool/builder.py b/nodepool/builder.py
index a8f2ad8e4..01a90b238 100644
--- a/nodepool/builder.py
+++ b/nodepool/builder.py
@@ -32,9 +32,11 @@ from nodepool import zk
 
 MINS = 60
 HOURS = 60 * MINS
-IMAGE_TIMEOUT = 6 * HOURS    # How long to wait for an image save
-SUSPEND_WAIT_TIME = 30       # How long to wait between checks for
-                             # ZooKeeper connectivity if it disappears.
+# How long to wait for an image save
+IMAGE_TIMEOUT = 6 * HOURS
+
+# How long to wait between checks for ZooKeeper connectivity if it disappears.
+SUSPEND_WAIT_TIME = 30
 
 # HP Cloud requires qemu compat with 0.10. That version works elsewhere,
 # so just hardcode it for all qcow2 building
@@ -151,7 +153,8 @@ class CleanupWorker(BaseWorker):
                  interval, zk):
         super(CleanupWorker, self).__init__(builder_id, config_path,
                                             secure_path, interval, zk)
-        self.log = logging.getLogger("nodepool.builder.CleanupWorker.%s" % name)
+        self.log = logging.getLogger(
+            "nodepool.builder.CleanupWorker.%s" % name)
         self.name = 'CleanupWorker.%s' % name
 
     def _buildUploadRecencyTable(self):
@@ -395,7 +398,8 @@ class CleanupWorker(BaseWorker):
                 self.log.info("Removing failed upload record: %s" % upload)
                 self._zk.deleteUpload(image, build_id, provider, upload.id)
             elif upload.state == zk.DELETING:
-                self.log.info("Removing deleted upload and record: %s" % upload)
+                self.log.info(
+                    "Removing deleted upload and record: %s" % upload)
                 self._deleteUpload(upload)
             elif upload.state == zk.FAILED:
                 self.log.info("Removing failed upload and record: %s" % upload)
@@ -410,7 +414,7 @@ class CleanupWorker(BaseWorker):
         all_builds = self._zk.getBuilds(image)
         builds_to_keep = set([b for b in sorted(all_builds, reverse=True,
                                                 key=lambda y: y.state_time)
-                              if b.state==zk.READY][:2])
+                              if b.state == zk.READY][:2])
         local_builds = set(self._filterLocalBuilds(image, all_builds))
         diskimage = self._config.diskimages.get(image)
         if not diskimage and not local_builds:
@@ -575,7 +579,8 @@ class BuildWorker(BaseWorker):
             if (not builds
                 or (now - builds[0].state_time) >= diskimage.rebuild_age
                 or not set(builds[0].formats).issuperset(diskimage.image_types)
-            ):
+                    ):
+
                 try:
                     with self._zk.imageBuildLock(diskimage.name, blocking=False):
                         # To avoid locking each image repeatedly, we have an
@@ -584,7 +589,8 @@ class BuildWorker(BaseWorker):
                         # lock acquisition. If it's not the same build as
                         # identified in the first check above, assume another
                         # BuildWorker created the build for us and continue.
-                        builds2 = self._zk.getMostRecentBuilds(1, diskimage.name, zk.READY)
+                        builds2 = self._zk.getMostRecentBuilds(
+                            1, diskimage.name, zk.READY)
                         if builds2 and builds[0].id != builds2[0].id:
                             return
 
@@ -746,7 +752,8 @@ class BuildWorker(BaseWorker):
             self._zk.resetLostFlag()
             build_data.state = zk.FAILED
         elif p.returncode:
-            self.log.info("DIB failed creating %s (%s)" % (diskimage.name, p.returncode))
+            self.log.info(
+                "DIB failed creating %s (%s)" % (diskimage.name, p.returncode))
             build_data.state = zk.FAILED
         else:
             self.log.info("DIB image %s is built" % diskimage.name)
@@ -756,7 +763,8 @@ class BuildWorker(BaseWorker):
         if self._statsd:
             # record stats on the size of each image we create
             for ext in img_types.split(','):
-                key = 'nodepool.dib_image_build.%s.%s.size' % (diskimage.name, ext)
+                key = 'nodepool.dib_image_build.%s.%s.size' % (
+                    diskimage.name, ext)
                 # A bit tricky because these image files may be sparse
                 # files; we only want the true size of the file for
                 # purposes of watching if we've added too much stuff
@@ -1101,9 +1109,9 @@ class NodePoolBuilder(object):
         # startup process has completed.
         self._start_lock = threading.Lock()
 
-    #=======================================================================
+    # ======================================================================
     # Private methods
-    #=======================================================================
+    # ======================================================================
 
     def _getBuilderID(self, id_file):
         if not os.path.exists(id_file):
@@ -1126,9 +1134,9 @@ class NodePoolBuilder(object):
             raise RuntimeError('No images-dir specified in config.')
         return config
 
-    #=======================================================================
+    # ======================================================================
     # Public methods
-    #=======================================================================
+    # ======================================================================
 
     def start(self):
         '''
diff --git a/nodepool/cmd/nodepoolcmd.py b/nodepool/cmd/nodepoolcmd.py
index 59f6a7919..a73052380 100755
--- a/nodepool/cmd/nodepoolcmd.py
+++ b/nodepool/cmd/nodepoolcmd.py
@@ -289,7 +289,7 @@ class NodePoolCmd(NodepoolApp):
         validator = ConfigValidator(self.args.config)
         validator.validate()
         log.info("Configuration validation complete")
-        #TODO(asselin,yolanda): add validation of secure.conf
+        # TODO(asselin,yolanda): add validation of secure.conf
 
     def request_list(self):
         print(status.request_list(self.zk))
diff --git a/nodepool/driver/__init__.py b/nodepool/driver/__init__.py
index 256645e06..f6e71c3d5 100644
--- a/nodepool/driver/__init__.py
+++ b/nodepool/driver/__init__.py
@@ -106,9 +106,9 @@ class NodeRequestHandler(object):
             return 0
         return self.launch_manager.alive_thread_count
 
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
     # Public methods
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
 
     def unlockNodeSet(self, clear_allocation=False):
         '''
diff --git a/nodepool/driver/openstack/config.py b/nodepool/driver/openstack/config.py
index 02b47abdd..cfd0991a0 100644
--- a/nodepool/driver/openstack/config.py
+++ b/nodepool/driver/openstack/config.py
@@ -98,7 +98,7 @@ class OpenStackProviderConfig(ProviderConfig):
 
     def load(self, config):
         if OpenStackProviderConfig.os_client_config is None:
-            OpenStackProviderConfig.os_client_config = \
+            OpenStackProviderConfig.os_client_config = \
                 os_client_config.OpenStackConfig()
         cloud_kwargs = self._cloudKwargs()
         self.cloud_config = self.os_client_config.get_one_cloud(**cloud_kwargs)
@@ -140,7 +140,7 @@ class OpenStackProviderConfig(ProviderConfig):
                any([len(k) > 255 or len(v) > 255
                     for k, v in i.meta.items()]):
                 # soft-fail
-                #self.log.error("Invalid metadata for %s; ignored"
+                # self.log.error("Invalid metadata for %s; ignored"
                 #                % i.name)
                 i.meta = {}
 
@@ -200,7 +200,6 @@ class OpenStackProviderConfig(ProviderConfig):
                 top_label = config.labels[pl.name]
                 top_label.pools.append(pp)
 
-
     def get_schema(self):
         provider_diskimage = {
             'name': str,
@@ -238,7 +237,8 @@ class OpenStackProviderConfig(ProviderConfig):
 
         label_diskimage = v.Schema({v.Required('diskimage'): str},
                                    extra=True)
-        label_cloud_image = v.Schema({v.Required('cloud-image'): str}, extra=True)
+        label_cloud_image = v.Schema({v.Required('cloud-image'): str},
+                                     extra=True)
 
         pool_label = v.All(pool_label_main,
                            v.Any(label_min_ram, label_flavor_name),
diff --git a/nodepool/driver/openstack/handler.py b/nodepool/driver/openstack/handler.py
index 4bfc7839f..1fa98803f 100644
--- a/nodepool/driver/openstack/handler.py
+++ b/nodepool/driver/openstack/handler.py
@@ -61,8 +61,8 @@ class NodeLauncher(threading.Thread, stats.StatsReporter):
         self._pool = self._label.pool
         self._provider_config = self._pool.provider
         if self._label.diskimage:
-            self._diskimage = \
-                self._provider_config.diskimages[self._label.diskimage.name]
+            self._diskimage = self._provider_config.diskimages[
+                self._label.diskimage.name]
         else:
             self._diskimage = None
 
@@ -542,8 +542,7 @@ class OpenStackNodeRequestHandler(NodeRequestHandler):
         elif not self._imagesAvailable():
             declined_reasons.append('images are not available')
         elif (self.pool.max_servers == 0 or
-              not self._hasProviderQuota(self.request.node_types)
-              ):
+              not self._hasProviderQuota(self.request.node_types)):
             declined_reasons.append('it would exceed quota')
         # TODO(tobiash): Maybe also calculate the quota prediction here and
         # backoff for some seconds if the used quota would be exceeded?
diff --git a/nodepool/driver/openstack/provider.py b/nodepool/driver/openstack/provider.py
index 7fb75f4f2..3ec7265cb 100755
--- a/nodepool/driver/openstack/provider.py
+++ b/nodepool/driver/openstack/provider.py
@@ -465,7 +465,7 @@ class OpenStackProvider(Provider):
         return True
 
     def uploadImage(self, image_name, filename, image_type=None, meta=None,
-            md5=None, sha256=None):
+                    md5=None, sha256=None):
         # configure glance and upload image. Note the meta flags
         # are provided as custom glance properties
         # NOTE: we have wait=True set here. This is not how we normally
diff --git a/nodepool/exceptions.py b/nodepool/exceptions.py
index 941b59b49..c754e4943 100755
--- a/nodepool/exceptions.py
+++ b/nodepool/exceptions.py
@@ -64,8 +64,10 @@ class ServerDeleteException(TimeoutException):
 class ImageCreateException(TimeoutException):
     statsd_key = 'error.imagetimeout'
 
+
 class ZKException(Exception):
     pass
 
+
 class ZKLockException(ZKException):
     pass
diff --git a/nodepool/launcher.py b/nodepool/launcher.py
index 58a1a8257..da7d79647 100755
--- a/nodepool/launcher.py
+++ b/nodepool/launcher.py
@@ -35,10 +35,14 @@ from nodepool.driver.openstack.handler import OpenStackNodeRequestHandler
 
 MINS = 60
 HOURS = 60 * MINS
-WATERMARK_SLEEP = 10         # Interval between checking if new servers needed
-LOCK_CLEANUP = 8 * HOURS     # When to delete node request lock znodes
-SUSPEND_WAIT_TIME = 30       # How long to wait between checks for ZooKeeper
-                             # connectivity if it disappears.
+# Interval between checking if new servers needed
+WATERMARK_SLEEP = 10
+
+# When to delete node request lock znodes
+LOCK_CLEANUP = 8 * HOURS
+
+# How long to wait between checks for ZooKeeper connectivity if it disappears.
+SUSPEND_WAIT_TIME = 30
 
 
 class NodeDeleter(threading.Thread, stats.StatsReporter):
@@ -137,9 +141,9 @@ class PoolWorker(threading.Thread):
                                              os.getpid(),
                                              self.name)
 
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
     # Private methods
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
 
     def _get_node_request_handler(self, provider, request):
         if provider.driver.name == 'fake':
@@ -177,8 +181,7 @@ class PoolWorker(threading.Thread):
 
             # Short-circuit for limited request handling
             if (provider.max_concurrency > 0 and
-                active_threads >= provider.max_concurrency
-                ):
+                    active_threads >= provider.max_concurrency):
                 self.log.debug("Request handling limited: %s active threads ",
                                "with max concurrency of %s",
                                active_threads, provider.max_concurrency)
@@ -238,9 +241,9 @@ class PoolWorker(threading.Thread):
         active_reqs = [r.request.id for r in self.request_handlers]
         self.log.debug("Active requests: %s", active_reqs)
 
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
     # Public methods
-    #----------------------------------------------------------------
+    # ---------------------------------------------------------------
 
     def activeThreads(self):
         '''
@@ -460,7 +463,7 @@ class CleanupWorker(BaseCleanupWorker):
         for lock_stat in zk.nodeRequestLockStatsIterator():
             if lock_stat.lock_id in requests:
                 continue
-            if (now - lock_stat.stat.mtime/1000) > LOCK_CLEANUP:
+            if (now - lock_stat.stat.mtime / 1000) > LOCK_CLEANUP:
                 zk.deleteNodeRequestLock(lock_stat.lock_id)
 
     def _cleanupLeakedInstances(self):
@@ -604,8 +607,7 @@ class DeletedNodeWorker(BaseCleanupWorker):
             # If a ready node has been allocated to a request, but that
             # request is now missing, deallocate it.
             if (node.state == zk.READY and node.allocated_to
-                and not zk_conn.getNodeRequest(node.allocated_to)
-                ):
+                    and not zk_conn.getNodeRequest(node.allocated_to)):
                 try:
                     zk_conn.lockNode(node, blocking=False)
                 except exceptions.ZKLockException:
@@ -936,13 +938,13 @@ class NodePool(threading.Thread):
                     key = provider.name + '-' + pool.name
                     if key not in self._pool_threads:
                         t = PoolWorker(self, provider.name, pool.name)
-                        self.log.info( "Starting %s" % t.name)
+                        self.log.info("Starting %s" % t.name)
                         t.start()
                         self._pool_threads[key] = t
                     elif not self._pool_threads[key].isAlive():
                         self._pool_threads[key].join()
                         t = PoolWorker(self, provider.name, pool.name)
-                        self.log.info( "Restarting %s" % t.name)
+                        self.log.info("Restarting %s" % t.name)
                         t.start()
                         self._pool_threads[key] = t
             except Exception:
diff --git a/nodepool/nodeutils.py b/nodepool/nodeutils.py
index 1db8de4ce..178ec8e86 100755
--- a/nodepool/nodeutils.py
+++ b/nodepool/nodeutils.py
@@ -29,9 +29,8 @@ from nodepool import exceptions
 
 log = logging.getLogger("nodepool.utils")
 
-
-ITERATE_INTERVAL = 2    # How long to sleep while waiting for something
-                        # in a loop
+# How long to sleep while waiting for something in a loop
+ITERATE_INTERVAL = 2
 
 
 def iterate_timeout(max_seconds, exc, purpose):
diff --git a/nodepool/stats.py b/nodepool/stats.py
index e46e83a57..d733759c7 100755
--- a/nodepool/stats.py
+++ b/nodepool/stats.py
@@ -24,6 +24,7 @@ from nodepool import zk
 
 log = logging.getLogger("nodepool.stats")
 
+
 def get_client():
     """Return a statsd client object setup from environment variables; or
        None if they are not set
@@ -69,7 +70,7 @@ class StatsReporter(object):
             'nodepool.launch.provider.%s.%s' % (provider_name, subkey),
             'nodepool.launch.image.%s.%s' % (image_name, subkey),
             'nodepool.launch.%s' % (subkey,),
-            ]
+        ]
 
         if node_az:
             keys.append('nodepool.launch.provider.%s.%s.%s' %
@@ -87,7 +88,6 @@ class StatsReporter(object):
             self._statsd.timing(key, dt)
             self._statsd.incr(key)
 
-
     def updateNodeStats(self, zk_conn, provider):
         '''
         Refresh statistics for all known nodes.
@@ -108,11 +108,11 @@ class StatsReporter(object):
             states[key] = 0
 
         for node in zk_conn.nodeIterator():
-            #nodepool.nodes.STATE
+            # nodepool.nodes.STATE
             key = 'nodepool.nodes.%s' % node.state
             states[key] += 1
 
-            #nodepool.label.LABEL.nodes.STATE
+            # nodepool.label.LABEL.nodes.STATE
             key = 'nodepool.label.%s.nodes.%s' % (node.type, node.state)
             # It's possible we could see node types that aren't in our config
             if key in states:
@@ -120,7 +120,7 @@ class StatsReporter(object):
             else:
                 states[key] = 1
 
-            #nodepool.provider.PROVIDER.nodes.STATE
+            # nodepool.provider.PROVIDER.nodes.STATE
             key = 'nodepool.provider.%s.nodes.%s' % (node.provider, node.state)
             # It's possible we could see providers that aren't in our config
             if key in states:
@@ -131,7 +131,7 @@ class StatsReporter(object):
         for key, count in states.items():
             self._statsd.gauge(key, count)
 
-        #nodepool.provider.PROVIDER.max_servers
+        # nodepool.provider.PROVIDER.max_servers
         key = 'nodepool.provider.%s.max_servers' % provider.name
         max_servers = sum([p.max_servers for p in provider.pools.values()
                            if p.max_servers])
diff --git a/nodepool/status.py b/nodepool/status.py
index b28898885..07e56db55 100755
--- a/nodepool/status.py
+++ b/nodepool/status.py
@@ -145,15 +145,16 @@ def dib_image_list_json(zk):
     for image_name in zk.getImageNames():
         for build_no in zk.getBuildNumbers(image_name):
             build = zk.getBuild(image_name, build_no)
-            objs.append({'id' : '-'.join([image_name, build_no]),
+            objs.append({'id': '-'.join([image_name, build_no]),
                          'image': image_name,
                          'builder': build.builder,
                          'formats': build.formats,
                          'state': build.state,
                          'age': int(build.state_time)
-                        })
+                         })
     return json.dumps(objs)
 
+
 def image_list(zk):
     t = PrettyTable(["Build ID", "Upload ID", "Provider", "Image",
                      "Provider Image Name", "Provider Image ID", "State",
@@ -173,6 +174,7 @@ def image_list(zk):
                        age(upload.state_time)])
     return str(t)
 
+
 def request_list(zk):
     t = PrettyTable(["Request ID", "State", "Requestor", "Node Types",
                      "Nodes", "Declined By"])
diff --git a/nodepool/task_manager.py b/nodepool/task_manager.py
index 7f795dd9c..95c493bea 100644
--- a/nodepool/task_manager.py
+++ b/nodepool/task_manager.py
@@ -26,6 +26,7 @@ import requests.exceptions
 
 from nodepool import stats
 
+
 class ManagerStoppedException(Exception):
     pass
 
@@ -106,7 +107,7 @@ class TaskManager(threading.Thread):
                 self.log.debug("Manager %s ran task %s in %ss" %
                                (self.name, type(task).__name__, dt))
                 if self.statsd:
-                    #nodepool.task.PROVIDER.subkey
+                    # nodepool.task.PROVIDER.subkey
                     subkey = type(task).__name__
                     key = 'nodepool.task.%s.%s' % (self.name, subkey)
                     self.statsd.timing(key, int(dt * 1000))
diff --git a/nodepool/tests/__init__.py b/nodepool/tests/__init__.py
index 77aad1293..97068e988 100644
--- a/nodepool/tests/__init__.py
+++ b/nodepool/tests/__init__.py
@@ -163,10 +163,10 @@ class BaseTestCase(testtools.TestCase):
             logging.basicConfig(level=logging.DEBUG)
         l = logging.getLogger('kazoo')
         l.setLevel(logging.INFO)
-        l.propagate=False
+        l.propagate = False
         l = logging.getLogger('stevedore')
         l.setLevel(logging.INFO)
-        l.propagate=False
+        l.propagate = False
 
         self.useFixture(fixtures.NestedTempfile())
         self.subprocesses = []
@@ -292,9 +292,9 @@ class DBTestCase(BaseTestCase):
         with open(configfile, 'rb') as conf_fd:
             config = conf_fd.read().decode('utf8')
             data = config.format(images_dir=images_dir.path,
-                          zookeeper_host=self.zookeeper_host,
-                          zookeeper_port=self.zookeeper_port,
-                          zookeeper_chroot=self.zookeeper_chroot)
+                                 zookeeper_host=self.zookeeper_host,
+                                 zookeeper_port=self.zookeeper_port,
+                                 zookeeper_chroot=self.zookeeper_chroot)
         os.write(fd, data.encode('utf8'))
         os.close(fd)
         self._config_images_dir = images_dir
@@ -500,8 +500,8 @@ class DBTestCase(BaseTestCase):
     def printZKTree(self, node):
         def join(a, b):
             if a.endswith('/'):
-                return a+b
-            return a+'/'+b
+                return a + b
+            return a + '/' + b
 
         data, stat = self.zk.client.get(node)
         self.log.debug("Node: %s" % (node,))
diff --git a/nodepool/tests/test_builder.py b/nodepool/tests/test_builder.py
index 6547d7b0d..9e4deb942 100644
--- a/nodepool/tests/test_builder.py
+++ b/nodepool/tests/test_builder.py
@@ -86,6 +86,7 @@ class TestNodepoolBuilderDibImage(tests.BaseTestCase):
         image = builder.DibImageFile('myid1234')
         self.assertRaises(exceptions.BuilderError, image.to_path, '/imagedir/')
 
+
 class TestNodePoolBuilder(tests.DBTestCase):
 
     def test_start_stop(self):
@@ -155,7 +156,8 @@ class TestNodePoolBuilder(tests.DBTestCase):
         image = self.zk.getMostRecentImageUpload('fake-provider', 'fake-image')
         self.replace_config(configfile, 'node_two_provider_remove.yaml')
         self.waitForImageDeletion('fake-provider2', 'fake-image')
-        image2 = self.zk.getMostRecentImageUpload('fake-provider', 'fake-image')
+        image2 = self.zk.getMostRecentImageUpload('fake-provider',
+                                                  'fake-image')
         self.assertEqual(image, image2)
 
     def test_image_addition(self):
diff --git a/nodepool/tests/test_commands.py b/nodepool/tests/test_commands.py
index fe7829bed..037916887 100644
--- a/nodepool/tests/test_commands.py
+++ b/nodepool/tests/test_commands.py
@@ -52,7 +52,8 @@ class TestNodepoolCMD(tests.DBTestCase):
         self.assertEquals(rows_with_val, count)
 
     def assert_alien_images_listed(self, configfile, image_cnt, image_id):
-        self.assert_listed(configfile, ['alien-image-list'], 2, image_id, image_cnt)
+        self.assert_listed(configfile, ['alien-image-list'], 2, image_id,
+                           image_cnt)
 
     def assert_alien_images_empty(self, configfile):
         self.assert_alien_images_listed(configfile, 0, 0)
@@ -245,7 +246,7 @@ class TestNodepoolCMD(tests.DBTestCase):
         pool = self.useNodepool(configfile, watermark_sleep=1)
         self._useBuilder(configfile)
         pool.start()
-        self.waitForImage( 'fake-provider', 'fake-image')
+        self.waitForImage('fake-provider', 'fake-image')
         nodes = self.waitForNodes('fake-label')
         self.assertEqual(len(nodes), 1)
 
@@ -282,7 +283,7 @@ class TestNodepoolCMD(tests.DBTestCase):
         pool = self.useNodepool(configfile, watermark_sleep=1)
         self._useBuilder(configfile)
         pool.start()
-        self.waitForImage( 'fake-provider', 'fake-image')
+        self.waitForImage('fake-provider', 'fake-image')
         nodes = self.waitForNodes('fake-label')
         self.assertEqual(len(nodes), 1)
 
diff --git a/nodepool/tests/test_launcher.py b/nodepool/tests/test_launcher.py
index b1fef76ed..317625e45 100644
--- a/nodepool/tests/test_launcher.py
+++ b/nodepool/tests/test_launcher.py
@@ -256,8 +256,7 @@ class TestLauncher(tests.DBTestCase):
         self._test_node_assignment_at_quota(config='node_quota_cloud.yaml',
                                             max_cores=math.inf,
                                             max_instances=math.inf,
-                                            max_ram=2*8192)
-
+                                            max_ram=2 * 8192)
 
     @mock.patch('nodepool.driver.fake.provider.get_fake_quota')
     def test_over_quota(self, mock_quota,
@@ -268,9 +267,9 @@ class TestLauncher(tests.DBTestCase):
         '''
 
         # Start with an instance quota of 2
-        max_cores=math.inf
-        max_instances=2
-        max_ram=math.inf
+        max_cores = math.inf
+        max_instances = 2
+        max_ram = math.inf
 
         # patch the cloud with requested quota
         mock_quota.return_value = (max_cores, max_instances, max_ram)
diff --git a/nodepool/tests/test_zk.py b/nodepool/tests/test_zk.py
index 2ebb526f7..d24125303 100644
--- a/nodepool/tests/test_zk.py
+++ b/nodepool/tests/test_zk.py
@@ -104,7 +104,8 @@ class TestZooKeeper(tests.DBTestCase):
         with testtools.ExpectedException(
             npe.ZKLockException, "Did not get lock on .*"
         ):
-            with self.zk.imageUploadLock(image, bnum, prov, blocking=False):
+            with self.zk.imageUploadLock(image, bnum, prov,
+                                         blocking=False):
                 pass
 
     def test_imageUploadLock_exception_blocking(self):
@@ -254,7 +255,8 @@ class TestZooKeeper(tests.DBTestCase):
         self.zk.storeImageUpload(image, bnum, provider, up3)
 
         # up2 should be the most recent 'ready' upload
-        data = self.zk.getMostRecentBuildImageUploads(1, image, bnum, provider, zk.READY)
+        data = self.zk.getMostRecentBuildImageUploads(
+            1, image, bnum, provider, zk.READY)
         self.assertNotEqual([], data)
         self.assertEqual(1, len(data))
         self.assertEqual(data[0].id, up2_id)
@@ -278,7 +280,8 @@ class TestZooKeeper(tests.DBTestCase):
         up3_id = self.zk.storeImageUpload(image, bnum, provider, up3)
 
         # up3 should be the most recent upload, regardless of state
-        data = self.zk.getMostRecentBuildImageUploads(1, image, bnum, provider, None)
+        data = self.zk.getMostRecentBuildImageUploads(
+            1, image, bnum, provider, None)
         self.assertNotEqual([], data)
         self.assertEqual(1, len(data))
         self.assertEqual(data[0].id, up3_id)
@@ -860,11 +863,11 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(o.image_id, d['image_id'])
         self.assertEqual(o.launcher, d['launcher'])
         self.assertEqual(o.external_id, d['external_id'])
-        self.assertEqual(o.hostname , d['hostname'])
-        self.assertEqual(o.comment , d['comment'])
+        self.assertEqual(o.hostname, d['hostname'])
+        self.assertEqual(o.comment, d['comment'])
         self.assertEqual(o.hold_job, d['hold_job'])
-        self.assertEqual(o.host_keys , d['host_keys'])
-        self.assertEqual(o.connection_port , d['connection_port'])
+        self.assertEqual(o.host_keys, d['host_keys'])
+        self.assertEqual(o.connection_port, d['connection_port'])
 
     def test_custom_connection_port(self):
         n = zk.Node('0001')
@@ -875,4 +878,5 @@ class TestZKModel(tests.BaseTestCase):
         self.assertEqual(n.connection_port, 22, "Default port not 22")
         n.connection_port = 22022
         d = n.toDict()
-        self.assertEqual(d["connection_port"], 22022, "Custom ssh port not set")
+        self.assertEqual(d["connection_port"], 22022,
+                         "Custom ssh port not set")
diff --git a/nodepool/zk.py b/nodepool/zk.py
index 51e54f769..ca894cd16 100755
--- a/nodepool/zk.py
+++ b/nodepool/zk.py
@@ -583,9 +583,9 @@ class ZooKeeper(object):
         self.client = None
         self._became_lost = False
 
-    #========================================================================
+    # =======================================================================
    # Private Methods
-    #========================================================================
+    # =======================================================================
 
     def _imagePath(self, image):
         return "%s/%s" % (self.IMAGE_ROOT, image)
@@ -709,10 +709,9 @@ class ZooKeeper(object):
         else:
             self.log.debug("ZooKeeper connection: CONNECTED")
 
-
-    #========================================================================
+    # =======================================================================
     # Public Methods and Properties
-    #========================================================================
+    # =======================================================================
 
     @property
     def connected(self):
@@ -1065,9 +1064,11 @@ class ZooKeeper(object):
         except kze.NoNodeError:
             return None
 
-        d = ImageUpload.fromDict(
-            self._bytesToDict(data), build_number, provider, image, upload_number
-        )
+        d = ImageUpload.fromDict(self._bytesToDict(data),
+                                 build_number,
+                                 provider,
+                                 image,
+                                 upload_number)
         d.stat = stat
         return d
 
@@ -1158,7 +1159,8 @@ class ZooKeeper(object):
         for upload in uploads:
             if upload == 'lock':   # skip the upload lock node
                 continue
-            data = self.getImageUpload(image, build_number, provider, upload)
+            data = self.getImageUpload(
+                image, build_number, provider, upload)
             if not data or data.state != state:
                 continue
             elif (recent_data is None or
@@ -1201,7 +1203,8 @@ class ZooKeeper(object):
         # Generate a path for the upload. This doesn't have to exist yet
         # since we'll create new provider/upload ID znodes automatically.
         # Append trailing / so the sequence node is created as a child node.
-        upload_path = self._imageUploadPath(image, build_number, provider) + "/"
+        upload_path = self._imageUploadPath(
+            image, build_number, provider) + "/"
 
         if upload_number is None:
             path = self.client.create(
@@ -1268,8 +1271,8 @@ class ZooKeeper(object):
 
         # Verify that no upload znodes exist.
         for prov in self.getBuildProviders(image, build_number):
-                if self.getImageUploadNumbers(image, build_number, prov):
-                    return False
+            if self.getImageUploadNumbers(image, build_number, prov):
+                return False
 
         try:
             # NOTE: Need to do recursively to remove lock znodes
@@ -1486,7 +1489,8 @@ class ZooKeeper(object):
         :raises: ZKLockException if the request is not currently locked.
         '''
         if request.lock is None:
-            raise npe.ZKLockException("Request %s does not hold a lock" % request)
+            raise npe.ZKLockException(
+                "Request %s does not hold a lock" % request)
         request.lock.release()
         request.lock = None
 
@@ -1630,8 +1634,7 @@ class ZooKeeper(object):
         ret = {}
         for node in self.nodeIterator():
             if (node.state == READY and
-                not node.allocated_to and node.type in labels
-                ):
+                    not node.allocated_to and node.type in labels):
                 if node.type not in ret:
                     ret[node.type] = []
                 ret[node.type].append(node)