Update some variable names

Now that the component we registered is a "pool" change the call
sites to use "launcher_pools" instead of "launchers".  This may
reduce some ambiguity.

(s/launcher/pool/ might still be ambiguous since it may not be clear
whether we're talking about our own pools or other pools; thus the
choice of "launcher_pool" for the variable name.)

Also, remove a redundant test assertion.

Change-Id: I865883cdb115bf72a3bd034d9290f60666d64b66
changes/20/843020/2
James E. Blair 9 months ago
parent ea35fd5152
commit 1323d0b556

@ -641,9 +641,9 @@ class NodeRequestHandler(NodeRequestHandlerNotifications,
# want to make sure we don't continuously grow this array.
if self.launcher_id not in self.request.declined_by:
self.request.declined_by.append(self.launcher_id)
launchers = set([x.id for x in self.zk.getRegisteredPools()])
if launchers.issubset(set(self.request.declined_by)):
# All launchers have declined it
launcher_pools = set([x.id for x in self.zk.getRegisteredPools()])
if launcher_pools.issubset(set(self.request.declined_by)):
# All launcher_pools have declined it
self.log.debug("Failing declined node request")
self.request.state = zk.FAILED
else:

@ -121,7 +121,7 @@ class PoolWorker(threading.Thread, stats.StatsReporter):
# become out of date as the loop progresses, but it should be
# good enough to determine whether we should process requests
# which express a preference for a specific provider.
launchers = self.zk.getRegisteredPools()
launcher_pools = self.zk.getRegisteredPools()
pm = self.getProviderManager()
has_quota_support = isinstance(pm, QuotaSupport)
@ -180,8 +180,8 @@ class PoolWorker(threading.Thread, stats.StatsReporter):
log = get_annotated_logger(self.log, event_id=req.event_id,
node_request_id=req.id)
# Get the candidate launchers for these nodes
candidate_launchers = set(
x for x in launchers
candidate_launcher_pools = set(
x for x in launcher_pools
if (set(x.supported_labels).issuperset(set(req.node_types)) and
x.id not in req.declined_by)
)
@ -189,26 +189,26 @@ class PoolWorker(threading.Thread, stats.StatsReporter):
# which is online
if req.provider and req.provider != self.provider_name:
# The request is asking for a specific provider
launcher_ids_for_provider = set(
x.id for x in candidate_launchers
launcher_pool_ids_for_provider = set(
x.id for x in candidate_launcher_pools
if x.provider_name == req.provider
)
if launcher_ids_for_provider:
if launcher_pool_ids_for_provider:
# There is a launcher online which can satisfy the
# request that has not yet declined the request,
# so yield to it.
log.debug("Yielding request to provider %s %s",
req.provider, launcher_ids_for_provider)
req.provider, launcher_pool_ids_for_provider)
continue
priority = self.getPriority()
launcher_ids_with_higher_priority = set(
x.id for x in candidate_launchers
launcher_pool_ids_with_higher_priority = set(
x.id for x in candidate_launcher_pools
if x.priority < priority and not x.paused
)
if launcher_ids_with_higher_priority:
if launcher_pool_ids_with_higher_priority:
log.debug("Yielding request to higher priority providers %s",
launcher_ids_with_higher_priority)
launcher_pool_ids_with_higher_priority)
continue
if has_quota_support and not all(label_quota.get(l, math.inf) > 0

@ -98,13 +98,13 @@ class StatsReporter(object):
states = {}
launchers = zk_conn.getRegisteredPools()
launcher_pools = zk_conn.getRegisteredPools()
labels = set()
for launcher in launchers:
labels.update(launcher.supported_labels)
for launcher_pool in launcher_pools:
labels.update(launcher_pool.supported_labels)
providers = set()
for launcher in launchers:
providers.add(launcher.provider_name)
for launcher_pool in launcher_pools:
providers.add(launcher_pool.provider_name)
# Initialize things we know about to zero
for state in zk.Node.VALID_STATES:

@ -267,9 +267,9 @@ def label_list(zk):
# NOTE(ianw): maybe add to each entry a list of which
# launchers support the label?
labels = set()
launchers = zk.getRegisteredPools()
for launcher in launchers:
labels.update(set(launcher.supported_labels))
launcher_pools = zk.getRegisteredPools()
for launcher_pool in launcher_pools:
labels.update(set(launcher_pool.supported_labels))
for label in labels:
objs.append({'label': label})

@ -1669,7 +1669,6 @@ class TestLauncher(tests.DBTestCase):
self.assertEqual('secret', fake_image.env_vars['REG_PASSWORD'])
zk_servers = pool.config.zookeeper_servers
self.assertTrue(len(zk_servers) > 0)
expected = (f'{self.zookeeper_host}:{self.zookeeper_port}'
f'{self.zookeeper_chroot}')
self.assertEqual(expected, zk_servers)
@ -1924,19 +1923,20 @@ class TestLauncher(tests.DBTestCase):
pool.start()
self.waitForNodes('fake-label')
launchers = self.zk.getRegisteredPools()
self.assertEqual(1, len(launchers))
launcher_pools = self.zk.getRegisteredPools()
self.assertEqual(1, len(launcher_pools))
# the fake-label-unused label should not appear
self.assertEqual({'fake-label'}, set(launchers[0].supported_labels))
self.assertEqual({'fake-label'},
set(launcher_pools[0].supported_labels))
self.replace_config(configfile, 'launcher_reg2.yaml')
# we should get 1 additional label now
while (set(launchers[0].supported_labels) !=
while (set(launcher_pools[0].supported_labels) !=
{'fake-label', 'fake-label2'}):
time.sleep(1)
launchers = self.zk.getRegisteredPools()
launcher_pools = self.zk.getRegisteredPools()
@mock.patch('nodepool.driver.openstack.handler.'
'OpenStackNodeLauncher._launchNode')

@ -40,13 +40,13 @@ class TestComponentRegistry(tests.DBTestCase):
})
launcher.register()
launchers = self.zk.getRegisteredPools()
self.assertEqual(1, len(launchers))
self.assertEqual(launcher.id, list(launchers)[0].id)
launcher_pools = self.zk.getRegisteredPools()
self.assertEqual(1, len(launcher_pools))
self.assertEqual(launcher.id, list(launcher_pools)[0].id)
launcher.unregister()
launchers = self.zk.getRegisteredPools()
self.assertEqual(0, len(launchers))
launcher_pools = self.zk.getRegisteredPools()
self.assertEqual(0, len(launcher_pools))
class TestZooKeeper(tests.DBTestCase):

Loading…
Cancel
Save