Increase ThreadPoolExecutor max_workers

The existing max_workers setting for the ThreadPoolExecutor caps the
number of nodes that can be deployed concurrently at 16. This change
raises the limit to 64 so that more threads run in parallel, allowing
the operator to deploy more nodes at once.

Change-Id: I6d1dfa7ad8f5e03a8328311e6b59ee116a250462
Phil Sphicas 2021-03-20 05:52:12 +00:00
parent 292e94ee2c
commit 3cbeb29f2d
7 changed files with 8 additions and 8 deletions
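For context, the sketch below (not part of this change) illustrates how
max_workers bounds concurrency in concurrent.futures: with max_workers=16
only 16 submitted calls run at once and the rest queue, while 64 lets all
of the per-node subtasks run in a single wave. deploy_node and the node
names are hypothetical stand-ins for the driver subtasks in this repo.

import concurrent.futures
import time

def deploy_node(node_name):
    """Hypothetical stand-in for a per-node deployment subtask."""
    time.sleep(1)  # simulate a slow, I/O-bound driver call
    return node_name

nodes = ["node%02d" % i for i in range(64)]

# With max_workers=16, only 16 of these calls run at a time and the rest
# wait in the queue; with max_workers=64 all 64 nodes are worked in parallel.
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
    futures = {executor.submit(deploy_node, n): n for n in nodes}
    for done in concurrent.futures.as_completed(futures):
        print("finished", done.result())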

@@ -81,7 +81,7 @@ class PromenadeDriver(KubernetesDriver):
         else:
             target_nodes = self.orchestrator.get_target_nodes(task)
-        with concurrent.futures.ThreadPoolExecutor() as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 prom_client = PromenadeClient()

@@ -149,7 +149,7 @@ class MaasNodeDriver(NodeDriver):
         else:
             target_nodes = self.orchestrator.get_target_nodes(task)
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             maas_client = MaasRequestFactory(
                 config.config_mgr.conf.maasdriver.maas_api_url,

@@ -90,7 +90,7 @@ class LibvirtDriver(oob_driver.OobDriver):
         target_nodes = self.orchestrator.get_target_nodes(task)
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])

@@ -94,7 +94,7 @@ class PyghmiDriver(oob_driver.OobDriver):
         target_nodes = self.orchestrator.get_target_nodes(task)
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])

@@ -106,7 +106,7 @@ class RedfishDriver(oob_driver.OobDriver):
         target_nodes = self.orchestrator.get_target_nodes(task)
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])

@@ -52,7 +52,7 @@ class BaseAction(object):
         """
         task_futures = dict()
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as te:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
             for t in subtask_id_list:
                 task_futures[t.bytes] = te.submit(fn, t, *args, **kwargs)
@@ -76,7 +76,7 @@ class BaseAction(object):
                 self.task.get_id()))
         split_tasks = dict()
-        with concurrent.futures.ThreadPoolExecutor() as te:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
             for n in target_nodes:
                 split_task = self.orchestrator.create_task(
                     design_ref=self.task.design_ref,

@@ -129,7 +129,7 @@ class Orchestrator(object):
         # Loop trying to claim status as the active orchestrator
-        tp = concurrent.futures.ThreadPoolExecutor(max_workers=16)
+        tp = concurrent.futures.ThreadPoolExecutor(max_workers=64)
         while True:
             if self.stop_flag: