
Increase ThreadPoolExecutor max_workers

The existing max_workers setting for the ThreadPoolExecutor caps the
number of nodes that can be deployed concurrently at 16. This change
raises the limit to 64, allowing more threads to run in parallel so
the operator can deploy more nodes at once.
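
For context, the drivers below all follow the same pattern: per-node subtasks are submitted to a thread pool, and max_workers is the hard ceiling on how many of those futures run at the same time, so a fleet larger than the ceiling is processed in waves. A minimal sketch of that pattern, assuming a hypothetical deploy_node function and node list standing in for the drivers' real subtask logic:

import concurrent.futures
import time

def deploy_node(node_name):
    # Hypothetical stand-in for a driver's per-node subtask.
    time.sleep(1)
    return node_name

nodes = ["node%02d" % i for i in range(100)]

# max_workers is the ceiling on concurrently running futures: at 16 the
# 100 nodes are deployed in waves of 16; at 64 most of them run at once.
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
    subtask_futures = dict()
    for n in nodes:
        subtask_futures[n] = e.submit(deploy_node, n)
    for name, future in subtask_futures.items():
        future.result()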

Change-Id: I6d1dfa7ad8f5e03a8328311e6b59ee116a250462
changes/32/781932/8
Phil Sphicas, 6 months ago
parent commit 3cbeb29f2d
Changed files:

  1. python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py (2 changed lines)
  2. python/drydock_provisioner/drivers/node/maasdriver/driver.py (2 changed lines)
  3. python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py (2 changed lines)
  4. python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py (2 changed lines)
  5. python/drydock_provisioner/drivers/oob/redfish_driver/driver.py (2 changed lines)
  6. python/drydock_provisioner/orchestrator/actions/orchestrator.py (4 changed lines)
  7. python/drydock_provisioner/orchestrator/orchestrator.py (2 changed lines)

python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py (2 changed lines)

@@ -81,7 +81,7 @@ class PromenadeDriver(KubernetesDriver):
         else:
             target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor() as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 prom_client = PromenadeClient()
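
A side note on this hunk (and the similar one in orchestrator/actions/orchestrator.py below), where the executor was previously constructed with no argument: on Python 3.8 and later, ThreadPoolExecutor() defaults max_workers to min(32, os.cpu_count() + 4), while earlier 3.x releases use 5 * os.cpu_count(), so the old code was already implicitly capped; the change makes the ceiling explicit and raises it to 64. A quick way to see the implicit 3.8+ default on a given host:

import os

# Default worker cap used by ThreadPoolExecutor() on Python 3.8+ when
# max_workers is not given; the hunk above replaces this implicit cap
# with an explicit max_workers=64.
implicit_cap = min(32, (os.cpu_count() or 1) + 4)
print("implicit default cap on this host:", implicit_cap)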

python/drydock_provisioner/drivers/node/maasdriver/driver.py (2 changed lines)

@@ -149,7 +149,7 @@ class MaasNodeDriver(NodeDriver):
         else:
             target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             maas_client = MaasRequestFactory(
                 config.config_mgr.conf.maasdriver.maas_api_url,

python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py (2 changed lines)

@@ -90,7 +90,7 @@ class LibvirtDriver(oob_driver.OobDriver):
         target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])

python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py (2 changed lines)

@@ -94,7 +94,7 @@ class PyghmiDriver(oob_driver.OobDriver):
         target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])

python/drydock_provisioner/drivers/oob/redfish_driver/driver.py (2 changed lines)

@@ -106,7 +106,7 @@ class RedfishDriver(oob_driver.OobDriver):
         target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])

python/drydock_provisioner/orchestrator/actions/orchestrator.py (4 changed lines)

@@ -52,7 +52,7 @@ class BaseAction(object):
         """
         task_futures = dict()
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as te:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
             for t in subtask_id_list:
                 task_futures[t.bytes] = te.submit(fn, t, *args, **kwargs)

@@ -76,7 +76,7 @@ class BaseAction(object):
                     self.task.get_id()))
         split_tasks = dict()
-        with concurrent.futures.ThreadPoolExecutor() as te:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
             for n in target_nodes:
                 split_task = self.orchestrator.create_task(
                     design_ref=self.task.design_ref,

python/drydock_provisioner/orchestrator/orchestrator.py (2 changed lines)

@@ -129,7 +129,7 @@ class Orchestrator(object):
         # Loop trying to claim status as the active orchestrator
-        tp = concurrent.futures.ThreadPoolExecutor(max_workers=16)
+        tp = concurrent.futures.ThreadPoolExecutor(max_workers=64)
         while True:
             if self.stop_flag:
