From 3cbeb29f2dc925d633b4e3ee122b4812726d9e5d Mon Sep 17 00:00:00 2001
From: Phil Sphicas
Date: Sat, 20 Mar 2021 05:52:12 +0000
Subject: [PATCH] Increase ThreadPoolExecutor max_workers

The existing max_workers setting for the ThreadPoolExecutor caps the
number of nodes that can be deployed concurrently at 16. This change
raises the limit to 64, allowing more threads to run in parallel so
that the operator can deploy more nodes at once.

Change-Id: I6d1dfa7ad8f5e03a8328311e6b59ee116a250462
---
 .../drivers/kubernetes/promenade_driver/driver.py             | 2 +-
 python/drydock_provisioner/drivers/node/maasdriver/driver.py  | 2 +-
 .../drydock_provisioner/drivers/oob/libvirt_driver/driver.py  | 2 +-
 .../drydock_provisioner/drivers/oob/pyghmi_driver/driver.py   | 2 +-
 .../drydock_provisioner/drivers/oob/redfish_driver/driver.py  | 2 +-
 .../drydock_provisioner/orchestrator/actions/orchestrator.py  | 4 ++--
 python/drydock_provisioner/orchestrator/orchestrator.py       | 2 +-
 7 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py
index 583d97c3..c22e838a 100644
--- a/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py
+++ b/python/drydock_provisioner/drivers/kubernetes/promenade_driver/driver.py
@@ -81,7 +81,7 @@ class PromenadeDriver(KubernetesDriver):
         else:
             target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor() as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 prom_client = PromenadeClient()
diff --git a/python/drydock_provisioner/drivers/node/maasdriver/driver.py b/python/drydock_provisioner/drivers/node/maasdriver/driver.py
index 6ea8a08f..be604697 100644
--- a/python/drydock_provisioner/drivers/node/maasdriver/driver.py
+++ b/python/drydock_provisioner/drivers/node/maasdriver/driver.py
@@ -149,7 +149,7 @@ class MaasNodeDriver(NodeDriver):
         else:
             target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             maas_client = MaasRequestFactory(
                 config.config_mgr.conf.maasdriver.maas_api_url,
diff --git a/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py b/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py
index 1d34bec9..d3762142 100644
--- a/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py
+++ b/python/drydock_provisioner/drivers/oob/libvirt_driver/driver.py
@@ -90,7 +90,7 @@ class LibvirtDriver(oob_driver.OobDriver):
 
         target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])
diff --git a/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py b/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py
index 5086cc5c..08951c53 100644
--- a/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py
+++ b/python/drydock_provisioner/drivers/oob/pyghmi_driver/driver.py
@@ -94,7 +94,7 @@ class PyghmiDriver(oob_driver.OobDriver):
 
         target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])
diff --git a/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py b/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py
index fab298da..27905e8c 100644
--- a/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py
+++ b/python/drydock_provisioner/drivers/oob/redfish_driver/driver.py
@@ -106,7 +106,7 @@ class RedfishDriver(oob_driver.OobDriver):
 
         target_nodes = self.orchestrator.get_target_nodes(task)
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as e:
             subtask_futures = dict()
             for n in target_nodes:
                 sub_nf = self.orchestrator.create_nodefilter_from_nodelist([n])
diff --git a/python/drydock_provisioner/orchestrator/actions/orchestrator.py b/python/drydock_provisioner/orchestrator/actions/orchestrator.py
index 21f11a33..1632e80b 100644
--- a/python/drydock_provisioner/orchestrator/actions/orchestrator.py
+++ b/python/drydock_provisioner/orchestrator/actions/orchestrator.py
@@ -52,7 +52,7 @@ class BaseAction(object):
         """
         task_futures = dict()
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as te:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
             for t in subtask_id_list:
                 task_futures[t.bytes] = te.submit(fn, t, *args, **kwargs)
 
@@ -76,7 +76,7 @@ class BaseAction(object):
                     self.task.get_id()))
         split_tasks = dict()
 
-        with concurrent.futures.ThreadPoolExecutor() as te:
+        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
             for n in target_nodes:
                 split_task = self.orchestrator.create_task(
                     design_ref=self.task.design_ref,
diff --git a/python/drydock_provisioner/orchestrator/orchestrator.py b/python/drydock_provisioner/orchestrator/orchestrator.py
index 0e8b7741..fcdeeff6 100644
--- a/python/drydock_provisioner/orchestrator/orchestrator.py
+++ b/python/drydock_provisioner/orchestrator/orchestrator.py
@@ -129,7 +129,7 @@ class Orchestrator(object):
 
         # Loop trying to claim status as the active orchestrator
 
-        tp = concurrent.futures.ThreadPoolExecutor(max_workers=16)
+        tp = concurrent.futures.ThreadPoolExecutor(max_workers=64)
 
         while True:
             if self.stop_flag:
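
For context: max_workers is a hard ceiling on how many submitted callables
execute at once; further submissions queue until a worker thread frees up.
The bare ThreadPoolExecutor() call sites touched above were also capped,
since the default is (os.cpu_count() or 1) * 5 workers on Python 3.5-3.7
and min(32, os.cpu_count() + 4) on Python 3.8+. Below is a minimal,
self-contained sketch (not part of the patch) demonstrating the cap; the
node names and sleep duration are invented stand-ins for the per-node
driver subtasks Drydock would actually submit.

import concurrent.futures
import threading
import time

active = 0  # number of deploy_node calls currently running
peak = 0    # highest concurrency observed
lock = threading.Lock()

def deploy_node(name):
    """Stand-in for a per-node driver subtask (hypothetical)."""
    global active, peak
    with lock:
        active += 1
        peak = max(peak, active)
    time.sleep(0.1)  # simulate a long-running deployment step
    with lock:
        active -= 1
    return name

nodes = ["node%02d" % i for i in range(100)]

# With max_workers=16 the pool never runs more than 16 tasks at once,
# no matter how many are submitted; raising it to 64 widens the ceiling.
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as e:
    futures = {e.submit(deploy_node, n): n for n in nodes}
    for f in concurrent.futures.as_completed(futures):
        f.result()

print("peak concurrent tasks:", peak)  # always <= 16

A larger pool only pays off because these subtasks are I/O-bound,
spending most of their time waiting on MAAS, Redfish, IPMI, libvirt, or
Promenade APIs; for CPU-bound work the GIL would make the extra threads
counterproductive.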