Switch to concurrent.futures for container prepare/upload
Using process-based concurrency doesn't work with Mistral's threading model. Switching to concurrent.futures allows the threading executor to be used instead.

Change-Id: I2c7a1a7cb8d8e195e041b5b185c1ceba3a6d0e86
parent ca06bedb41
commit f76ae7f854
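For context on the mechanical change below: both multiprocessing.Pool and concurrent.futures.ThreadPoolExecutor expose a map() method, so the swap is close to a drop-in replacement; the difference is that the work now runs in threads of the current process instead of forked worker processes, which is what avoids the conflict with the Mistral executor's threading model. A minimal sketch, with work() and items as purely illustrative stand-ins:

    from concurrent import futures
    import multiprocessing


    def work(item):
        # stand-in for a per-image task such as an inspect or upload
        return item * 2


    if __name__ == '__main__':
        items = [1, 2, 3, 4]

        # process-based: forks worker processes, which clashes with a
        # threaded service such as the mistral-executor
        pool = multiprocessing.Pool(4)
        print(pool.map(work, items))            # [2, 4, 6, 8]
        pool.close()
        pool.join()

        # thread-based: same map() call shape, but the work stays
        # in-process; note map() returns an iterator here, not a list
        executor = futures.ThreadPoolExecutor(max_workers=4)
        print(list(executor.map(work, items)))  # [2, 4, 6, 8]
        executor.shutdown()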
@@ -27,3 +27,4 @@ python-zaqarclient>=1.0.0 # Apache-2.0
 python-keystoneclient>=3.8.0 # Apache-2.0
 keystoneauth1>=3.4.0 # Apache-2.0
 tenacity>=4.4.0 # Apache-2.0
+futures>=3.0.0;python_version=='2.7' or python_version=='2.6' # BSD
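The only new requirement is the PyPI futures package, which backports concurrent.futures to Python 2; the environment marker limits it to 2.6/2.7, since Python 3 ships the module in the standard library. The same import then works on either interpreter, for example:

    # Python 2 (with the 'futures' backport installed) and Python 3
    # resolve this to the same API.
    from concurrent import futures

    with futures.ThreadPoolExecutor(max_workers=2) as executor:
        print(list(executor.map(len, ['abc', 'de'])))  # [3, 2]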
@@ -81,6 +81,7 @@ mistral.actions =
     tripleo.config.download_config = tripleo_common.actions.config:DownloadConfigAction
     tripleo.config.get_overcloud_config = tripleo_common.actions.config:GetOvercloudConfig
     tripleo.container_images.prepare = tripleo_common.actions.container_images:PrepareContainerImageEnv
+    tripleo.container_images.prepare_upload = tripleo_common.actions.container_images:PrepareUploadContainerImages
     tripleo.deployment.config = tripleo_common.actions.deployment:OrchestrationDeployAction
     tripleo.deployment.deploy = tripleo_common.actions.deployment:DeployStackAction
     tripleo.deployment.overcloudrc = tripleo_common.actions.deployment:OvercloudRcAction
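These setup.cfg entries register each action class under the mistral.actions entry-point namespace, which is how an action name such as tripleo.container_images.prepare_upload is resolved to its implementing class. A rough illustration of that lookup using pkg_resources (not necessarily how Mistral itself performs it):

    from pkg_resources import iter_entry_points

    # The class is only imported when load() is called.
    for ep in iter_entry_points('mistral.actions'):
        if ep.name == 'tripleo.container_images.prepare_upload':
            action_cls = ep.load()
            print(action_cls)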
@@ -15,9 +15,9 @@
 import abc
+from concurrent import futures
 import json
 import logging
-import multiprocessing
 import netifaces
 import os
 import requests
@@ -323,7 +323,7 @@ class DockerImageUploader(ImageUploader):
         for image in images:
             discover_args.append((image, tag_from_label,
                                   self.insecure_registries))
-        p = multiprocessing.Pool(16)
+        p = futures.ThreadPoolExecutor(max_workers=16)
 
         versioned_images = {}
         for image, versioned_image in p.map(discover_tag_from_inspect,
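Two behavioural details are worth noting with the executor swap here: ThreadPoolExecutor should be shut down (or used as a context manager) rather than close()/join()ed, and its map() returns a lazy iterator whose task exceptions only surface when the results are consumed. A minimal sketch of the same loop, with discover_tag_from_inspect stubbed out and the returned tag purely illustrative:

    from concurrent import futures


    def discover_tag_from_inspect(args):
        # stub for the real helper; it receives the
        # (image, tag_from_label, insecure_registries) tuple
        image, tag_from_label, insecure_registries = args
        return image, image + ':latest'


    discover_args = [('docker.io/t/foo', None, set()),
                     ('docker.io/t/bar', None, set())]

    versioned_images = {}
    with futures.ThreadPoolExecutor(max_workers=16) as p:
        # results come back in the same order as discover_args
        for image, versioned_image in p.map(discover_tag_from_inspect,
                                            discover_args):
            versioned_images[image] = versioned_image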
@@ -376,7 +376,7 @@ class DockerImageUploader(ImageUploader):
 
         # workers will be half the CPU count, to a minimum of 2
         workers = max(2, processutils.get_worker_count() // 2)
-        p = multiprocessing.Pool(workers)
+        p = futures.ThreadPoolExecutor(max_workers=workers)
 
         for result in p.map(docker_upload, self.upload_tasks):
             local_images.extend(result)
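The worker count comes from oslo's processutils.get_worker_count(), halved with a floor of two. A rough equivalent without the oslo dependency, assuming os.cpu_count() (Python 3) as the stand-in:

    from concurrent import futures
    import os

    # half the CPU count, but never fewer than 2 workers
    workers = max(2, (os.cpu_count() or 1) // 2)

    with futures.ThreadPoolExecutor(max_workers=workers) as p:
        results = list(p.map(str.upper, ['a', 'b', 'c']))  # ['A', 'B', 'C']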
@@ -396,7 +396,7 @@ class TestDockerImageUploader(base.TestCase):
             ('docker.io/t/foo', 'rdo_version', sr)
         )
 
-    @mock.patch('multiprocessing.Pool')
+    @mock.patch('concurrent.futures.ThreadPoolExecutor')
     def test_discover_image_tags(self, mock_pool):
         mock_pool.return_value.map.return_value = (
             ('docker.io/t/foo', 'a'),
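Since the uploader now instantiates concurrent.futures.ThreadPoolExecutor, the test patches that name instead of multiprocessing.Pool and feeds canned results through the mocked executor's map(). A self-contained sketch of the same pattern, with discover_versions() as a hypothetical stand-in for the code under test:

    import unittest
    from unittest import mock

    from concurrent import futures


    def discover_versions(images):
        # hypothetical code under test: fans an inspect helper out over images
        p = futures.ThreadPoolExecutor(max_workers=16)
        return dict(p.map(lambda i: (i, i + ':a'), images))


    class TestDiscoverVersions(unittest.TestCase):

        @mock.patch('concurrent.futures.ThreadPoolExecutor')
        def test_discover_versions(self, mock_pool):
            # the mocked executor's map() returns pre-baked results
            mock_pool.return_value.map.return_value = (
                ('docker.io/t/foo', 'docker.io/t/foo:a'),
            )
            result = discover_versions(['docker.io/t/foo'])
            self.assertEqual({'docker.io/t/foo': 'docker.io/t/foo:a'}, result)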