Replace Eventlet with concurrent.futures
- Removed dependencies on Eventlet for asynchronous execution
- Implemented thread pool executor using 'concurrent.futures'
- Updated code to submit tasks and manage futures with futures.ThreadPoolExecutor
- Ensured proper waiting for task completion with futures.wait()

Change-Id: Iec6893666c25e03c6d1a6246d41759dc4bff3b24
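For context, here is a minimal, self-contained sketch of the submit-and-wait pattern this change adopts (illustrative only; upload_part and chunks are placeholder names, not the driver's actual helpers, which submit run_upload with an UploadPart per chunk):

    from concurrent import futures

    def upload_part(data):
        # Placeholder worker; the real driver submits run_upload() per part.
        return len(data)

    chunks = [b'part-1', b'part-2', b'part-3']

    # Bounded pool, sized like s3_store_thread_pools in the driver.
    with futures.ThreadPoolExecutor(max_workers=2) as executor:
        # Submit one task per chunk and keep the returned Future objects.
        futures_list = [executor.submit(upload_part, chunk) for chunk in chunks]
        # Block until every submitted task has finished.
        done, not_done = futures.wait(futures_list)

    print(sorted(f.result() for f in done))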
@@ -15,6 +15,7 @@
 """Storage backend for S3 or Storage Servers that follow the S3 Protocol"""
 
+from concurrent import futures
 import io
 import logging
 import math
@@ -32,7 +33,6 @@ except ImportError:
     boto_exceptions = None
     boto_utils = None
 
-import eventlet
 from oslo_config import cfg
 from oslo_utils import encodeutils
 from oslo_utils import units
@@ -726,7 +726,11 @@ class Store(glance_store.driver.Store):
         os_hash_value = utils.get_hasher(hashing_algo, False)
         checksum = utils.get_hasher('md5', False)
         pool_size = self.s3_store_thread_pools
-        pool = eventlet.greenpool.GreenPool(size=pool_size)
+        # Replace eventlet.GreenPool with ThreadPoolExecutor
+        with futures.ThreadPoolExecutor(
+                max_workers=pool_size) as executor:
+            # Create a list to store the futures
+            futures_list = []
         mpu = s3_client.create_multipart_upload(Bucket=bucket, Key=key)
         upload_id = mpu['UploadId']
         LOG.debug("Multipart initiate key=%(key)s, UploadId=%(UploadId)s",
@@ -756,8 +760,11 @@ class Store(glance_store.driver.Store):
                         verifier.update(write_chunk)
                     fp = io.BytesIO(write_chunk)
                     fp.seek(0)
-                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
-                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
+                    part = UploadPart(
+                        mpu, fp, cstart + 1, len(write_chunk))
+                    # Spawn thread to upload part
+                    futures_list.append(executor.submit(
+                        run_upload, s3_client, bucket, key, part))
                     plist.append(part)
                     cstart += 1
                     buffered_chunk = remained_data
@@ -771,26 +778,23 @@ class Store(glance_store.driver.Store):
                         verifier.update(write_chunk)
                     fp = io.BytesIO(write_chunk)
                     fp.seek(0)
-                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
-                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
+                    part = UploadPart(
+                        mpu, fp, cstart + 1, len(write_chunk))
+                    futures_list.append(executor.submit(
+                        run_upload, s3_client, bucket, key, part))
                     plist.append(part)
                     break
 
-        pedict = {}
-        total_size = 0
-        pool.waitall()
+        # Wait for all uploads to finish
+        futures.wait(futures_list)
 
-        for part in plist:
-            pedict.update(part.etag)
-            total_size += part.size
+        # Check success status
+        success = all(p.success for p in plist)
+        total_size = sum(p.size for p in plist)
 
-        success = True
-        for part in plist:
-            if not part.success:
-                success = False
-
         if success:
             # Complete
+            pedict = {p.partnum: p.etag[p.partnum] for p in plist}
             mpu_list = self._get_mpu_list(pedict)
             s3_client.complete_multipart_upload(Bucket=bucket,
                                                 Key=key,
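A general note on the concurrent.futures API used above (standard library behavior, not something introduced by this patch): leaving the with futures.ThreadPoolExecutor(...) block calls shutdown(wait=True), so the executor waits for outstanding tasks before exiting, and any submit() issued after the block has closed raises RuntimeError. A small sketch with a hypothetical task() function:

    from concurrent import futures

    def task():
        return 42

    with futures.ThreadPoolExecutor(max_workers=1) as executor:
        fut = executor.submit(task)   # allowed: the pool is still open

    print(fut.result())               # 42; shutdown waited for the task

    try:
        executor.submit(task)         # pool was shut down when the with-block exited
    except RuntimeError as err:
        print(err)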