Replace Eventlet with concurrent.futures

- Removed the dependency on Eventlet for asynchronous execution
- Implemented a thread pool executor using concurrent.futures
- Updated the code to submit tasks and manage futures with
  futures.ThreadPoolExecutor (see the sketch below)
- Waited for all submitted tasks to complete with futures.wait()
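The core pattern is the standard submit/wait idiom from the standard
library. A minimal standalone sketch, assuming a placeholder worker
(upload_part and its arguments are illustrative only, not the driver's
real run_upload signature):

    from concurrent import futures

    def upload_part(part_num, data):
        # Placeholder worker; the driver submits run_upload() with an
        # S3 client, bucket, key, and an UploadPart object instead.
        return part_num, len(data)

    with futures.ThreadPoolExecutor(max_workers=4) as executor:
        # submit() schedules each call on the pool and returns a Future.
        futures_list = [executor.submit(upload_part, i, b'x' * 1024)
                        for i in range(8)]
        # Block until every submitted task finishes; this mirrors the
        # GreenPool.waitall() call being replaced.
        futures.wait(futures_list)

    for f in futures_list:
        print(f.result())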

Change-Id: Iec6893666c25e03c6d1a6246d41759dc4bff3b24
Abhishek Kekane
2025-05-13 13:28:31 +00:00
parent 5aa31d5ece
commit 8ac98ddf61

@@ -15,6 +15,7 @@
 """Storage backend for S3 or Storage Servers that follow the S3 Protocol"""
 
+from concurrent import futures
 import io
 import logging
 import math
@@ -32,7 +33,6 @@ except ImportError:
boto_exceptions = None boto_exceptions = None
boto_utils = None boto_utils = None
import eventlet
from oslo_config import cfg from oslo_config import cfg
from oslo_utils import encodeutils from oslo_utils import encodeutils
from oslo_utils import units from oslo_utils import units
@@ -726,71 +726,75 @@ class Store(glance_store.driver.Store):
         os_hash_value = utils.get_hasher(hashing_algo, False)
         checksum = utils.get_hasher('md5', False)
         pool_size = self.s3_store_thread_pools
-        pool = eventlet.greenpool.GreenPool(size=pool_size)
-        mpu = s3_client.create_multipart_upload(Bucket=bucket, Key=key)
-        upload_id = mpu['UploadId']
-        LOG.debug("Multipart initiate key=%(key)s, UploadId=%(UploadId)s",
-                  {'key': key, 'UploadId': upload_id})
-        cstart = 0
-        plist = []
-
-        chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
-        write_chunk_size = max(self.s3_store_large_object_chunk_size,
-                               chunk_size)
-        it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
-        buffered_chunk = b''
-        while True:
-            try:
-                buffered_clen = len(buffered_chunk)
-                if buffered_clen < write_chunk_size:
-                    # keep reading data
-                    read_chunk = next(it)
-                    buffered_chunk += read_chunk
-                    continue
-                else:
-                    write_chunk = buffered_chunk[:write_chunk_size]
-                    remained_data = buffered_chunk[write_chunk_size:]
-                    os_hash_value.update(write_chunk)
-                    checksum.update(write_chunk)
-                    if verifier:
-                        verifier.update(write_chunk)
-                    fp = io.BytesIO(write_chunk)
-                    fp.seek(0)
-                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
-                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
-                    plist.append(part)
-                    cstart += 1
-                    buffered_chunk = remained_data
-            except StopIteration:
-                if len(buffered_chunk) > 0:
-                    # Write the last chunk data
-                    write_chunk = buffered_chunk
-                    os_hash_value.update(write_chunk)
-                    checksum.update(write_chunk)
-                    if verifier:
-                        verifier.update(write_chunk)
-                    fp = io.BytesIO(write_chunk)
-                    fp.seek(0)
-                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
-                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
-                    plist.append(part)
-                break
-
-        pedict = {}
-        total_size = 0
-        pool.waitall()
-        for part in plist:
-            pedict.update(part.etag)
-            total_size += part.size
-
-        success = True
-        for part in plist:
-            if not part.success:
-                success = False
+        # Replace eventlet.GreenPool with ThreadPoolExecutor
+        with futures.ThreadPoolExecutor(
+                max_workers=pool_size) as executor:
+            # Create a list to store the futures
+            futures_list = []
+            mpu = s3_client.create_multipart_upload(Bucket=bucket, Key=key)
+            upload_id = mpu['UploadId']
+            LOG.debug("Multipart initiate key=%(key)s, UploadId=%(UploadId)s",
+                      {'key': key, 'UploadId': upload_id})
+            cstart = 0
+            plist = []
+
+            chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
+            write_chunk_size = max(self.s3_store_large_object_chunk_size,
+                                   chunk_size)
+            it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
+            buffered_chunk = b''
+            while True:
+                try:
+                    buffered_clen = len(buffered_chunk)
+                    if buffered_clen < write_chunk_size:
+                        # keep reading data
+                        read_chunk = next(it)
+                        buffered_chunk += read_chunk
+                        continue
+                    else:
+                        write_chunk = buffered_chunk[:write_chunk_size]
+                        remained_data = buffered_chunk[write_chunk_size:]
+                        os_hash_value.update(write_chunk)
+                        checksum.update(write_chunk)
+                        if verifier:
+                            verifier.update(write_chunk)
+                        fp = io.BytesIO(write_chunk)
+                        fp.seek(0)
+                        part = UploadPart(
+                            mpu, fp, cstart + 1, len(write_chunk))
+                        # Spawn thread to upload part
+                        futures_list.append(executor.submit(
+                            run_upload, s3_client, bucket, key, part))
+                        plist.append(part)
+                        cstart += 1
+                        buffered_chunk = remained_data
+                except StopIteration:
+                    if len(buffered_chunk) > 0:
+                        # Write the last chunk data
+                        write_chunk = buffered_chunk
+                        os_hash_value.update(write_chunk)
+                        checksum.update(write_chunk)
+                        if verifier:
+                            verifier.update(write_chunk)
+                        fp = io.BytesIO(write_chunk)
+                        fp.seek(0)
+                        part = UploadPart(
+                            mpu, fp, cstart + 1, len(write_chunk))
+                        futures_list.append(executor.submit(
+                            run_upload, s3_client, bucket, key, part))
+                        plist.append(part)
+                    break
+
+            # Wait for all uploads to finish
+            futures.wait(futures_list)
+
+            # Check success status
+            success = all(p.success for p in plist)
+            total_size = sum(p.size for p in plist)
 
         if success:
             # Complete
+            pedict = {p.partnum: p.etag[p.partnum] for p in plist}
             mpu_list = self._get_mpu_list(pedict)
             s3_client.complete_multipart_upload(Bucket=bucket,
                                                 Key=key,
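A note on the wait step above: futures.wait() only blocks until the
futures complete; it does not re-raise exceptions from the workers, which
is why the driver checks the success flag that run_upload() records on
each UploadPart. A hedged sketch of an equivalent check that also
inspects the futures directly (futures_list and plist as in the diff; an
uncaught worker exception counts as a failure):

    # wait() returns a named (done, not_done) pair of future sets.
    done, not_done = futures.wait(futures_list)

    # exception() returns the exception a worker raised (or None)
    # without re-raising it, so failures are tallied, not propagated.
    failed = [f for f in done if f.exception() is not None]
    success = not failed and all(p.success for p in plist)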