Fix race between container create jobs during upload
During a segmented object upload, attempts are made to create the container
for the manifest and the segment container. These jobs are currently placed
on separate threads and can occur out of order, which causes the unit test
failure seen in the bug report. The container jobs should be ordered because
the segment container job attempts to HEAD the manifest container to
determine the correct storage policy. This patch enforces ordering of the
two jobs and modifies the unit test to assert that both container PUTs occur
in the expected order.

Change-Id: If90bec126867a4be2df34067ccefee660202f59f
Closes-Bug: #1376878
commit 589d34ecda
parent bbe3378ebb
swiftclient/service.py

@@ -1197,6 +1197,13 @@ class SwiftService(object):
                 )
             ]
 
+        # wait for first container job to complete before possibly attempting
+        # segment container job because segment container job may attempt
+        # to HEAD the first container
+        for r in interruptable_as_completed(create_containers):
+            res = r.result()
+            yield res
+
         if options['segment_size'] is not None:
             seg_container = container + '_segments'
             if options['segment_container']:
@@ -1206,23 +1213,23 @@ class SwiftService(object):
                 # rather than just letting swift pick the default storage
                 # policy, we'll try to create the segments container with the
                 # same as the upload container
-                create_containers.append(
-                    self.thread_manager.object_uu_pool.submit(
+                create_containers = [
+                    self.thread_manager.container_pool.submit(
                         self._create_container_job, seg_container,
                         policy_source=container
                     )
-                )
+                ]
             else:
-                create_containers.append(
-                    self.thread_manager.object_uu_pool.submit(
+                create_containers = [
+                    self.thread_manager.container_pool.submit(
                         self._create_container_job, seg_container,
                         headers=policy_header
                     )
-                )
+                ]
 
-        for r in interruptable_as_completed(create_containers):
-            res = r.result()
-            yield res
+            for r in interruptable_as_completed(create_containers):
+                res = r.result()
+                yield res
 
         # We maintain a results queue here and a separate thread to monitor
         # the futures because we want to get results back from potential
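The ordering enforced above is the usual wait-before-submit pattern with futures: the result of the first container job is consumed before the dependent segment container job is submitted. Below is a minimal standalone sketch of that pattern using concurrent.futures; the job functions are hypothetical stand-ins, not the actual SwiftService._create_container_job code.

# Illustrative sketch only (not python-swiftclient code): the second job
# depends on the first container existing, so its submission is delayed
# until the first future has completed.
from concurrent.futures import ThreadPoolExecutor, as_completed


def create_manifest_container():
    # stand-in for the manifest container PUT
    return 'manifest container created'


def create_segment_container():
    # stand-in for the segment container PUT; the real job HEADs the
    # manifest container first to copy its storage policy
    return 'segment container created'


with ThreadPoolExecutor(max_workers=2) as pool:
    first = [pool.submit(create_manifest_container)]
    # wait for the first container job before submitting the dependent job
    for future in as_completed(first):
        print(future.result())

    second = [pool.submit(create_segment_container)]
    for future in as_completed(second):
        print(future.result())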
tests/unit/test_shell.py

@@ -291,7 +291,7 @@ class TestShell(unittest.TestCase):
         argv = ["", "upload", "container", self.tmpfile,
                 "-H", "X-Storage-Policy:one"]
         swiftclient.shell.main(argv)
-        connection.return_value.put_container.assert_called_with(
+        connection.return_value.put_container.assert_called_once_with(
             'container',
             {'X-Storage-Policy': mock.ANY},
             response_dict={})
@@ -327,10 +327,13 @@ class TestShell(unittest.TestCase):
         with open(self.tmpfile, "wb") as fh:
             fh.write(b'12345678901234567890')
         swiftclient.shell.main(argv)
-        connection.return_value.put_container.assert_called_with(
-            'container_segments',
-            {'X-Storage-Policy': mock.ANY},
-            response_dict={})
+        expected_calls = [mock.call('container',
+                                    {'X-Storage-Policy': mock.ANY},
+                                    response_dict={}),
+                          mock.call('container_segments',
+                                    {'X-Storage-Policy': mock.ANY},
+                                    response_dict={})]
+        connection.return_value.put_container.has_calls(expected_calls)
         connection.return_value.put_object.assert_called_with(
             'container',
             self.tmpfile.lstrip('/'),
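On the test side, ordered call checks against a mock are normally expressed with a list of mock.call objects. Here is a minimal standalone sketch of that pattern using unittest.mock.assert_has_calls; the put_container mock below is illustrative, not the shell test's patched Connection.

# Illustrative sketch only: assert_has_calls checks that the given calls
# occur in this relative order unless any_order=True is passed.
from unittest import mock

put_container = mock.Mock()
put_container('container', {'X-Storage-Policy': 'one'}, response_dict={})
put_container('container_segments', {'X-Storage-Policy': 'one'},
              response_dict={})

expected_calls = [
    mock.call('container', {'X-Storage-Policy': mock.ANY}, response_dict={}),
    mock.call('container_segments', {'X-Storage-Policy': mock.ANY},
              response_dict={}),
]
put_container.assert_has_calls(expected_calls)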