Revert "Update volume-status waiter for new cinder attach"
This reverts commit 030dd17f5e0862384dec4c0b201d66f97074c5fb.

The reverted change masks a behavior change in the compute API when
attaching a volume to a shelved offloaded instance, as indicated by the
need to change Tempest to wait for the volume to be 'reserved' rather
than 'in-use' in that scenario with the new Cinder v3 attach flow.

This is a problem that needs to be handled in Nova to keep API
compatibility with the old volume attachment flow. Nova can either do
that under the covers or add a new microversion for callers to opt into
the new behavior, but masking the change in Tempest is wrong.

The related Nova change: Ifc01dbf98545104c998ab96f65ff8623a6db0f28

Change-Id: If8e1f42d853f366a399c9e454a80ba3bf8cd136e
parent 420f97383a
commit b36186ba7c
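For context, the status difference described above is what the reverted
waiter change accommodated: under the old attach flow a volume attached
to a shelved offloaded server reaches 'in-use', while the new Cinder v3
flow leaves it 'reserved'. A minimal polling sketch of that multi-status
wait follows; the `client` object (with `show_volume`, `build_interval`,
and `build_timeout`) is a hypothetical stand-in for a Tempest-style
volumes client, not part of this change.

    import time

    def wait_for_any_volume_status(client, volume_id, statuses):
        # Poll until the volume reaches any of the acceptable statuses.
        # Accepting several statuses is what the reverted change added,
        # e.g. statuses=['in-use', 'reserved'] for a shelved server.
        start = int(time.time())
        status = client.show_volume(volume_id)['volume']['status']
        while status not in statuses:
            if int(time.time()) - start >= client.build_timeout:
                raise RuntimeError('volume %s never reached %s (last: %s)'
                                   % (volume_id, statuses, status))
            time.sleep(client.build_interval)
            status = client.show_volume(volume_id)['volume']['status']
        return status

After this revert, Tempest goes back to waiting for 'in-use' only, as
the diff below shows.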
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -439,7 +439,7 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
             # is already detached.
             pass
 
-    def attach_volume(self, server, volume, device=None, check_reserved=False):
+    def attach_volume(self, server, volume, device=None):
         """Attaches volume to server and waits for 'in-use' volume status.
 
         The volume will be detached when the test tears down.
@@ -448,15 +448,10 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
         :param volume: The volume to attach.
         :param device: Optional mountpoint for the attached volume. Note that
             this is not guaranteed for all hypervisors and is not recommended.
-        :param check_reserved: Consider a status of reserved as valid for
-            completion. This is to handle new Cinder attach where we more
-            accurately use 'reserved' for things like attaching to a shelved
-            server.
         """
         attach_kwargs = dict(volumeId=volume['id'])
         if device:
             attach_kwargs['device'] = device
-
         attachment = self.servers_client.attach_volume(
             server['id'], **attach_kwargs)['volumeAttachment']
         # On teardown detach the volume and wait for it to be available. This
@@ -467,11 +462,8 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
         # Ignore 404s on detach in case the server is deleted or the volume
         # is already detached.
         self.addCleanup(self._detach_volume, server, volume)
-        statuses = ['in-use']
-        if check_reserved:
-            statuses.append('reserved')
         waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], statuses)
+                                                volume['id'], 'in-use')
         return attachment
 
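One detail worth noting in the hunks above: attach_volume() registers
self._detach_volume with addCleanup, and testtools runs cleanups in LIFO
order at teardown, so the volume is detached before cleanups registered
earlier (such as resource deletion) fire. A minimal, self-contained
sketch of that ordering, with purely illustrative event names:

    import testtools

    class CleanupOrderExample(testtools.TestCase):
        def test_lifo_cleanup_order(self):
            events = []
            self.addCleanup(events.append, 'delete server')  # registered first
            self.addCleanup(events.append, 'detach volume')  # registered last
            # At teardown 'detach volume' runs before 'delete server',
            # mirroring how attach_volume() queues the detach after the
            # server's own cleanups already exist.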
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -223,8 +223,7 @@ class AttachVolumeShelveTestJSON(AttachVolumeTestJSON):
         num_vol = self._count_volumes(server, validation_resources)
         self._shelve_server(server, validation_resources)
         attachment = self.attach_volume(server, volume,
-                                        device=('/dev/%s' % self.device),
-                                        check_reserved=True)
+                                        device=('/dev/%s' % self.device))
 
         # Unshelve the instance and check that attached volume exists
         self._unshelve_server_and_check_volumes(
@@ -250,8 +249,7 @@ class AttachVolumeShelveTestJSON(AttachVolumeTestJSON):
         self._shelve_server(server, validation_resources)
 
         # Attach and then detach the volume
-        self.attach_volume(server, volume, device=('/dev/%s' % self.device),
-                           check_reserved=True)
+        self.attach_volume(server, volume, device=('/dev/%s' % self.device))
         self.servers_client.detach_volume(server['id'], volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -179,15 +179,13 @@ def wait_for_image_status(client, image_id, status):
         raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_resource_status(client, resource_id, statuses):
-    """Waits for a volume resource to reach any of the specified statuses.
+def wait_for_volume_resource_status(client, resource_id, status):
+    """Waits for a volume resource to reach a given status.
 
     This function is a common function for volume, snapshot and backup
     resources. The function extracts the name of the desired resource from
     the client class name of the resource.
     """
-    if not isinstance(statuses, list):
-        statuses = [statuses]
     resource_name = re.findall(
         r'(volume|group-snapshot|snapshot|backup|group)',
         client.resource_type)[-1].replace('-', '_')
@@ -195,11 +193,11 @@ def wait_for_volume_resource_status(client, resource_id, statuses):
     resource_status = show_resource(resource_id)[resource_name]['status']
     start = int(time.time())
 
-    while resource_status not in statuses:
+    while resource_status != status:
         time.sleep(client.build_interval)
         resource_status = show_resource(resource_id)[
             '{}'.format(resource_name)]['status']
-        if resource_status == 'error' and resource_status not in statuses:
+        if resource_status == 'error' and resource_status != status:
             raise exceptions.VolumeResourceBuildErrorException(
                 resource_name=resource_name, resource_id=resource_id)
         if resource_name == 'volume' and resource_status == 'error_restoring':
@@ -208,11 +206,11 @@ def wait_for_volume_resource_status(client, resource_id, statuses):
         if int(time.time()) - start >= client.build_timeout:
             message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
-                       (resource_name, resource_id, statuses, resource_status,
+                       (resource_name, resource_id, status, resource_status,
                         client.build_timeout))
             raise lib_exc.TimeoutException(message)
     LOG.info('%s %s reached %s after waiting for %f seconds',
-             resource_name, resource_id, statuses, time.time() - start)
+             resource_name, resource_id, status, time.time() - start)
 
 
 def wait_for_volume_retype(client, volume_id, new_volume_type):
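As the waiter's docstring says, it derives the resource name from the
client's resource_type and uses it to build the matching show call (the
show_resource lookup visible in the second hunk). An illustrative
snippet of that extraction, with assumed sample resource_type values:

    import re

    for resource_type in ('volume', 'volume-snapshot', 'group-snapshot'):
        name = re.findall(
            r'(volume|group-snapshot|snapshot|backup|group)',
            resource_type)[-1].replace('-', '_')
        print('%s -> %s' % (resource_type, name))

    # volume -> volume
    # volume-snapshot -> snapshot
    # group-snapshot -> group_snapshot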