diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 429ded57f1..ddb035dc1f 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -424,7 +424,7 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
                 LOG.exception('Waiting for deletion of volume %s failed',
                               volume['id'])
 
-    def attach_volume(self, server, volume, device=None):
+    def attach_volume(self, server, volume, device=None, check_reserved=False):
         """Attaches volume to server and waits for 'in-use' volume status.
 
         The volume will be detached when the test tears down.
@@ -433,10 +433,15 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
         :param volume: The volume to attach.
         :param device: Optional mountpoint for the attached volume. Note that
             this is not guaranteed for all hypervisors and is not recommended.
+        :param check_reserved: Consider a status of reserved as valid for
+            completion. This is to handle new Cinder attach where we more
+            accurately use 'reserved' for things like attaching to a shelved
+            server.
         """
         attach_kwargs = dict(volumeId=volume['id'])
         if device:
             attach_kwargs['device'] = device
+
         attachment = self.servers_client.attach_volume(
             server['id'], **attach_kwargs)['volumeAttachment']
         # On teardown detach the volume and wait for it to be available. This
@@ -449,8 +454,11 @@ class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest,
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.servers_client.detach_volume,
                         server['id'], volume['id'])
+        statuses = ['in-use']
+        if check_reserved:
+            statuses.append('reserved')
         waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'in-use')
+                                                volume['id'], statuses)
 
         return attachment
 
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ed5f9a606e..502bc1b335 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -212,7 +212,8 @@ class AttachVolumeShelveTestJSON(AttachVolumeTestJSON):
         num_vol = self._count_volumes(server)
         self._shelve_server(server)
         attachment = self.attach_volume(server, volume,
-                                        device=('/dev/%s' % self.device))
+                                        device=('/dev/%s' % self.device),
+                                        check_reserved=True)
 
         # Unshelve the instance and check that attached volume exists
         self._unshelve_server_and_check_volumes(server, num_vol + 1)
@@ -239,7 +240,8 @@ class AttachVolumeShelveTestJSON(AttachVolumeTestJSON):
         self._shelve_server(server)
 
         # Attach and then detach the volume
-        self.attach_volume(server, volume, device=('/dev/%s' % self.device))
+        self.attach_volume(server, volume, device=('/dev/%s' % self.device),
+                           check_reserved=True)
         self.servers_client.detach_volume(server['id'], volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 93e6fbf9c9..cf187e6bf4 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -179,25 +179,26 @@ def wait_for_image_status(client, image_id, status):
         raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_resource_status(client, resource_id, status):
-    """Waits for a volume resource to reach a given status.
+def wait_for_volume_resource_status(client, resource_id, statuses):
+    """Waits for a volume resource to reach any of the specified statuses.
 
     This function is a common function for volume, snapshot and backup
     resources. The function extracts the name of the desired resource from
     the client class name of the resource.
     """
-    resource_name = re.findall(
-        r'(Volume|Snapshot|Backup|Group)',
-        client.__class__.__name__)[0].lower()
+    if not isinstance(statuses, list):
+        statuses = [statuses]
+    resource_name = re.findall(r'(Volume|Snapshot|Backup|Group)',
+                               client.__class__.__name__)[0].lower()
     show_resource = getattr(client, 'show_' + resource_name)
     resource_status = show_resource(resource_id)[resource_name]['status']
     start = int(time.time())
-    while resource_status != status:
+    while resource_status not in statuses:
         time.sleep(client.build_interval)
         resource_status = show_resource(resource_id)[
             '{}'.format(resource_name)]['status']
-        if resource_status == 'error' and resource_status != status:
+        if resource_status == 'error' and resource_status not in statuses:
             raise exceptions.VolumeResourceBuildErrorException(
                 resource_name=resource_name,
                 resource_id=resource_id)
         if resource_name == 'volume' and resource_status == 'error_restoring':
@@ -206,7 +207,7 @@ def wait_for_volume_resource_status(client, resource_id, statuses):
         if int(time.time()) - start >= client.build_timeout:
             message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
-                       (resource_name, resource_id, status, resource_status,
+                       (resource_name, resource_id, statuses, resource_status,
                         client.build_timeout))
             raise lib_exc.TimeoutException(message)
 
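For reviewers, a minimal sketch (not part of the patch) of the backward-compatible
string-or-list handling the waiter change introduces. FakeVolumesClient below is a
hypothetical stand-in, not a real tempest client; it only needs 'Volume' in its
class name and a show_volume method to satisfy the waiter's lookup, and the snippet
assumes a tempest checkout is importable.

# Sketch only; FakeVolumesClient is a hypothetical stand-in client.
from tempest.common import waiters


class FakeVolumesClient(object):
    """'Volumes' in the class name is what the waiter's regex keys on."""
    build_interval = 1
    build_timeout = 10

    def __init__(self, status):
        self._status = status

    def show_volume(self, volume_id):
        # Mimics the volumes client response shape: {'volume': {...}}
        return {'volume': {'id': volume_id, 'status': self._status}}


# Existing callers keep passing a single string; the isinstance guard
# wraps it in a list, so no other call site in the tree has to change.
waiters.wait_for_volume_resource_status(
    FakeVolumesClient('available'), 'vol-1', 'available')

# The shelved-server path passes a list so that either the legacy
# 'in-use' or the new-style Cinder 'reserved' status ends the wait.
waiters.wait_for_volume_resource_status(
    FakeVolumesClient('reserved'), 'vol-1', ['in-use', 'reserved'])

The isinstance guard is the piece that keeps the many existing single-status
callers working unchanged while attach_volume opts in to the list form.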