Add functional tests to ensure BDM removal on delete

In certain cases, such as when an instance fails to be scheduled,
the volume may already have an attachment created (or the volume
has been reserved in the old flows).

This patch adds a test to check that these volume attachments
are deleted and removed once the instance has been deleted. It
also adds some functionality to allow checking when a volume
has been reserved in the Cinder fixtures.

Change-Id: I85cc3998fbcde30eefa5429913ca287246d51255
Related-Bug: #1404867
This commit is contained in:
Mohammed Naser 2018-02-14 16:26:38 -05:00
parent ad9e2a568f
commit 20edeb3623
2 changed files with 136 additions and 2 deletions

View File

@ -1318,6 +1318,7 @@ class CinderFixture(fixtures.Fixture):
self.swap_error = False
self.swap_volume_instance_uuid = None
self.swap_volume_instance_error_uuid = None
self.reserved_volumes = list()
# This is a map of instance UUIDs mapped to a list of volume IDs.
# This map gets updated on attach/detach operations.
self.attachments = collections.defaultdict(list)
@ -1378,8 +1379,9 @@ class CinderFixture(fixtures.Fixture):
break
else:
# This is a test that does not care about the actual details.
reserved_volume = (volume_id in self.reserved_volumes)
volume = {
'status': 'available',
'status': 'attaching' if reserved_volume else 'available',
'display_name': 'TEST2',
'attach_status': 'detached',
'id': volume_id,
@ -1409,7 +1411,16 @@ class CinderFixture(fixtures.Fixture):
new_volume_id, error):
return {'save_volume_id': new_volume_id}
def fake_reserve_volume(self_api, context, volume_id):
    # Record the reservation so tests can assert on it via
    # CinderFixture.reserved_volumes; the fixture's volume GET stub
    # reports a reserved volume's status as 'attaching'.
    self.reserved_volumes.append(volume_id)
def fake_unreserve_volume(self_api, context, volume_id):
    # NOTE(mnaser): It's possible that we unreserve a volume that was
    #               never reserved (ex: instance.volume_attach.error
    #               notification tests), so a missing entry is simply
    #               ignored rather than treated as an error.
    try:
        self.reserved_volumes.remove(volume_id)
    except ValueError:
        pass
# Signaling that swap_volume has encountered the error
# from initialize_connection and is working on rolling back
# the reservation on SWAP_ERR_NEW_VOL.
@ -1431,6 +1442,12 @@ class CinderFixture(fixtures.Fixture):
def fake_detach(_self, context, volume_id, instance_uuid=None,
attachment_id=None):
# NOTE(mnaser): It's possible that we unreserve a volume that was
# never reserved (ex: instance.volume_attach.error
# notification tests)
if volume_id in self.reserved_volumes:
self.reserved_volumes.remove(volume_id)
if instance_uuid is not None:
# If the volume isn't attached to this instance it will
# result in a ValueError which indicates a broken test or
@ -1454,7 +1471,7 @@ class CinderFixture(fixtures.Fixture):
'nova.volume.cinder.API.migrate_volume_completion',
fake_migrate_volume_completion)
self.test.stub_out('nova.volume.cinder.API.reserve_volume',
lambda *args, **kwargs: None)
fake_reserve_volume)
self.test.stub_out('nova.volume.cinder.API.roll_detaching',
lambda *args, **kwargs: None)
self.test.stub_out('nova.volume.cinder.API.terminate_connection',

View File

@ -0,0 +1,117 @@
# Copyright 2018 VEXXHOST, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.compute import api as compute_api
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
                                integrated_helpers.InstanceHelperMixin):
    """Regression test: deleting an ERROR instance with a reserved volume.

    A boot-from-volume server is created while no compute service is
    running, so scheduling fails and the instance ends up in ERROR state
    with no host assigned. Deleting it therefore takes the local-delete
    path, which should release any volume reservations or attachments
    that were created at the API layer.

    The regression being tracked is that Nova does not clean up the
    reserved volumes and the volume stays stuck in 'attaching' state.
    """
    api_major_version = 'v2.1'
    microversion = 'latest'

    def _setup_compute_service(self):
        # Deliberately skip starting a compute service so the scheduler
        # cannot find a valid host and the instance lands in ERROR.
        pass

    def _create_error_server(self, volume_id):
        # Boot a boot-from-volume server and wait until the scheduling
        # failure pushes it into the ERROR state.
        created = self.api.post_server({
            'server': {
                'flavorRef': '1',
                'name': 'bfv-delete-server-in-error-status',
                'networks': 'none',
                'block_device_mapping_v2': [
                    {
                        'boot_index': 0,
                        'uuid': volume_id,
                        'source_type': 'volume',
                        'destination_type': 'volume'
                    },
                ]
            }
        })
        return self._wait_for_state_change(self.api, created, 'ERROR')

    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=compute_api.BFV_RESERVE_MIN_COMPUTE_VERSION)
    def test_delete_with_reserved_volumes(self, mock_version_get=None):
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))

        # With no computes available, the server can only end up in
        # ERROR state.
        vol_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
        server = self._create_error_server(vol_id)

        # The API reserved the volume while building the server, so the
        # fixture should now be tracking it.
        self.assertIn(vol_id, self.cinder.reserved_volumes)

        # Deleting the server should delete its BDMs and release the
        # volume reservation.
        self.api.delete_server(server['id'])

        # Once the server and its resources are gone, the volume should
        # no longer be reserved.
        # TODO(mnaser): Uncomment this in patch resolving the issue
        # self.assertNotIn(vol_id, self.cinder.reserved_volumes)

        # The volume is still reserved at this point, which it should
        # not be.
        # TODO(mnaser): Remove this in patch resolving the issue
        self.assertIn(vol_id, self.cinder.reserved_volumes)

    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION)
    def test_delete_with_reserved_volumes_new(self, mock_version_get=None):
        self.cinder = self.useFixture(
            nova_fixtures.CinderFixtureNewAttachFlow(self))

        # With no computes available, the server can only end up in
        # ERROR state.
        # NOTE(review): this reuses CinderFixture.IMAGE_BACKED_VOL even
        # though the new-attach-flow fixture is in use — presumably both
        # fixtures share the same constant; confirm.
        vol_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
        server = self._create_error_server(vol_id)
        server_id = server['id']

        # The API created a volume attachment while building the
        # server.
        self.assertIn(vol_id, self.cinder.attachments[server_id])

        # Deleting the server should delete its BDMs and remove the
        # associated volume attachments.
        self.api.delete_server(server['id'])

        # Once the server has been deleted, the volume should have no
        # remaining attachments.
        # TODO(mnaser): Uncomment this in patch resolving the issue
        # self.assertNotIn(vol_id, self.cinder.attachments[server_id])

        # The volume still has attachments, which it should not have.
        # TODO(mnaser): Remove this in patch resolving the issue
        self.assertIn(vol_id, self.cinder.attachments[server_id])