Fixes: BFV VM reboot ends up in an error state.

We only need to verify that the BDM has an attachment ID, and that the attachment is present in both the Nova and Cinder DBs.

For test coverage, added tests for BFV servers exercising different BDM source types.

Closes-Bug: 2048154
Closes-Bug: 2048184
Change-Id: Icffcbad27d99a800e3f285565c0b823f697e388c
This commit is contained in:
Amit Uniyal 2024-01-05 08:41:29 +00:00
parent 12ca930e45
commit b5173b4192
4 changed files with 108 additions and 9 deletions

View File

@ -4222,8 +4222,7 @@ class ComputeManager(manager.Manager):
nova_attachments = []
bdms_to_delete = []
for bdm in bdms.objects:
if bdm.volume_id and bdm.source_type == 'volume' and \
bdm.destination_type == 'volume':
if bdm.volume_id and bdm.attachment_id:
try:
self.volume_api.attachment_get(context, bdm.attachment_id)
except exception.VolumeAttachmentNotFound:

View File

@ -101,7 +101,9 @@ class CinderFixture(fixtures.Fixture):
# multi-attach, as some flows create a blank 'reservation' attachment
# before deleting another attachment. However, a non-multiattach volume
# can only have at most one attachment with a host connector at a time.
self.volumes = collections.defaultdict(dict)
self.volume_to_attachment = collections.defaultdict(dict)
self.volume_snapshots = collections.defaultdict(dict)
def setUp(self):
super().setUp()
@ -165,6 +167,15 @@ class CinderFixture(fixtures.Fixture):
self.useFixture(fixtures.MockPatch(
'nova.volume.cinder.API.attachment_get_all',
side_effect=self.fake_attachment_get_all, autospec=False))
self.useFixture(fixtures.MockPatch(
'nova.volume.cinder.API.create_snapshot_force',
side_effect=self.fake_create_snapshot_force, autospec=False))
self.useFixture(fixtures.MockPatch(
'nova.volume.cinder.API.get_snapshot',
side_effect=self.fake_get_snapshot, autospec=False))
self.useFixture(fixtures.MockPatch(
'nova.volume.cinder.API.create',
side_effect=self.fake_vol_create, autospec=False))
def _is_multiattach(self, volume_id):
return volume_id in [
@ -218,13 +229,16 @@ class CinderFixture(fixtures.Fixture):
return {'save_volume_id': new_volume_id}
def fake_get(self, context, volume_id, microversion=None):
volume = {
'display_name': volume_id,
'id': volume_id,
'size': 1,
'multiattach': self._is_multiattach(volume_id),
'availability_zone': self.az
}
if volume_id in self.volumes:
volume = self.volumes[volume_id]
else:
volume = {
'display_name': volume_id,
'id': volume_id,
'size': 1,
'multiattach': self._is_multiattach(volume_id),
'availability_zone': self.az
}
# Add any attachment details the fixture has
fixture_attachments = self.volume_to_attachment[volume_id]
@ -466,3 +480,36 @@ class CinderFixture(fixtures.Fixture):
def delete_vol_attachment(self, vol_id):
    """Forget every fixture-tracked attachment for the given volume."""
    # pop() raises KeyError for an unknown volume, same as del would.
    self.volume_to_attachment.pop(vol_id)
def fake_create_snapshot_force(self, _ctxt, volume_id, name, description):
    """Register a fake snapshot of *volume_id* and return its dict.

    Mirrors Cinder's snapshot-create API: the snapshot starts in the
    'creating' state and is tracked in self.volume_snapshots keyed by
    its generated id.
    """
    snapshot_id = uuidutils.generate_uuid()
    self.volume_snapshots[snapshot_id] = {
        'id': snapshot_id,
        'volume_id': volume_id,
        'display_name': name,
        'display_description': description,
        'status': 'creating',
    }
    return self.volume_snapshots[snapshot_id]
def fake_get_snapshot(self, _ctxt, snap_id):
    """Return the tracked snapshot dict for *snap_id*.

    A known snapshot is flipped to 'available' before being returned,
    since by lookup time the instance has been unquiesced and the
    snapshot is considered complete.
    """
    known = snap_id in self.volume_snapshots
    snapshot = self.volume_snapshots[snap_id]
    if known:
        snapshot['status'] = 'available'
    return snapshot
def fake_vol_create(self, _ctxt, size, name, description,
                    snapshot=None, image_id=None, volume_type=None,
                    metadata=None, availability_zone=None):
    """Create and track a fake detached, 'available' volume.

    Only size, name and description influence the resulting volume;
    the remaining keyword arguments match the real Cinder API
    signature and are ignored by the fixture.

    Returns the volume dict that was stored in self.volumes.
    """
    volume_id = uuidutils.generate_uuid()
    new_volume = dict(
        id=volume_id,
        status='available',
        display_name=name or 'fake-cinder-vol',
        attach_status='detached',
        size=size,
        display_description=description or 'fake-description',
    )
    self.volumes[volume_id] = new_volume
    return new_volume

View File

@ -27,6 +27,7 @@ import time
from oslo_concurrency import lockutils
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
@ -702,6 +703,42 @@ class InstanceHelperMixin:
self.cinder.create_vol_attachment(
volume_id, server['id'])
def _create_server_boot_from_volume(self):
    """Boot a server from a volume built out of a fake Glance image.

    Registers a raw image with the Glance fixture, then requests a
    server with no imageRef whose root disk is a 1 GB volume created
    from that image via block_device_mapping_v2 (source_type 'image',
    destination_type 'volume').

    Returns the server once it reaches ACTIVE.
    """
    image_id = uuids.bfv_image_uuid
    created = datetime.datetime(2011, 1, 1, 1, 2, 3)
    self.glance.create(None, {
        'id': image_id,
        'name': 'fake_image_name',
        'created_at': created,
        'updated_at': created,
        'deleted_at': None,
        'deleted': False,
        'status': 'active',
        'container_format': 'raw',
        'disk_format': 'raw',
        'min_disk': 0,
    })
    # A boot-from-volume request carries no imageRef; the image is
    # referenced through the block device mapping instead.
    request = self._build_server()
    request.pop('imageRef')
    request['block_device_mapping_v2'] = [{
        'source_type': 'image',
        'destination_type': 'volume',
        'boot_index': 0,
        'uuid': image_id,
        'volume_size': 1,
    }]
    created_server = self.api.post_server({'server': request})
    return self._wait_for_state_change(created_server, 'ACTIVE')
class PlacementHelperMixin:
"""A helper mixin for interacting with placement."""

View File

@ -200,6 +200,22 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
self.assertIn('You specified more local devices than the limit allows',
str(ex))
def test_reboot_bfv_instance(self):
    """Hard reboot a BFV server whose root BDM is image -> volume."""
    server = self._create_server_boot_from_volume()
    rebooted = self._reboot_server(server, hard=True)
    self.assertEqual('ACTIVE', rebooted['status'])
def test_reboot_bfv_instance_snapshot(self):
    """Hard reboot a BFV server built from a snapshot image.

    Covers a BDM with source_type 'snapshot' / destination_type
    'volume': snapshot the original BFV server, boot a second server
    from the resulting snapshot image, then hard reboot the second
    server and verify it comes back ACTIVE.
    """
    server = self._create_server_boot_from_volume()
    self._snapshot_server(server, 'snap1')
    images = self.api.get_images()
    snap_img = [img for img in images if img['name'] == 'snap1'][0]
    server1 = self._create_server(image_uuid=snap_img['id'])
    # Bug fix: assert on the server returned by the reboot of server1.
    # The original asserted on the stale `server` dict captured before
    # any reboot, which is trivially ACTIVE and never verified the
    # rebooted server's state.
    server1 = self._reboot_server(server1, hard=True)
    self.assertEqual('ACTIVE', server1['status'])
class BootFromVolumeLargeRequestTest(test.TestCase,
integrated_helpers.InstanceHelperMixin):