Browse Source

Merge "libvirt: Add support for stable device rescue"

tags/21.0.0.0rc1
Zuul 3 months ago
committed by Gerrit Code Review
parent
commit
57ff308d6d
8 changed files with 558 additions and 49 deletions
  1. +9
    -0
      nova/exception.py
  2. +4
    -1
      nova/tests/unit/virt/libvirt/fake_imagebackend.py
  3. +251
    -14
      nova/tests/unit/virt/libvirt/test_blockinfo.py
  4. +143
    -13
      nova/tests/unit/virt/libvirt/test_driver.py
  5. +16
    -0
      nova/tests/unit/virt/test_hardware.py
  6. +7
    -0
      nova/virt/hardware.py
  7. +51
    -12
      nova/virt/libvirt/blockinfo.py
  8. +77
    -9
      nova/virt/libvirt/driver.py

+ 9
- 0
nova/exception.py View File

@@ -1393,6 +1393,15 @@ class UnsupportedHardware(Invalid):
"the '%(virt)s' virt driver")


class UnsupportedRescueBus(Invalid):
    # Raised when the hw_rescue_bus image property requests a disk bus that
    # is not valid for the configured virt driver (see
    # blockinfo.SUPPORTED_DEVICE_BUS / get_rescue_bus).
    msg_fmt = _("Requested rescue bus '%(bus)s' is not supported by "
                "the '%(virt)s' virt driver")


class UnsupportedRescueDevice(Invalid):
    # Raised when the hw_rescue_device image property requests a device type
    # outside blockinfo.SUPPORTED_DEVICE_TYPES (see get_rescue_device).
    msg_fmt = _("Requested rescue device '%(device)s' is not supported")


class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")



+ 4
- 1
nova/tests/unit/virt/libvirt/fake_imagebackend.py View File

@@ -207,7 +207,8 @@ class ImageBackendFixture(fixtures.Fixture):
self.imported_files.append((local_filename, remote_filename))

def _fake_libvirt_info(self, mock_disk, disk_info, cache_mode,
extra_specs, hypervisor_version, disk_unit=None):
extra_specs, hypervisor_version, disk_unit=None,
boot_order=None):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
@@ -218,4 +219,6 @@ class ImageBackendFixture(fixtures.Fixture):
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
if boot_order:
info.boot_order = boot_order
return info

+ 251
- 14
nova/tests/unit/virt/libvirt/test_blockinfo.py View File

@@ -76,22 +76,25 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.test_instance['old_flavor'] = None
self.test_instance['new_flavor'] = None

def test_volume_in_mapping(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
def _test_block_device_info(self, with_eph=True, with_swap=True,
with_bdms=True):
swap = {'device_name': '/dev/vdb', 'swap_size': 1}
ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/sdc1', 'size': 10},
'device_name': '/dev/vdc1', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
'device_name': '/dev/sdd', 'size': 10}]
'device_name': '/dev/vdd', 'size': 10}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
return {'root_device_name': '/dev/vda',
'swap': swap if with_swap else {},
'ephemerals': ephemerals if with_eph else [],
'block_device_mapping':
block_device_mapping if with_bdms else []}

def test_volume_in_mapping(self):
block_device_info = self._test_block_device_info()

def _assert_volume_in_mapping(device_name, true_or_false):
self.assertEqual(
@@ -99,10 +102,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
block_device.volume_in_mapping(device_name,
block_device_info))

_assert_volume_in_mapping('sda', False)
_assert_volume_in_mapping('sdb', True)
_assert_volume_in_mapping('sdc1', True)
_assert_volume_in_mapping('sdd', True)
_assert_volume_in_mapping('vda', False)
_assert_volume_in_mapping('vdb', True)
_assert_volume_in_mapping('vdc1', True)
_assert_volume_in_mapping('vdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
@@ -268,6 +271,206 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
}
self.assertEqual(expect, mapping)

def _test_get_disk_mapping_stable_rescue(
self, rescue_props, expected, block_device_info, with_local=False):
instance = objects.Instance(**self.test_instance)

# Make disk.local disks optional per test as found in
# nova.virt.libvirt.BlockInfo.get_default_ephemeral_info
instance.ephemeral_gb = '20' if with_local else None

image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

rescue_image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
rescue_props = objects.ImageMetaProps.from_dict(rescue_props)
rescue_image_meta.properties = rescue_props

mapping = blockinfo.get_disk_mapping("kvm", instance, "virtio", "ide",
image_meta, rescue=True, block_device_info=block_device_info,
rescue_image_meta=rescue_image_meta)

# Assert that the expected mapping is returned from get_disk_mapping
self.assertEqual(expected, mapping)

def test_get_disk_mapping_stable_rescue_virtio_disk(self):
"""Assert the disk mapping when rescuing using a virtio disk"""
rescue_props = {'hw_rescue_bus': 'virtio'}
block_info = self._test_block_device_info(
with_eph=False, with_swap=False, with_bdms=False)
expected = {
'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.rescue': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info)

def test_get_disk_mapping_stable_rescue_ide_disk(self):
"""Assert the disk mapping when rescuing using an IDE disk"""
rescue_props = {'hw_rescue_bus': 'ide'}
block_info = self._test_block_device_info(
with_eph=False, with_swap=False, with_bdms=False)
expected = {
'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.rescue': {'bus': 'ide', 'dev': 'hda', 'type': 'disk'},
'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info)

def test_get_disk_mapping_stable_rescue_usb_disk(self):
"""Assert the disk mapping when rescuing using a USB disk"""
rescue_props = {'hw_rescue_bus': 'usb'}
block_info = self._test_block_device_info(
with_eph=False, with_swap=False, with_bdms=False)
expected = {
'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.rescue': {'bus': 'usb', 'dev': 'sda', 'type': 'disk'},
'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info)

def test_get_disk_mapping_stable_rescue_ide_cdrom(self):
"""Assert the disk mapping when rescuing using an IDE cd-rom"""
rescue_props = {'hw_rescue_device': 'cdrom'}
block_info = self._test_block_device_info(
with_eph=False, with_swap=False, with_bdms=False)
expected = {
'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.rescue': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'},
'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info)

def test_get_disk_mapping_stable_rescue_virtio_disk_with_local(self):
"""Assert the disk mapping when rescuing using a virtio disk with
default ephemeral (local) disks also attached to the instance.
"""
rescue_props = {'hw_rescue_bus': 'virtio'}
block_info = self._test_block_device_info(
with_eph=False, with_swap=False, with_bdms=False)
expected = {
'disk': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'disk.rescue': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
'root': {'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info, with_local=True)

def test_get_disk_mapping_stable_rescue_virtio_disk_with_eph(self):
"""Assert the disk mapping when rescuing using a virtio disk with
ephemeral disks also attached to the instance.
"""
rescue_props = {'hw_rescue_bus': 'virtio'}
block_info = self._test_block_device_info(
with_swap=False, with_bdms=False)
expected = {
'disk': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.eph0': {
'bus': 'virtio', 'dev': 'vdc1', 'format': 'ext4',
'type': 'disk'},
'disk.eph1': {
'bus': 'ide', 'dev': 'vdd', 'type': 'disk'},
'disk.rescue': {
'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info, with_local=True)

def test_get_disk_mapping_stable_rescue_virtio_disk_with_swap(self):
"""Assert the disk mapping when rescuing using a virtio disk with
swap attached to the instance.
"""
rescue_props = {'hw_rescue_bus': 'virtio'}
block_info = self._test_block_device_info(
with_eph=False, with_bdms=False)
expected = {
'disk': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.rescue': {
'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
'disk.swap': {
'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info)

def test_get_disk_mapping_stable_rescue_virtio_disk_with_bdm(self):
"""Assert the disk mapping when rescuing using a virtio disk with
volumes also attached to the instance.
"""
rescue_props = {'hw_rescue_bus': 'virtio'}
block_info = self._test_block_device_info(
with_eph=False, with_swap=False)
expected = {
'/dev/sde': {
'bus': 'scsi', 'dev': 'sde', 'type': 'disk'},
'/dev/sdf': {
'bus': 'scsi', 'dev': 'sdf', 'type': 'disk'},
'disk': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.rescue': {
'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info)

def test_get_disk_mapping_stable_rescue_virtio_disk_with_everything(self):
"""Assert the disk mapping when rescuing using a virtio disk with
volumes, ephemerals and swap also attached to the instance.
"""
rescue_props = {'hw_rescue_bus': 'virtio'}
block_info = self._test_block_device_info()
expected = {
'/dev/sde': {
'bus': 'scsi', 'dev': 'sde', 'type': 'disk'},
'/dev/sdf': {
'bus': 'scsi', 'dev': 'sdf', 'type': 'disk'},
'disk': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'},
'disk.eph0': {
'bus': 'virtio', 'dev': 'vdc1', 'format': 'ext4',
'type': 'disk'},
'disk.eph1': {
'bus': 'ide', 'dev': 'vdd', 'type': 'disk'},
'disk.rescue': {
'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
'disk.swap': {
'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
'root': {
'boot_index': '1', 'bus': 'virtio', 'dev': 'vda',
'type': 'disk'}
}
self._test_get_disk_mapping_stable_rescue(
rescue_props, expected, block_info, with_local=True)

def test_get_disk_mapping_lxc(self):
# A simple disk mapping setup, but for lxc

@@ -1077,6 +1280,40 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
expected_order = ['hd', 'cdrom']
self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))

def _get_rescue_image_meta(self, props_dict):
    # Build an ImageMeta based on the stock test image metadata but
    # carrying the supplied rescue-related properties.
    meta_dict = dict(self.test_image_meta, properties=props_dict)
    return objects.ImageMeta.from_dict(meta_dict)

def test_get_rescue_device(self):
    # Assert that all supported device types are returned correctly
    for device in blockinfo.SUPPORTED_DEVICE_TYPES:
        meta = self._get_rescue_image_meta({'hw_rescue_device': device})
        self.assertEqual(device, blockinfo.get_rescue_device(meta))

    # Assert that disk is returned if hw_rescue_device isn't set
    meta = self._get_rescue_image_meta({'hw_rescue_bus': 'virtio'})
    self.assertEqual('disk', blockinfo.get_rescue_device(meta))

    # Assert that UnsupportedRescueDevice is raised for unsupported devices
    meta = self._get_rescue_image_meta({'hw_rescue_device': 'fs'})
    self.assertRaises(exception.UnsupportedRescueDevice,
                      blockinfo.get_rescue_device, meta)

def test_get_rescue_bus(self):
    # Assert that all supported device bus types are returned. Stable
    # device rescue is not supported by xen or lxc so ignore these.
    for virt_type in ['qemu', 'kvm', 'uml', 'parallels']:
        for bus in blockinfo.SUPPORTED_DEVICE_BUS[virt_type]:
            meta = self._get_rescue_image_meta({'hw_rescue_bus': bus})
            self.assertEqual(bus, blockinfo.get_rescue_bus(None, virt_type,
                                                           meta, None))

    # Assert that UnsupportedRescueBus is raised for unsupported buses
    meta = self._get_rescue_image_meta({'hw_rescue_bus': 'xen'})
    self.assertRaises(exception.UnsupportedRescueBus,
                      blockinfo.get_rescue_bus, None, 'kvm', meta, 'disk')


class DefaultDeviceNamesTestCase(test.NoDBTestCase):
def setUp(self):


+ 143
- 13
nova/tests/unit/virt/libvirt/test_driver.py View File

@@ -19590,13 +19590,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,

if rescue:
rescue_data = ct_instance
disk_info = {'mapping': {'root': {'dev': 'hda'},
'disk.rescue': {'dev': 'hda'}}}
else:
rescue_data = None
disk_info = {'mapping': {'disk': {}}}

cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
image_meta, {'mapping': {'disk': {}}},
rescue_data)
image_meta, disk_info, rescue_data)
self.assertEqual("parallels", cfg.virt_type)
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
@@ -22532,6 +22534,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_detach.assert_called_once_with(expected.to_xml(),
flags=expected_flags)

@mock.patch('nova.objects.block_device.BlockDeviceMapping.save',
new=mock.Mock())
@mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_all_assigned_mediated_devices')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@@ -22541,13 +22546,12 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
@mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
@mock.patch('nova.privsep.utils.supports_direct_io')
@mock.patch('nova.api.metadata.base.InstanceMetadata')
def _test_rescue(self, instance,
mock_instance_metadata, mock_supports_direct_io,
mock_build_device_metadata, mock_set_host_enabled,
mock_write_to_file,
mock_get_mdev,
image_meta_dict=None,
exists=None):
def _test_rescue(self, instance, mock_instance_metadata,
mock_supports_direct_io, mock_build_device_metadata,
mock_set_host_enabled, mock_write_to_file, mock_get_mdev,
mock_get_image_meta_by_ref, image_meta_dict=None, exists=None,
instance_image_meta_dict=None, block_device_info=None):

self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
mock_build_device_metadata.return_value = None
mock_supports_direct_io.return_value = True
@@ -22561,6 +22565,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
image_meta = objects.ImageMeta.from_dict(image_meta_dict)

if instance_image_meta_dict:
meta = objects.ImageMeta.from_dict(instance_image_meta_dict)
mock_get_image_meta_by_ref.return_value = meta

network_info = _fake_network_info(self)
rescue_password = 'fake_password'

@@ -22572,11 +22580,15 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
if post_xml_callback is not None:
post_xml_callback()

with mock.patch.object(
self.drvr, '_create_domain',
side_effect=fake_create_domain) as mock_create_domain:
with test.nested(
mock.patch.object(self.drvr, '_create_domain',
side_effect=fake_create_domain),
mock.patch.object(self.drvr, '_connect_volume'),
) as (mock_create_domain, mock_connect_volume):

self.drvr.rescue(self.context, instance,
network_info, image_meta, rescue_password, None)
network_info, image_meta, rescue_password,
block_device_info)

self.assertTrue(mock_create_domain.called)

@@ -22693,6 +22705,124 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual(expected_kernel_ramdisk_paths,
kernel_ramdisk_paths)

@mock.patch('nova.virt.libvirt.utils.write_to_file')
def test_rescue_stable_device_unsupported_virt_types(self,
mock_libvirt_write_to_file):
network_info = _fake_network_info(self, 1)
instance = self._create_instance({'config_drive': str(True)})
rescue_image_meta_dict = {'id': uuids.rescue_image_id,
'name': 'rescue',
'properties': {'hw_rescue_device': 'disk',
'hw_rescue_bus': 'virtio'}}
rescue_image_meta = objects.ImageMeta.from_dict(rescue_image_meta_dict)

# Assert that InstanceNotRescuable is raised for xen and lxc virt_types
self.flags(virt_type='xen', group='libvirt')
self.assertRaises(exception.InstanceNotRescuable, self.drvr.rescue,
self.context, instance, network_info,
rescue_image_meta, None, None)

self.flags(virt_type='lxc', group='libvirt')
self.assertRaises(exception.InstanceNotRescuable, self.drvr.rescue,
self.context, instance, network_info,
rescue_image_meta, None, None)

def test_rescue_stable_device(self):
# Assert the imagebackend behaviour and domain device layout
instance = self._create_instance({'config_drive': str(True)})
inst_image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
rescue_image_meta_dict = {'id': uuids.rescue_image_id,
'name': 'rescue',
'properties': {'hw_rescue_device': 'disk',
'hw_rescue_bus': 'virtio'}}
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [
{'guest_format': None,
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 20,
'device_type': 'disk'}],
'swap': None,
'block_device_mapping': None}

backend, domain = self._test_rescue(
instance,
image_meta_dict=rescue_image_meta_dict,
instance_image_meta_dict=inst_image_meta_dict,
block_device_info=block_device_info)

# Assert that we created the expected set of disks, and no others
self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
sorted(backend.created_disks.keys()))

# Assert that the original disks are presented first with the rescue
# disk attached as the final device in the domain.
expected_disk_paths = [backend.disks[name].path for name
in ('disk', 'disk.eph0', 'disk.config',
'disk.rescue')]
disk_paths = domain.xpath('devices/disk/source/@file')
self.assertEqual(expected_disk_paths, disk_paths)

# Assert that the disk.rescue device has a boot order of 1
disk_path = backend.disks['disk.rescue'].path
query = "devices/disk[source/@file = '%s']/boot/@order" % disk_path
self.assertEqual('1', domain.xpath(query)[0])

def test_rescue_stable_device_with_volume_attached(self):
# Assert the imagebackend behaviour and domain device layout
instance = self._create_instance({'config_drive': str(True)})
inst_image_meta_dict = {'id': uuids.image_id, 'name': 'fake'}
rescue_image_meta_dict = {'id': uuids.rescue_image_id,
'name': 'rescue',
'properties': {'hw_rescue_device': 'disk',
'hw_rescue_bus': 'virtio'}}
conn_info = {'driver_volume_type': 'iscsi',
'data': {'device_path': '/dev/sdb'}}
bdm = objects.BlockDeviceMapping(
self.context,
**fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': '/dev/vdd'}))
bdms = driver_block_device.convert_volumes([bdm])
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [
{'guest_format': None,
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 20,
'device_type': 'disk'}],
'swap': None,
'block_device_mapping': bdms}
bdm = block_device_info['block_device_mapping'][0]
bdm['connection_info'] = conn_info

backend, domain = self._test_rescue(
instance,
image_meta_dict=rescue_image_meta_dict,
instance_image_meta_dict=inst_image_meta_dict,
block_device_info=block_device_info)

# Assert that we created the expected set of disks, and no others
self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
sorted(backend.created_disks.keys()))

# Assert that the original disks are presented first with the rescue
# disk attached as the final device in the domain.
expected_disk_paths = [
backend.disks['disk'].path, backend.disks['disk.eph0'].path,
backend.disks['disk.config'].path, '/dev/sdb',
backend.disks['disk.rescue'].path]
query = 'devices/disk/source/@*[name()="file" or name()="dev"]'
disk_paths = domain.xpath(query)
self.assertEqual(expected_disk_paths, disk_paths)

# Assert that the disk.rescue device has a boot order of 1
disk_path = backend.disks['disk.rescue'].path
query = "devices/disk[source/@file = '%s']/boot/@order" % disk_path
self.assertEqual('1', domain.xpath(query)[0])

@mock.patch.object(libvirt_utils, 'get_instance_path')
@mock.patch.object(libvirt_utils, 'load_file')
@mock.patch.object(host.Host, '_get_domain')


+ 16
- 0
nova/tests/unit/virt/test_hardware.py View File

@@ -14,6 +14,7 @@

import collections
import copy
import ddt

import mock
import testtools
@@ -4376,3 +4377,18 @@ class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
hw.get_pci_numa_policy_constraint, flavor, image_meta)
with testtools.ExpectedException(ValueError):
image_meta.properties.hw_pci_numa_affinity_policy = "fake"


@ddt.ddt
class RescuePropertyTestCase(test.NoDBTestCase):
    """Tests for nova.virt.hardware.check_hw_rescue_props."""

    @ddt.unpack
    @ddt.data({'props': {'hw_rescue_device': 'disk',
                         'hw_rescue_bus': 'virtio'}, 'expected': True},
              {'props': {'hw_rescue_device': 'disk'}, 'expected': True},
              {'props': {'hw_rescue_bus': 'virtio'}, 'expected': True},
              {'props': {'hw_disk_bus': 'virtio'}, 'expected': False})
    def test_check_hw_rescue_props(self, props=None, expected=None):
        # True whenever either hw_rescue_* property is present on the image.
        meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
        meta.properties = objects.ImageMetaProps.from_dict(props)
        self.assertEqual(expected, hw.check_hw_rescue_props(meta))

+ 7
- 0
nova/virt/hardware.py View File

@@ -2259,3 +2259,10 @@ def get_vpmems(flavor):
if formed_label:
formed_labels.append(formed_label)
return formed_labels


def check_hw_rescue_props(image_meta):
    """Check whether the image requests a stable device rescue.

    :param image_meta: nova.objects.ImageMeta to inspect.
    :returns: True if either the hw_rescue_device or hw_rescue_bus image
        property is present, False otherwise.
    """
    props = image_meta.properties
    return 'hw_rescue_device' in props or 'hw_rescue_bus' in props

+ 51
- 12
nova/virt/libvirt/blockinfo.py View File

@@ -87,9 +87,17 @@ from nova.virt import osinfo

CONF = cfg.CONF


SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun')
BOOT_DEV_FOR_TYPE = {'disk': 'hd', 'cdrom': 'cdrom', 'floppy': 'fd'}
# NOTE(aspiers): If you change this, don't forget to update the docs and
# metadata for hw_*_bus in glance.
SUPPORTED_DEVICE_BUS = {
'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc', 'sata'],
'xen': ['xen', 'ide'],
'uml': ['uml'],
'lxc': ['lxc'],
'parallels': ['ide', 'scsi']}
SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun')


def has_disk_dev(mapping, disk_dev):
@@ -511,11 +519,9 @@ def update_bdm(bdm, info):
info['bus'], info['type']))))


def get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
image_meta,
block_device_info=None,
rescue=False):
def get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta,
block_device_info=None, rescue=False,
rescue_image_meta=None):
"""Determine how to map default disks to the virtual machine.

This is about figuring out whether the default 'disk',
@@ -527,7 +533,7 @@ def get_disk_mapping(virt_type, instance,

mapping = {}

if rescue:
if rescue and rescue_image_meta is None:
rescue_info = get_next_disk_info(mapping,
disk_bus, boot_index=1)
mapping['disk.rescue'] = rescue_info
@@ -633,11 +639,21 @@ def get_disk_mapping(virt_type, instance,
device_type)
mapping['disk.config'] = config_info

# NOTE(lyarwood): This can only be a stable device rescue so add the rescue
# disk as the final disk in the mapping.
if rescue and rescue_image_meta:
rescue_device = get_rescue_device(rescue_image_meta)
rescue_bus = get_rescue_bus(instance, virt_type, rescue_image_meta,
rescue_device)
rescue_info = get_next_disk_info(mapping, rescue_bus,
device_type=rescue_device)
mapping['disk.rescue'] = rescue_info

return mapping


def get_disk_info(virt_type, instance, image_meta,
block_device_info=None, rescue=False):
def get_disk_info(virt_type, instance, image_meta, block_device_info=None,
rescue=False, rescue_image_meta=None):
"""Determine guest disk mapping info.

This is a wrapper around get_disk_mapping, which
@@ -658,8 +674,9 @@ def get_disk_info(virt_type, instance, image_meta,
mapping = get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
image_meta,
block_device_info,
rescue)
block_device_info=block_device_info,
rescue=rescue,
rescue_image_meta=rescue_image_meta)

return {'disk_bus': disk_bus,
'cdrom_bus': cdrom_bus,
@@ -678,3 +695,25 @@ def get_boot_order(disk_info):
return [el for el in lst if el not in s and not s.add(el)]

return uniq(boot_devs_dup)


def get_rescue_device(rescue_image_meta):
    """Find and validate the device type to use for the rescue disk.

    :param rescue_image_meta: ImageMeta of the rescue image.
    :returns: the hw_rescue_device image property, defaulting to 'disk'
        when the property is not set.
    :raises: exception.UnsupportedRescueDevice if the requested device type
        is not one of SUPPORTED_DEVICE_TYPES.
    """
    device = rescue_image_meta.properties.get("hw_rescue_device", "disk")
    if device in SUPPORTED_DEVICE_TYPES:
        return device
    raise exception.UnsupportedRescueDevice(device=device)


def get_rescue_bus(instance, virt_type, rescue_image_meta, rescue_device):
    """Find and validate the bus to use for the rescue disk.

    :param instance: the instance being rescued.
    :param virt_type: the libvirt virt_type in use.
    :param rescue_image_meta: ImageMeta of the rescue image.
    :param rescue_device: device type the rescue disk will use.
    :returns: the hw_rescue_bus image property when set and valid for the
        virt_type, otherwise the default bus for the given device type.
    :raises: exception.UnsupportedRescueBus if the requested bus is not
        valid for the given virt_type.
    """
    bus = rescue_image_meta.properties.get("hw_rescue_bus")
    if bus is None:
        # No explicit bus requested; fall back to the normal default for
        # this device type and hypervisor.
        return get_disk_bus_for_device_type(
            instance, virt_type, rescue_image_meta,
            device_type=rescue_device)
    if not is_disk_bus_valid_for_virt(virt_type, bus):
        raise exception.UnsupportedRescueBus(bus=bus, virt=virt_type)
    return bus

+ 77
- 9
nova/virt/libvirt/driver.py View File

@@ -3417,6 +3417,29 @@ class LibvirtDriver(driver.ComputeDriver):
should not edit or over-ride the original image, only allow for
data recovery.

Two modes are provided when rescuing an instance with this driver.

The original and default rescue mode, where the rescue boot disk,
original root disk and optional regenerated config drive are attached
to the instance.

A second stable device rescue mode is also provided where all of the
original devices are attached to the instance during the rescue attempt
with the addition of the rescue boot disk. This second mode is
controlled by the hw_rescue_device and hw_rescue_bus image properties
on the rescue image provided to this method via image_meta.

:param nova.context.RequestContext context:
The context for the rescue.
:param nova.objects.instance.Instance instance:
The instance being rescued.
:param nova.network.model.NetworkInfo network_info:
Necessary network information for the rescue.
:param nova.objects.ImageMeta image_meta:
The metadata of the image of the instance.
:param rescue_password: new root password to set for rescue.
:param dict block_device_info:
The block device mapping of the instance.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
@@ -3424,6 +3447,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)

rescue_image_id = None
rescue_image_meta = None
if image_meta.obj_attr_is_set("id"):
rescue_image_id = image_meta.id

@@ -3435,10 +3459,43 @@ class LibvirtDriver(driver.ComputeDriver):
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
rescue=True)

virt_type = CONF.libvirt.virt_type
if hardware.check_hw_rescue_props(image_meta):
LOG.info("Attempting a stable device rescue", instance=instance)
# NOTE(lyarwood): Stable device rescue is not supported when using
# the LXC and Xen virt_types as they do not support the required
# <boot order=''> definitions allowing an instance to boot from the
# rescue device added as a final device to the domain.
if virt_type in ('lxc', 'xen'):
reason = ("Stable device rescue is not supported by virt_type "
"%s", virt_type)
raise exception.InstanceNotRescuable(instance_id=instance.uuid,
reason=reason)
# NOTE(lyarwood): Stable device rescue provides the original disk
# mapping of the instance with the rescue device appended to the
# end. As a result we need to provide the original image_meta, the
# new rescue_image_meta and block_device_info when calling
# get_disk_info.
rescue_image_meta = image_meta
if instance.image_ref:
image_meta = objects.ImageMeta.from_image_ref(
context, self._image_api, instance.image_ref)
else:
image_meta = objects.ImageMeta.from_dict({})

else:
LOG.info("Attempting an unstable device rescue", instance=instance)
# NOTE(lyarwood): An unstable rescue only provides the rescue
# device and the original root device so we don't need to provide
# block_device_info to the get_disk_info call.
block_device_info = None

disk_info = blockinfo.get_disk_info(virt_type, instance, image_meta,
rescue=True, block_device_info=block_device_info,
rescue_image_meta=rescue_image_meta)
LOG.debug("rescue generated disk_info: %s", disk_info)

injection_info = InjectionInfo(network_info=network_info,
admin_pass=rescue_password,
files=None)
@@ -3454,7 +3511,8 @@ class LibvirtDriver(driver.ComputeDriver):
disk_images=rescue_images)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
mdevs=mdevs)
mdevs=mdevs,
block_device_info=block_device_info)
self._destroy(instance)
self._create_domain(xml, post_xml_callback=gen_confdrive)

@@ -4407,7 +4465,7 @@ class LibvirtDriver(driver.ComputeDriver):
return cpu

def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image_type=None, boot_order=None):
disk_unit = None
disk = self.image_backend.by_name(instance, name, image_type)
if (name == 'disk.config' and image_type == 'rbd' and
@@ -4430,7 +4488,8 @@ class LibvirtDriver(driver.ComputeDriver):
conf = disk.libvirt_info(disk_info, self.disk_cachemode,
inst_type['extra_specs'],
self._host.get_version(),
disk_unit=disk_unit)
disk_unit=disk_unit,
boot_order=boot_order)
return conf

def _get_guest_fs_config(self, instance, name, image_type=None):
@@ -4502,7 +4561,7 @@ class LibvirtDriver(driver.ComputeDriver):
devices = devices + _get_ephemeral_devices()
else:

if rescue:
if rescue and disk_mapping['disk.rescue'] == disk_mapping['root']:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
@@ -4542,7 +4601,10 @@ class LibvirtDriver(driver.ComputeDriver):
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))

config_name = 'disk.config.rescue' if rescue else 'disk.config'
config_name = 'disk.config'
if rescue and disk_mapping['disk.rescue'] == disk_mapping['root']:
config_name = 'disk.config.rescue'

if config_name in disk_mapping:
diskconfig = self._get_guest_disk_config(
instance, config_name, disk_mapping, inst_type,
@@ -4575,6 +4637,12 @@ class LibvirtDriver(driver.ComputeDriver):
if scsi_controller:
devices.append(scsi_controller)

if rescue and disk_mapping['disk.rescue'] != disk_mapping['root']:
diskrescue = self._get_guest_disk_config(instance, 'disk.rescue',
disk_mapping, inst_type,
boot_order='1')
devices.append(diskrescue)

return devices

@staticmethod


Loading…
Cancel
Save