Merge "Apply SEV-specific guest config when SEV is required"

Zuul 2019-09-11 08:33:59 +00:00 committed by Gerrit Code Review
commit 95d190b0d8
10 changed files with 475 additions and 32 deletions

View File

@ -2460,6 +2460,11 @@ class FlavorImageConflict(NovaException):
"(%(flavor_val)s) and the image (%(image_val)s).")
class MissingDomainCapabilityFeatureException(NovaException):
msg_fmt = _("Guest config could not be built without domain capabilities "
"including <%(feature)s> feature.")
class HealPortAllocationException(NovaException):
msg_fmt = _("Healing port allocation failed.")

View File

@ -87,13 +87,69 @@ def fake_kvm_guest():
obj.sysinfo.bios_vendor = "Acme"
obj.sysinfo.system_version = "1.0.0"
# obj.devices[0]
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/img"
disk.target_dev = "/dev/vda"
disk.source_path = "/tmp/disk-img"
disk.target_dev = "vda"
disk.target_bus = "virtio"
obj.add_device(disk)
# obj.devices[1]
disk = config.LibvirtConfigGuestDisk()
disk.source_device = "cdrom"
disk.source_type = "file"
disk.source_path = "/tmp/cdrom-img"
disk.target_dev = "sda"
disk.target_bus = "sata"
obj.add_device(disk)
# obj.devices[2]
intf = config.LibvirtConfigGuestInterface()
intf.net_type = "network"
intf.mac_addr = "52:54:00:f6:35:8f"
intf.model = "virtio"
intf.source_dev = "virbr0"
obj.add_device(intf)
# obj.devices[3]
balloon = config.LibvirtConfigMemoryBalloon()
balloon.model = 'virtio'
balloon.period = 11
obj.add_device(balloon)
# obj.devices[4]
mouse = config.LibvirtConfigGuestInput()
mouse.type = "mouse"
mouse.bus = "virtio"
obj.add_device(mouse)
# obj.devices[5]
gfx = config.LibvirtConfigGuestGraphics()
gfx.type = "vnc"
gfx.autoport = True
gfx.keymap = "en_US"
gfx.listen = "127.0.0.1"
obj.add_device(gfx)
# obj.devices[6]
video = config.LibvirtConfigGuestVideo()
video.type = 'qxl'
obj.add_device(video)
# obj.devices[7]
serial = config.LibvirtConfigGuestSerial()
serial.type = "file"
serial.source_path = "/tmp/vm.log"
obj.add_device(serial)
# obj.devices[8]
rng = config.LibvirtConfigGuestRng()
rng.backend = '/dev/urandom'
rng.rate_period = '12'
rng.rate_bytes = '34'
obj.add_device(rng)
return obj
@ -151,9 +207,33 @@ FAKE_KVM_GUEST = """
</cputune>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
<target bus="virtio" dev="/dev/vda"/>
<source file="/tmp/disk-img"/>
<target bus="virtio" dev="vda"/>
</disk>
<disk type="file" device="cdrom">
<source file="/tmp/cdrom-img"/>
<target bus="sata" dev="sda"/>
</disk>
<interface type='network'>
<mac address='52:54:00:f6:35:8f'/>
<model type='virtio'/>
<source bridge='virbr0'/>
</interface>
<memballoon model='virtio'>
<stats period='11'/>
</memballoon>
<input type="mouse" bus="virtio"/>
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
<video>
<model type='qxl'/>
</video>
<serial type="file">
<source path="/tmp/vm.log"/>
</serial>
<rng model='virtio'>
<rate period='12' bytes='34'/>
<backend model='random'>/dev/urandom</backend>
</rng>
</devices>
<launchSecurity type="sev">
<policy>0x0033</policy>

View File

@ -17,6 +17,7 @@ import mock
from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit.virt.libvirt import fake_libvirt_data
from nova.virt.libvirt import config
from nova.virt.libvirt import designer
from nova.virt.libvirt import host
@ -224,3 +225,24 @@ class DesignerTestCase(test.NoDBTestCase):
conf = config.LibvirtConfigGuestInterface()
designer.set_vif_mtu_config(conf, 9000)
self.assertEqual(9000, conf.mtu)
def test_set_driver_iommu_for_sev(self):
conf = fake_libvirt_data.fake_kvm_guest()
designer.set_driver_iommu_for_sev(conf)
# The virtio-backed devices (disk 0, interface 2, memballoon 3, rng 8)
# should get driver_iommu enabled; the SATA cdrom (1), input (4) and
# video (6) devices should not.
self.assertEqual(9, len(conf.devices))
for i in (0, 2, 3, 8):
dev = conf.devices[i]
self.assertTrue(
dev.driver_iommu,
"expected device %d to have driver_iommu enabled\n%s" %
(i, dev.to_xml()))
for i in (1, 4, 6):
dev = conf.devices[i]
self.assertFalse(
dev.driver_iommu,
"didn't expect device %i to have driver_iommu enabled\n%s" %
(i, dev.to_xml()))

View File

@ -14,6 +14,7 @@
# under the License.
import binascii
from collections import defaultdict
from collections import deque
from collections import OrderedDict
import contextlib
@ -87,7 +88,7 @@ from nova.tests.unit import fake_diagnostics
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
import nova.tests.unit.image.fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_diagnostics
from nova.tests.unit.objects import test_pci_device
@ -105,10 +106,12 @@ from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt.host import SEV_KERNEL_PARAM_FILE
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import migration as libvirt_migrate
@ -2531,11 +2534,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def _test_get_guest_memory_backing_config(
self, host_topology, inst_topology, numatune):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
with mock.patch.object(
drvr, "_get_host_numa_topology",
return_value=host_topology):
return drvr._get_guest_memory_backing_config(
inst_topology, numatune, {})
inst_topology, numatune, flavor, image_meta)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@ -2596,16 +2603,212 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsNone(result)
def test_get_guest_memory_backing_config_realtime(self):
flavor = {"extra_specs": {
extra_specs = {
"hw:cpu_realtime": "yes",
"hw:cpu_policy": "dedicated"
}}
}
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=extra_specs)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
membacking = drvr._get_guest_memory_backing_config(
None, None, flavor)
None, None, flavor, image_meta)
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
def _test_sev_enabled(self, expected=None, host_sev_enabled=False,
enc_extra_spec=None, enc_image_prop=None,
hw_machine_type=None, hw_firmware_type=None):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._host._supports_amd_sev = host_sev_enabled
extra_specs = {}
if enc_extra_spec is not None:
extra_specs['hw:mem_encryption'] = enc_extra_spec
flavor = objects.Flavor(name='m1.fake')
flavor.extra_specs = extra_specs
image_props = {}
if hw_machine_type is not None:
image_props['hw_machine_type'] = hw_machine_type
if hw_firmware_type is not None:
image_props['hw_firmware_type'] = hw_firmware_type
if enc_image_prop is not None:
image_props['hw_mem_encryption'] = enc_image_prop
image_meta = fake_image.fake_image_obj(
{'id': '150d530b-1c57-4367-b754-1f1b5237923d'},
{}, image_props)
enabled = drvr._sev_enabled(flavor, image_meta)
if expected is None:
self.fail("_test_sev_enabled called without an expected "
"return value. Maybe you expected an exception?")
self.assertEqual(expected, enabled)
def test_sev_enabled_no_host_support(self):
self._test_sev_enabled(False)
def test_sev_enabled_host_support_no_flavor_image(self):
self._test_sev_enabled(False, host_sev_enabled=True)
def test_sev_enabled_no_host_support_flavor_requested(self):
self._test_sev_enabled(False, enc_extra_spec=True)
def test_sev_enabled_no_host_support_image_requested(self):
self._test_sev_enabled(False, enc_image_prop=True)
def test_sev_enabled_host_support_flavor_requested(self):
self._test_sev_enabled(True, host_sev_enabled=True,
enc_extra_spec=True, hw_firmware_type='uefi',
hw_machine_type='q35')
def test_sev_enabled_host_support_image_requested(self):
self._test_sev_enabled(True, host_sev_enabled=True,
enc_image_prop=True, hw_firmware_type='uefi',
hw_machine_type='q35')
# The cases where the flavor and image requests contradict each other
# are already covered by test_hardware.MemEncryptionConflictTestCase
# so we don't need to test them in great detail here.
def test_sev_enabled_host_extra_spec_image_conflict(self):
exc = self.assertRaises(exception.FlavorImageConflict,
self._test_sev_enabled,
host_sev_enabled=True, enc_extra_spec=False,
enc_image_prop=True)
self.assertEqual(
"Flavor m1.fake has hw:mem_encryption extra spec explicitly set "
"to False, conflicting with image fake_image which has "
"hw_mem_encryption property explicitly set to True", str(exc))
def test_sev_enabled_host_extra_spec_no_uefi(self):
exc = self.assertRaises(exception.FlavorImageConflict,
self._test_sev_enabled,
host_sev_enabled=True, enc_extra_spec=True)
self.assertEqual(
"Memory encryption requested by hw:mem_encryption extra spec in "
"m1.fake flavor but image fake_image doesn't have "
"'hw_firmware_type' property set to 'uefi'", str(exc))
def test_sev_enabled_host_extra_spec_no_machine_type(self):
exc = self.assertRaises(exception.InvalidMachineType,
self._test_sev_enabled,
host_sev_enabled=True, enc_extra_spec=True,
hw_firmware_type='uefi')
self.assertEqual(
"Machine type 'pc' is not compatible with image fake_image "
"(150d530b-1c57-4367-b754-1f1b5237923d): q35 type is required "
"for SEV to work", str(exc))
def test_sev_enabled_host_extra_spec_pc(self):
exc = self.assertRaises(exception.InvalidMachineType,
self._test_sev_enabled,
host_sev_enabled=True, enc_extra_spec=True,
hw_firmware_type='uefi', hw_machine_type='pc')
self.assertEqual(
"Machine type 'pc' is not compatible with image fake_image "
"(150d530b-1c57-4367-b754-1f1b5237923d): q35 type is required "
"for SEV to work", str(exc))
def _setup_fake_domain_caps(self, fake_domain_caps):
sev_feature = vconfig.LibvirtConfigDomainCapsFeatureSev()
sev_feature.cbitpos = 47
sev_feature.reduced_phys_bits = 1
domain_caps = vconfig.LibvirtConfigDomainCaps()
domain_caps._features = vconfig.LibvirtConfigDomainCapsFeatures()
domain_caps._features.features = [sev_feature]
fake_domain_caps.return_value = defaultdict(
dict, {'x86_64': {'q35': domain_caps}})
@mock.patch.object(host.Host, 'get_domain_capabilities')
def test_find_sev_feature_missing_arch(self, fake_domain_caps):
self._setup_fake_domain_caps(fake_domain_caps)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertIsNone(drvr._find_sev_feature('arm1', 'q35'))
@mock.patch.object(host.Host, 'get_domain_capabilities')
def test_find_sev_feature_missing_mach_type(self, fake_domain_caps):
self._setup_fake_domain_caps(fake_domain_caps)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertIsNone(drvr._find_sev_feature('x86_64', 'g3beige'))
@mock.patch.object(host.Host, 'get_domain_capabilities')
def test_find_sev_feature(self, fake_domain_caps):
self._setup_fake_domain_caps(fake_domain_caps)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
feature = drvr._find_sev_feature('x86_64', 'q35')
self.assertIsInstance(feature,
vconfig.LibvirtConfigDomainCapsFeatureSev)
self.assertEqual(47, feature.cbitpos)
self.assertEqual(1, feature.reduced_phys_bits)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_has_uefi_support", new=mock.Mock(return_value=True))
def _setup_sev_guest(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._host._supports_amd_sev = True
ctxt = context.RequestContext(project_id=123,
project_name="aubergine",
user_id=456,
user_name="pie")
extra_specs = {
"hw:mem_encryption": True,
}
flavor = objects.Flavor(name='m1.small',
memory_mb=6,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
swap=33550336,
extra_specs=extra_specs)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor = flavor
image_meta = objects.ImageMeta.from_dict({
'id': 'd9c6aeee-8258-4bdb-bca4-39940461b182',
'name': 'fakeimage',
'disk_format': 'raw',
'properties': {'hw_firmware_type': 'uefi',
'hw_machine_type': 'q35'}
})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
return drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info,
context=ctxt)
def test_get_guest_config_sev_no_feature(self):
self.assertRaises(exception.MissingDomainCapabilityFeatureException,
self._setup_sev_guest)
@mock.patch.object(host.Host, 'get_domain_capabilities')
@mock.patch.object(designer, 'set_driver_iommu_for_sev')
def test_get_guest_config_sev(self, mock_designer, fake_domain_caps):
self._setup_fake_domain_caps(fake_domain_caps)
cfg = self._setup_sev_guest()
# SEV-related tag should be set
self.assertIsInstance(cfg.launch_security,
vconfig.LibvirtConfigGuestSEVLaunchSecurity)
self.assertIsInstance(cfg.membacking,
vconfig.LibvirtConfigGuestMemoryBacking)
self.assertTrue(cfg.membacking.locked)
mock_designer.assert_called_once_with(cfg)
def test_get_guest_memory_backing_config_file_backed(self):
self.flags(file_backed_memory=1024, group="libvirt")
@ -5822,6 +6025,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[5].rate_period, 2)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
@test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
def test_get_guest_config_with_rng_backend(self, mock_path):
self.flags(virt_type='kvm',
rng_dev_path='/dev/hw_rng',
@ -6500,6 +6704,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
@mock.patch('os.path.exists', return_value=True)
@test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
def test_get_guest_config_aarch64(self, mock_path_exists,
mock_numa, mock_storage, mock_get_arch):
def get_host_capabilities_stub(self):
@ -6554,6 +6759,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
@mock.patch('os.path.exists', return_value=True)
@test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
def test_get_guest_config_aarch64_with_graphics(self, mock_path_exists,
mock_numa, mock_storage,
mock_get_arch):
@ -6592,26 +6798,33 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(usbhost_exists)
self.assertTrue(keyboard_exists)
def test_get_guest_config_machine_type_through_image_meta(self):
self.flags(virt_type="kvm",
group='libvirt')
def _get_guest_config_machine_type_through_image_meta(self, mach_type):
self.flags(virt_type="kvm", group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_machine_type":
"fake_machine_type"}})
"properties": {"hw_machine_type": mach_type}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
return drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
def test_get_guest_config_machine_type_through_image_meta(self):
cfg = self._get_guest_config_machine_type_through_image_meta(
"fake_machine_type")
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def test_get_guest_config_machine_type_through_image_meta_sev(self):
fake_q35 = "fake-q35-2.11"
cfg = self._get_guest_config_machine_type_through_image_meta(fake_q35)
self.assertEqual(cfg.os_mach_type, fake_q35)
def test_get_guest_config_machine_type_from_config(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(hw_machine_type=['x86_64=fake_machine_type'],

View File

@ -993,3 +993,10 @@ sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
group="libvirt", hw_machine_type=['x86_64=q35', 'foo']))
self.assertEqual('q35',
libvirt_utils.get_default_machine_type('x86_64'))
def test_get_machine_type_from_image(self):
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw", "properties": {"hw_machine_type": "q35"}
})
os_mach_type = libvirt_utils.get_machine_type(image_meta)
self.assertEqual('q35', os_mach_type)

View File

@ -596,6 +596,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
if image_meta is None:
image_meta = objects.ImageMeta.from_dict({})
conf = self._get_conf()
hostimpl = host.Host("qemu:///system")
with mock.patch.object(hostimpl, 'has_min_version',

View File

@ -1137,7 +1137,7 @@ def _get_flavor_image_meta(key, flavor, image_meta, default=None):
return flavor_policy, image_policy
def get_mem_encryption_constraint(flavor, image_meta):
def get_mem_encryption_constraint(flavor, image_meta, machine_type=None):
"""Return a boolean indicating whether encryption of guest memory was
requested, either via the hw:mem_encryption extra spec or the
hw_mem_encryption image property (or both).
@ -1156,12 +1156,16 @@ def get_mem_encryption_constraint(flavor, image_meta):
3) the flavor and/or image request memory encryption, but the
machine type is set to a value which does not contain 'q35'
This is called from the API layer, so get_machine_type() cannot be
called since it relies on being run from the compute node in order
to retrieve CONF.libvirt.hw_machine_type.
This can be called from the libvirt driver on the compute node, in
which case the driver should pass the result of
nova.virt.libvirt.utils.get_machine_type() as the machine_type
parameter, or from the API layer, in which case get_machine_type()
cannot be called since it relies on being run from the compute
node in order to retrieve CONF.libvirt.hw_machine_type.
:param flavor: Flavor object
:param image_meta: an ImageMeta object
:param machine_type: a string representing the machine type (optional)
:raises: nova.exception.FlavorImageConflict
:raises: nova.exception.InvalidMachineType
:returns: boolean indicating whether encryption of guest memory
@ -1196,7 +1200,7 @@ def get_mem_encryption_constraint(flavor, image_meta):
image_meta.name)
_check_mem_encryption_uses_uefi_image(requesters, image_meta)
_check_mem_encryption_machine_type(image_meta)
_check_mem_encryption_machine_type(image_meta, machine_type)
LOG.debug("Memory encryption requested by %s", " and ".join(requesters))
return True
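A hedged sketch of the two call paths described in the docstring above (the variable names wants_sev and mach_type are illustrative; flavor and image_meta are assumed to be already-loaded nova objects):

# Illustrative sketch only.
from nova.virt import hardware
from nova.virt.libvirt import utils as libvirt_utils

# API layer: the compute node's CONF.libvirt.hw_machine_type is unknown,
# so only the image's hw_machine_type property can be checked.
wants_sev = hardware.get_mem_encryption_constraint(flavor, image_meta)

# libvirt driver: the machine type that will actually be used is known,
# so it is passed in for the stricter q35 check.
mach_type = libvirt_utils.get_machine_type(image_meta)
wants_sev = hardware.get_mem_encryption_constraint(
    flavor, image_meta, machine_type=mach_type)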
@ -1236,7 +1240,7 @@ def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
raise exception.FlavorImageConflict(emsg % data)
def _check_mem_encryption_machine_type(image_meta):
def _check_mem_encryption_machine_type(image_meta, machine_type=None):
# NOTE(aspiers): As explained in the SEV spec, SEV needs a q35
# machine type in order to bind all the virtio devices to the PCIe
# bridge so that they use virtio 1.0 and not virtio 0.9, since
@ -1247,10 +1251,12 @@ def _check_mem_encryption_machine_type(image_meta):
# So if the image explicitly requests a machine type which is not
# in the q35 family, raise an exception.
#
# Note that this check occurs at API-level, therefore we can't
# check here what value of CONF.libvirt.hw_machine_type may have
# been configured on the compute node.
mach_type = image_meta.properties.get('hw_machine_type')
# This check can be triggered both at API-level, at which point we
# can't check here what value of CONF.libvirt.hw_machine_type may
# have been configured on the compute node, and by the libvirt
# driver, in which case the driver can check that config option
# and will pass the machine_type parameter.
mach_type = machine_type or image_meta.properties.get('hw_machine_type')
# If hw_machine_type is not specified on the image and is not
# configured correctly on SEV compute nodes, then a separate check

View File

@ -2991,6 +2991,7 @@ class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
**kwargs)
self.device_model = 'virtio'
self.model = 'random'
self.backend = None
self.rate_period = None
@ -2999,7 +3000,7 @@ class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
def format_dom(self):
dev = super(LibvirtConfigGuestRng, self).format_dom()
dev.set('model', 'virtio')
dev.set('model', self.device_model)
backend = etree.Element("backend")
backend.set("model", self.model)

View File

@ -21,6 +21,7 @@ classes based on common operational needs / policies
from nova.pci import utils as pci_utils
from nova.virt.libvirt import config
MIN_LIBVIRT_ETHERNET_SCRIPT_PATH_NONE = (1, 3, 3)
@ -196,3 +197,17 @@ def set_vcpu_realtime_scheduler(conf, vcpus_rt, priority):
conf.vcpus = vcpus_rt
conf.scheduler = "fifo"
conf.priority = priority
def set_driver_iommu_for_sev(conf):
virtio_attrs = {
config.LibvirtConfigGuestDisk: 'target_bus',
config.LibvirtConfigGuestInterface: 'model',
config.LibvirtConfigGuestRng: 'device_model',
config.LibvirtConfigMemoryBalloon: 'model',
}
for dev in conf.devices:
virtio_attr = virtio_attrs.get(dev.__class__)
if virtio_attr and getattr(dev, virtio_attr) == 'virtio':
dev.driver_iommu = True
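A hedged usage sketch of the helper above; the assertion mirrors what the designer unit test in this change checks, and assumes the config device classes expose the driver_iommu attribute added by this commit:

# Sketch only: a guest config with a single virtio disk.
from nova.virt.libvirt import config
from nova.virt.libvirt import designer

guest = config.LibvirtConfigGuest()
disk = config.LibvirtConfigGuestDisk()
disk.source_type = "file"
disk.source_path = "/tmp/disk-img"
disk.target_dev = "vda"
disk.target_bus = "virtio"   # matches the 'target_bus' attribute checked above
guest.add_device(disk)

designer.set_driver_iommu_for_sev(guest)
assert disk.driver_iommu     # virtio devices are flagged for IOMMU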

View File

@ -4888,7 +4888,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._add_rng_device(guest, flavor)
def _get_guest_memory_backing_config(
self, inst_topology, numatune, flavor):
self, inst_topology, numatune, flavor, image_meta):
wantsmempages = False
if inst_topology:
for cell in inst_topology.cells:
@ -4928,6 +4928,10 @@ class LibvirtDriver(driver.ComputeDriver):
MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION,
MIN_QEMU_FILE_BACKED_DISCARD_VERSION):
membacking.discard = True
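# SEV-encrypted guest RAM cannot be swapped out by the host, so the
# guest's memory is locked when SEV is enabled below.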
if self._sev_enabled(flavor, image_meta):
if not membacking:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
return membacking
@ -5034,7 +5038,8 @@ class LibvirtDriver(driver.ComputeDriver):
return True
def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
image_meta, flavor, root_device_name):
image_meta, flavor, root_device_name,
sev_enabled):
if virt_type == "xen":
if guest.os_type == fields.VMMode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
@ -5367,7 +5372,7 @@ class LibvirtDriver(driver.ComputeDriver):
guest.membacking = self._get_guest_memory_backing_config(
instance.numa_topology,
guest_numa_config.numatune,
flavor)
flavor, image_meta)
guest.metadata.append(self._get_guest_config_meta(instance))
guest.idmaps = self._get_guest_idmaps()
@ -5399,9 +5404,11 @@ class LibvirtDriver(driver.ComputeDriver):
self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
sev_enabled = self._sev_enabled(flavor, image_meta)
self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
image_meta, flavor,
root_device_name)
root_device_name, sev_enabled)
if virt_type not in ('lxc', 'uml'):
self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info)
@ -5457,8 +5464,92 @@ class LibvirtDriver(driver.ComputeDriver):
if mdevs:
self._guest_add_mdevs(guest, mdevs)
if sev_enabled:
self._guest_configure_sev(guest, caps.host.cpu.arch,
guest.os_mach_type)
return guest
def _sev_enabled(self, flavor, image_meta):
"""To enable AMD SEV, the following should be true:
a) the supports_amd_sev instance variable in the host is
true,
b) the instance extra specs and/or image properties request
memory encryption to be enabled, and
c) there are no conflicts between extra specs, image properties
and machine type selection.
Most potential conflicts in c) should already be caught in the
API layer. However there is still one remaining case which
needs to be handled here: when the image does not contain an
hw_machine_type property, the machine type will be chosen from
CONF.libvirt.hw_machine_type if configured, otherwise falling
back to the hardcoded value which is currently 'pc'. If it
ends up being 'pc' or another value not in the q35 family, we
need to raise an exception. So calculate the machine type and
pass it to be checked alongside the other sanity checks which
are run while determining whether SEV is selected.
"""
if not self._host.supports_amd_sev:
return False
mach_type = libvirt_utils.get_machine_type(image_meta)
return hardware.get_mem_encryption_constraint(flavor, image_meta,
mach_type)
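As a hedged aside to the docstring above: on SEV compute nodes the 'pc' fallback is typically avoided by configuring a q35 default machine type, using the same value format exercised by the utils test earlier in this change:

[libvirt]
# Per-arch default machine type; guests whose image sets no
# hw_machine_type property still get a q35 family type.
hw_machine_type = x86_64=q35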
def _guest_configure_sev(self, guest, arch, mach_type):
sev = self._find_sev_feature(arch, mach_type)
if sev is None:
# In theory this should never happen because it should
# only get called if SEV was requested, in which case the
# guest should only get scheduled on this host if it
# supports SEV, and SEV support is dependent on the
# presence of this <sev> feature. That said, it's
# conceivable that something could get messed up along the
# way, e.g. a mismatch in the choice of machine type. So
# make sure that if it ever does happen, we at least get a
# helpful error rather than something cryptic like
# "AttributeError: 'NoneType' object has no attribute 'cbitpos'
raise exception.MissingDomainCapabilityFeatureException(
feature='sev')
designer.set_driver_iommu_for_sev(guest)
self._guest_add_launch_security(guest, sev)
def _guest_add_launch_security(self, guest, sev):
launch_security = vconfig.LibvirtConfigGuestSEVLaunchSecurity()
launch_security.cbitpos = sev.cbitpos
launch_security.reduced_phys_bits = sev.reduced_phys_bits
guest.launch_security = launch_security
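For reference, a hedged sketch of the <launchSecurity> element this is intended to produce, using the cbitpos/reducedPhysBits values from the fake domain capabilities in the tests and the policy shown in FAKE_KVM_GUEST:

<launchSecurity type="sev">
  <policy>0x0033</policy>
  <cbitpos>47</cbitpos>
  <reducedPhysBits>1</reducedPhysBits>
</launchSecurity>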
def _find_sev_feature(self, arch, mach_type):
"""Search domain capabilities for the given arch and machine type
for the <sev> element under <features>, and return it if found.
"""
domain_caps = self._host.get_domain_capabilities()
if arch not in domain_caps:
LOG.warning(
"Wanted to add SEV to config for guest with arch %(arch)s "
"but only had domain capabilities for: %(archs)s",
{'arch': arch, 'archs': ' '.join(domain_caps)})
return None
if mach_type not in domain_caps[arch]:
LOG.warning(
"Wanted to add SEV to config for guest with machine type "
"%(mtype)s but for arch %(arch)s only had domain capabilities "
"for machine types: %(mtypes)s",
{'mtype': mach_type, 'arch': arch,
'mtypes': ' '.join(domain_caps[arch])})
return None
for feature in domain_caps[arch][mach_type].features:
if feature.root_name == 'sev':
return feature
return None
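For context, a hedged sketch of the domain capabilities fragment the loop above matches on (a feature with root_name 'sev'), with the values used by the fake feature in the unit tests:

<domainCapabilities>
  <features>
    <sev supported="yes">
      <cbitpos>47</cbitpos>
      <reducedPhysBits>1</reducedPhysBits>
    </sev>
  </features>
</domainCapabilities>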
def _guest_add_mdevs(self, guest, chosen_mdevs):
for chosen_mdev in chosen_mdevs:
mdev = vconfig.LibvirtConfigGuestHostdevMDEV()