libvirt: Use firmware metadata files to configure instance

We now have the machinery in place to parse QEMU's firmware metadata
files, which means we no longer need to maintain a hardcoded list of
possible firmware locations, nor build on that list to track extra
information such as whether a given binary supports secure boot. Start
using the new machinery and cut out the legacy code.

Eventually all of this will be changed yet again in favour of libvirt's
firmware auto-selection functionality, but that needs a little more work
before it's suitable for us [1].

[1] https://bugzilla.redhat.com/show_bug.cgi?id=1906500

Blueprint: allow-secure-boot-for-qemu-kvm-guests
Change-Id: Ie99e43cb0408eae4034d410b9dd204cd39984fd1
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2021-02-12 19:16:03 +00:00
parent 452d2fb3a0
commit 9fff6893ce
5 changed files with 134 additions and 146 deletions
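For background: QEMU ships a small JSON "descriptor" file for each firmware build, conventionally installed under /usr/share/qemu/firmware/, and it is these files that the new machinery parses. A minimal sketch of loading them, assuming that directory layout; the helper name is illustrative, not Nova's actual code:

import json
import os

FIRMWARE_DESC_DIR = '/usr/share/qemu/firmware'  # assumed default location

def load_firmware_descriptors(desc_dir=FIRMWARE_DESC_DIR):
    """Parse QEMU firmware descriptor files into dicts."""
    descriptors = []
    # Descriptors are evaluated in lexical filename order, which lets
    # distros control priority via numeric prefixes (e.g. 50-edk2-*.json).
    for name in sorted(os.listdir(desc_dir)):
        if not name.endswith('.json'):
            continue
        with open(os.path.join(desc_dir, name)) as fh:
            descriptors.append(json.load(fh))
    return descriptors

Each descriptor records the binary's path, architecture, supported machine types, NVRAM template and feature flags; the fake_loaders fixture below mirrors this schema.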

View File

@@ -21,6 +21,7 @@ import typing as ty
import fixtures
from lxml import etree
import mock
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
@@ -1963,13 +1964,60 @@ class FakeLibvirtFixture(fixtures.Fixture):
self.useFixture(
fixtures.MockPatch('os.uname', return_value=fake_uname))
# Ensure UEFI checks don't actually check the host
def fake_has_uefi_support():
return os.uname().machine == obj_fields.Architecture.AARCH64
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.LibvirtDriver._has_uefi_support',
side_effect=fake_has_uefi_support))
# ...and on all machine types
fake_loaders = [
{
'description': 'UEFI firmware for x86_64',
'interface-types': ['uefi'],
'mapping': {
'device': 'flash',
'executable': {
'filename': '/usr/share/OVMF/OVMF_CODE.fd',
'format': 'raw',
},
'nvram-template': {
'filename': '/usr/share/OVMF/OVMF_VARS.fd',
'format': 'raw',
},
},
'targets': [
{
'architecture': 'x86_64',
'machines': ['pc-i440fx-*', 'pc-q35-*'],
},
],
'features': ['acpi-s3', 'amd-sev', 'verbose-dynamic'],
'tags': [],
},
{
'description': 'UEFI firmware for aarch64',
'interface-types': ['uefi'],
'mapping': {
'device': 'flash',
'executable': {
'filename': '/usr/share/AAVMF/AAVMF_CODE.fd',
'format': 'raw',
},
'nvram-template': {
'filename': '/usr/share/AAVMF/AAVMF_VARS.fd',
'format': 'raw',
}
},
'targets': [
{
'architecture': 'aarch64',
'machines': ['virt-*'],
}
],
'features': ['verbose-static'],
"tags": [],
}
]
self.useFixture(
fixtures.MockPatch(
'nova.virt.libvirt.host.Host.loaders',
new_callable=mock.PropertyMock,
return_value=fake_loaders))
disable_event_thread(self)
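Given descriptors shaped like fake_loaders above, firmware selection reduces to matching the guest architecture and machine type against each entry's targets, where machine types are glob patterns. A rough sketch of that matching, under the assumption that the descriptors are already parsed; the function name and error handling are illustrative, not Nova's exact Host.get_loader implementation:

import fnmatch

def pick_loader(loaders, arch, machine_type, has_secure_boot=False):
    """Return (loader path, nvram-template path) for the first match."""
    for loader in loaders:
        # Secure boot support is advertised through the 'features' list.
        if has_secure_boot and 'secure-boot' not in loader['features']:
            continue
        for target in loader['targets']:
            if target['architecture'] != arch:
                continue
            # Machine types are globs, e.g. 'pc-q35-*' or 'virt-*'.
            if any(fnmatch.fnmatch(machine_type, m)
                   for m in target['machines']):
                mapping = loader['mapping']
                return (mapping['executable']['filename'],
                        mapping.get('nvram-template', {}).get('filename'))
    raise RuntimeError('no firmware for %s/%s' % (arch, machine_type))

With the fixture data above, pick_loader(fake_loaders, 'x86_64', 'pc-q35-5.2') yields the OVMF_CODE.fd/OVMF_VARS.fd pair that the driver tests below assert against.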

View File

@@ -2459,7 +2459,6 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
xml = obj.to_xml()
self.assertXmlEqual(
xml,
"""
<domain type="kvm">
<uuid>f01cf68d-515c-4daf-b85f-ef1424d93bfc</uuid>
@@ -2471,6 +2470,7 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
<loader secure='yes' readonly='yes' type='pflash'>/tmp/OVMF_CODE.secboot.fd</loader>
</os>
</domain>""", # noqa: E501
xml,
)
def _test_config_uefi_autoconfigure(self, secure):

View File

@@ -3466,6 +3466,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
domain_caps = vconfig.LibvirtConfigDomainCaps()
domain_caps._features = vconfig.LibvirtConfigDomainCapsFeatures()
domain_caps._features.features = [sev_feature]
domain_caps._os = vconfig.LibvirtConfigDomainCapsOS()
domain_caps._os.loader_paths = ['foo']
fake_domain_caps.return_value = collections.defaultdict(
dict, {'x86_64': {'q35': domain_caps}})
@@ -3491,10 +3494,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(47, feature.cbitpos)
self.assertEqual(1, feature.reduced_phys_bits)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_has_uefi_support", new=mock.Mock(return_value=True))
def _setup_sev_guest(self, extra_image_properties=None):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._host._supports_uefi = True
drvr._host._supports_amd_sev = True
ctxt = context.RequestContext(project_id=123,
@@ -5260,21 +5262,18 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"properties": {"hw_firmware_type": "uefi"}})
instance_ref = objects.Instance(**self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(drvr, "_has_uefi_support",
return_value=True) as mock_support:
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
mock_support.assert_called_once_with()
self.assertEqual(cfg.os_loader_type, "pflash")
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance_ref, image_meta)
cfg = drvr._get_guest_config(
instance_ref, [], image_meta, disk_info)
# these values are derived from the FakeLibvirtFixture
self.assertEqual('/usr/share/OVMF/OVMF_CODE.fd', cfg.os_loader)
self.assertEqual('/usr/share/OVMF/OVMF_VARS.fd', cfg.os_nvram_template)
def test_check_uefi_support_aarch64(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._has_uefi_support = mock.Mock(return_value=True)
self.assertTrue(drvr._check_uefi_support(None))
def test_get_guest_config_with_block_device(self):
@@ -6229,8 +6228,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.S390X)
self._stub_host_capabilities_cpu_arch(fields.Architecture.S390X)
instance_ref = objects.Instance(**self.test_instance)
cfg = self._get_guest_config_via_fake_api(instance_ref)
@@ -6246,19 +6243,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual("pty", terminal_device.type)
self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = cpu_arch
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stub_out('nova.virt.libvirt.host.Host.get_capabilities',
get_host_capabilities_stub)
def _get_guest_config_via_fake_api(self, instance):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
@@ -6644,7 +6628,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(host.Host, 'get_domain_capabilities')
@mock.patch.object(designer, 'set_driver_iommu_for_sev')
def test_get_guest_config_with_qga_through_image_meta_with_sev(
self, mock_designer, fake_domain_caps):
self, mock_designer, fake_domain_caps,
):
self._setup_fake_domain_caps(fake_domain_caps)
extra_properties = {"hw_qemu_guest_agent": "yes"}
cfg = self._setup_sev_guest(extra_properties)
@@ -7678,9 +7663,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
_fake_network_info(self),
image_meta, disk_info)
self.assertTrue(mock_path_exists.called)
expected = mock.call(libvirt_driver.
DEFAULT_UEFI_LOADER_PATH['aarch64'][0])
self.assertIn(expected, mock_path_exists.mock_calls)
self.assertEqual(cfg.os_mach_type, "virt")
num_ports = 0
@@ -7711,9 +7693,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = self._get_guest_config_with_graphics()
self.assertTrue(mock_path_exists.called)
expected = mock.call(libvirt_driver.
DEFAULT_UEFI_LOADER_PATH['aarch64'][0])
self.assertIn(expected, mock_path_exists.mock_calls)
self.assertEqual(cfg.os_mach_type, "virt")
usbhost_exists = False
@@ -7972,27 +7951,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta, disk_info)
self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_host_passthrough(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="host-passthrough", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-passthrough")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_host_passthrough_aarch64(self):
def test_get_guest_cpu_config_automatic(self):
expected = {
fields.Architecture.X86_64: "host-model",
fields.Architecture.I686: "host-model",
@@ -8016,34 +7975,32 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, expect_mode)
def test_get_guest_cpu_config_host_model(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
def test_get_guest_cpu_config_manual(self):
for mode in ('host-passthrough', 'host-model'):
self.flags(cpu_mode=mode, group='libvirt')
self.flags(cpu_mode="host-model", group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
conf = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
image_meta, disk_info)
self.assertIsInstance(conf.cpu,
vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, "host-model")
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance_ref, image_meta)
conf = drvr._get_guest_config(
instance_ref, _fake_network_info(self), image_meta, disk_info)
self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, mode)
self.assertIsNone(conf.cpu.model)
self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus)
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_custom(self):
self.flags(cpu_mode="custom", cpu_models=["Penryn"], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
group='libvirt')
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
@@ -16556,7 +16513,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=False)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=
hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
@@ -16579,7 +16535,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=False)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=
hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
@@ -16605,7 +16560,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=False)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=
hardware.InstanceInfo(state=power_state.SHUTDOWN, internal_id=-1)
@@ -16631,7 +16585,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=True)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=hardware.InstanceInfo(
state=power_state.SHUTDOWN, internal_id=-1))
@@ -16660,7 +16613,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._host._get_domain = mock.Mock(return_value=mock_domain)
drvr._has_uefi_support = mock.Mock(return_value=True)
drvr.delete_instance_files = mock.Mock(return_value=None)
drvr.get_info = mock.Mock(return_value=hardware.InstanceInfo(
state=power_state.SHUTDOWN, internal_id=-1))

View File

@@ -84,6 +84,9 @@ class LibvirtConfigObject(object):
pretty_print=pretty_print)
return xml_str
def __repr__(self):
return self.to_xml(pretty_print=False)
class LibvirtConfigCaps(LibvirtConfigObject):
@@ -2784,6 +2787,8 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.os_firmware = None
self.os_loader_type = None
self.os_loader_secure = None
self.os_nvram = None
self.os_nvram_template = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
@@ -2841,20 +2846,28 @@ class LibvirtConfigGuest(LibvirtConfigObject):
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
# Generate XML nodes for UEFI boot.
if (
self.os_loader is not None or
self.os_loader_type is not None or
self.os_loader_secure is not None
):
loader = self._text_node("loader", self.os_loader)
if self.os_loader_type is not None:
loader.set("type", "pflash")
loader.set("type", self.os_loader_type)
loader.set("readonly", "yes")
if self.os_loader_secure is not None:
loader.set(
"secure", self.get_yes_no_str(self.os_loader_secure))
os.append(loader)
if (
self.os_nvram is not None or
self.os_nvram_template is not None
):
nvram = self._text_node("nvram", self.os_nvram)
nvram.set("template", self.os_nvram_template)
os.append(nvram)
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
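The net effect of the new os_nvram/os_nvram_template fields is an extra <nvram> element alongside <loader> in the generated <os> block. A standalone sketch of that shape, built here with lxml directly rather than through the config classes; the guest NVRAM path is an illustrative value:

from lxml import etree

os_elem = etree.Element('os')
loader = etree.SubElement(os_elem, 'loader')
loader.text = '/usr/share/OVMF/OVMF_CODE.fd'
loader.set('type', 'pflash')
loader.set('readonly', 'yes')
nvram = etree.SubElement(os_elem, 'nvram')
nvram.text = '/var/lib/libvirt/qemu/nvram/guest_VARS.fd'  # illustrative
nvram.set('template', '/usr/share/OVMF/OVMF_VARS.fd')
print(etree.tostring(os_elem, pretty_print=True).decode())
# <os>
#   <loader type="pflash" readonly="yes">/usr/share/OVMF/OVMF_CODE.fd</loader>
#   <nvram template="/usr/share/OVMF/OVMF_VARS.fd">/var/lib/libvirt/qemu/nvram/guest_VARS.fd</nvram>
# </os>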

View File

@@ -140,14 +140,6 @@ LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
DEFAULT_UEFI_LOADER_PATH = {
"x86_64": ['/usr/share/OVMF/OVMF_CODE.fd',
'/usr/share/OVMF/OVMF_CODE.secboot.fd',
'/usr/share/qemu/ovmf-x86_64-code.bin'],
"aarch64": ['/usr/share/AAVMF/AAVMF_CODE.fd',
'/usr/share/qemu/aavmf-aarch64-code.bin']
}
MAX_CONSOLE_BYTES = 100 * units.Ki
VALID_DISK_CACHEMODES = [
"default", "none", "writethrough", "writeback", "directsync", "unsafe",
@@ -5819,40 +5811,12 @@ class LibvirtDriver(driver.ComputeDriver):
return flavor
return instance.flavor
def _has_uefi_support(self):
# This means that the host can support UEFI booting for guests
supported_archs = [fields.Architecture.X86_64,
fields.Architecture.AARCH64]
caps = self._host.get_capabilities()
# TODO(dmllr, kchamart): Get rid of probing the OVMF binary file
# paths, it is not robust, because nothing but the binary's
# filename is reported, which means you have to detect its
# architecture and features by other means. To solve this,
# query the libvirt's getDomainCapabilities() to get the
# firmware paths (as reported in the 'loader' value). Nova now
# has a wrapper method for this, get_domain_capabilities().
# This is a more reliable way to detect UEFI boot support.
#
# Further, with libvirt 5.3 onwards, support for UEFI boot is
# much more simplified by the "firmware auto-selection" feature.
# When using this, Nova doesn't need to query OVMF file paths at
# all; libvirt will take care of it. This is done by taking
# advantage of the so-called firmware "descriptor files" --
# small JSON files (which will be shipped by Linux
# distributions) that describe a UEFI firmware binary's
# "characteristics", such as the binary's file path, its
# features, architecture, supported machine type, NVRAM template
# and so forth.
return ((caps.host.cpu.arch in supported_archs) and
any((os.path.exists(p)
for p in DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch])))
def _check_uefi_support(self, hw_firmware_type):
caps = self._host.get_capabilities()
return (self._has_uefi_support() and
(hw_firmware_type == fields.FirmwareType.UEFI or
caps.host.cpu.arch == fields.Architecture.AARCH64))
return self._host.supports_uefi and (
hw_firmware_type == fields.FirmwareType.UEFI or
caps.host.cpu.arch == fields.Architecture.AARCH64
)
def _get_supported_perf_events(self):
if not len(CONF.libvirt.enabled_perf_events):
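With the path-probing helper gone, UEFI support becomes a host capability derived from the parsed descriptors themselves. A hedged sketch of what such a supports_uefi check can look like, assuming a loaders list shaped like the fixture's; this is an approximation, not a copy of nova.virt.libvirt.host.Host:

class HostSketch:
    """Toy stand-in showing a descriptor-driven capability check."""

    def __init__(self, loaders):
        self._loaders = loaders

    @property
    def supports_uefi(self):
        # The host can boot UEFI guests if any installed firmware
        # descriptor advertises the 'uefi' interface type.
        return any('uefi' in loader.get('interface-types', [])
                   for loader in self._loaders)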
@@ -5891,6 +5855,9 @@ class LibvirtDriver(driver.ComputeDriver):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
mach_type = libvirt_utils.get_machine_type(image_meta)
guest.os_mach_type = mach_type
hw_firmware_type = image_meta.properties.get('hw_firmware_type')
if arch == fields.Architecture.AARCH64:
@@ -5898,22 +5865,30 @@ class LibvirtDriver(driver.ComputeDriver):
hw_firmware_type = fields.FirmwareType.UEFI
if hw_firmware_type == fields.FirmwareType.UEFI:
if self._has_uefi_support():
global uefi_logged
if not uefi_logged:
LOG.warning("uefi support is without some kind of "
"functional testing and therefore "
"considered experimental.")
uefi_logged = True
for lpath in DEFAULT_UEFI_LOADER_PATH[arch]:
if os.path.exists(lpath):
guest.os_loader = lpath
guest.os_loader_type = "pflash"
else:
global uefi_logged
if not uefi_logged:
LOG.warning("uefi support is without some kind of "
"functional testing and therefore "
"considered experimental.")
uefi_logged = True
if not self._host.supports_uefi:
raise exception.UEFINotSupported()
mtype = libvirt_utils.get_machine_type(image_meta)
guest.os_mach_type = mtype
# TODO(stephenfin): Drop this when we drop support for legacy
# architectures
if not mach_type:
# loaders are specific to arch and machine type - if we
# don't have a machine type here, we're on a legacy
# architecture that we have no default machine type for
raise exception.UEFINotSupported()
loader, nvram_template = self._host.get_loader(
arch, mach_type, has_secure_boot=False)
guest.os_loader = loader
guest.os_loader_type = 'pflash'
guest.os_nvram_template = nvram_template
# NOTE(lyarwood): If the machine type isn't recorded in the stashed
# image metadata then record it through the system metadata table.
@@ -5925,7 +5900,7 @@ class LibvirtDriver(driver.ComputeDriver):
# nova.objects.ImageMeta.from_instance and the
# nova.utils.get_image_from_system_metadata function.
if image_meta.properties.get('hw_machine_type') is None:
instance.system_metadata['image_hw_machine_type'] = mtype
instance.system_metadata['image_hw_machine_type'] = mach_type
if image_meta.properties.get('hw_boot_menu') is None:
guest.os_bootmenu = strutils.bool_from_string(