
Merge "Apply SEV-specific guest config when SEV is required"

changes/47/678447/13
Zuul 1 week ago
parent commit 95d190b0d8

nova/exception.py (+5, -0)

@@ -2460,6 +2460,11 @@ class FlavorImageConflict(NovaException):
                  "(%(flavor_val)s) and the image (%(image_val)s).")
 
 
+class MissingDomainCapabilityFeatureException(NovaException):
+    msg_fmt = _("Guest config could not be built without domain capabilities "
+                "including <%(feature)s> feature.")
+
+
 class HealPortAllocationException(NovaException):
     msg_fmt = _("Healing port allocation failed.")
 

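Illustrative sketch, not part of the merged diff: the new exception follows the usual NovaException pattern, so the keyword argument supplied at raise time is interpolated into msg_fmt. This mirrors how the driver change further down uses it:

    from nova import exception

    # 'sev' is the domain capability feature the libvirt driver looks for;
    # the resulting message reads "Guest config could not be built without
    # domain capabilities including <sev> feature."
    raise exception.MissingDomainCapabilityFeatureException(feature='sev')
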
nova/tests/unit/virt/libvirt/fake_libvirt_data.py (+84, -4)

@@ -87,13 +87,69 @@ def fake_kvm_guest():
     obj.sysinfo.bios_vendor = "Acme"
     obj.sysinfo.system_version = "1.0.0"
 
+    # obj.devices[0]
     disk = config.LibvirtConfigGuestDisk()
     disk.source_type = "file"
-    disk.source_path = "/tmp/img"
-    disk.target_dev = "/dev/vda"
+    disk.source_path = "/tmp/disk-img"
+    disk.target_dev = "vda"
     disk.target_bus = "virtio"
     obj.add_device(disk)
 
+    # obj.devices[1]
+    disk = config.LibvirtConfigGuestDisk()
+    disk.source_device = "cdrom"
+    disk.source_type = "file"
+    disk.source_path = "/tmp/cdrom-img"
+    disk.target_dev = "sda"
+    disk.target_bus = "sata"
+    obj.add_device(disk)
+
+    # obj.devices[2]
+    intf = config.LibvirtConfigGuestInterface()
+    intf.net_type = "network"
+    intf.mac_addr = "52:54:00:f6:35:8f"
+    intf.model = "virtio"
+    intf.source_dev = "virbr0"
+    obj.add_device(intf)
+
+    # obj.devices[3]
+    balloon = config.LibvirtConfigMemoryBalloon()
+    balloon.model = 'virtio'
+    balloon.period = 11
+    obj.add_device(balloon)
+
+    # obj.devices[4]
+    mouse = config.LibvirtConfigGuestInput()
+    mouse.type = "mouse"
+    mouse.bus = "virtio"
+    obj.add_device(mouse)
+
+    # obj.devices[5]
+    gfx = config.LibvirtConfigGuestGraphics()
+    gfx.type = "vnc"
+    gfx.autoport = True
+    gfx.keymap = "en_US"
+    gfx.listen = "127.0.0.1"
+    obj.add_device(gfx)
+
+    # obj.devices[6]
+    video = config.LibvirtConfigGuestVideo()
+    video.type = 'qxl'
+    obj.add_device(video)
+
+    # obj.devices[7]
+    serial = config.LibvirtConfigGuestSerial()
+    serial.type = "file"
+    serial.source_path = "/tmp/vm.log"
+    obj.add_device(serial)
+
+    # obj.devices[8]
+    rng = config.LibvirtConfigGuestRng()
+    rng.backend = '/dev/urandom'
+    rng.rate_period = '12'
+    rng.rate_bytes = '34'
+    obj.add_device(rng)
+
     return obj
 
 
@@ -151,9 +207,33 @@ FAKE_KVM_GUEST = """
     </cputune>
     <devices>
       <disk type="file" device="disk">
-        <source file="/tmp/img"/>
-        <target bus="virtio" dev="/dev/vda"/>
+        <source file="/tmp/disk-img"/>
+        <target bus="virtio" dev="vda"/>
+      </disk>
+      <disk type="file" device="cdrom">
+        <source file="/tmp/cdrom-img"/>
+        <target bus="sata" dev="sda"/>
       </disk>
+      <interface type='network'>
+        <mac address='52:54:00:f6:35:8f'/>
+        <model type='virtio'/>
+        <source bridge='virbr0'/>
+      </interface>
+      <memballoon model='virtio'>
+        <stats period='11'/>
+      </memballoon>
+      <input type="mouse" bus="virtio"/>
+      <graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
+      <video>
+        <model type='qxl'/>
+      </video>
+      <serial type="file">
+        <source path="/tmp/vm.log"/>
+      </serial>
+      <rng model='virtio'>
+          <rate period='12' bytes='34'/>
+          <backend model='random'>/dev/urandom</backend>
+      </rng>
     </devices>
     <launchSecurity type="sev">
       <policy>0x0033</policy>

nova/tests/unit/virt/libvirt/test_designer.py (+22, -0)

@@ -17,6 +17,7 @@ import mock
 from nova.pci import utils as pci_utils
 from nova import test
 from nova.tests.unit import matchers
+from nova.tests.unit.virt.libvirt import fake_libvirt_data
 from nova.virt.libvirt import config
 from nova.virt.libvirt import designer
 from nova.virt.libvirt import host
@@ -224,3 +225,24 @@ class DesignerTestCase(test.NoDBTestCase):
         conf = config.LibvirtConfigGuestInterface()
         designer.set_vif_mtu_config(conf, 9000)
         self.assertEqual(9000, conf.mtu)
+
+    def test_set_driver_iommu_for_sev(self):
+        conf = fake_libvirt_data.fake_kvm_guest()
+        designer.set_driver_iommu_for_sev(conf)
+
+        # All disks/interfaces/memballoon are expected to be virtio,
+        # thus driver_iommu should be on
+        self.assertEqual(9, len(conf.devices))
+        for i in (0, 2, 3, 8):
+            dev = conf.devices[i]
+            self.assertTrue(
+                dev.driver_iommu,
+                "expected device %d to have driver_iommu enabled\n%s" %
+                (i, dev.to_xml()))
+
+        for i in (1, 4, 6):
+            dev = conf.devices[i]
+            self.assertFalse(
+                dev.driver_iommu,
+                "didn't expect device %i to have driver_iommu enabled\n%s" %
+                (i, dev.to_xml()))

nova/tests/unit/virt/libvirt/test_driver.py (+226, -13)

@@ -14,6 +14,7 @@
 #    under the License.
 
 import binascii
+from collections import defaultdict
 from collections import deque
 from collections import OrderedDict
 import contextlib
@@ -87,7 +88,7 @@ from nova.tests.unit import fake_diagnostics
 from nova.tests.unit import fake_flavor
 from nova.tests.unit import fake_instance
 from nova.tests.unit import fake_network
-import nova.tests.unit.image.fake
+import nova.tests.unit.image.fake as fake_image
 from nova.tests.unit import matchers
 from nova.tests.unit.objects import test_diagnostics
 from nova.tests.unit.objects import test_pci_device
@@ -105,10 +106,12 @@ from nova.virt.image import model as imgmodel
 from nova.virt import images
 from nova.virt.libvirt import blockinfo
 from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import designer
 from nova.virt.libvirt import driver as libvirt_driver
 from nova.virt.libvirt import firewall
 from nova.virt.libvirt import guest as libvirt_guest
 from nova.virt.libvirt import host
+from nova.virt.libvirt.host import SEV_KERNEL_PARAM_FILE
 from nova.virt.libvirt import imagebackend
 from nova.virt.libvirt import imagecache
 from nova.virt.libvirt import migration as libvirt_migrate
@@ -2531,11 +2534,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
     def _test_get_guest_memory_backing_config(
             self, host_topology, inst_topology, numatune):
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
+                                ephemeral_gb=8128, swap=33550336, name='fake',
+                                extra_specs={})
+        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
         with mock.patch.object(
                 drvr, "_get_host_numa_topology",
                 return_value=host_topology):
             return drvr._get_guest_memory_backing_config(
-                inst_topology, numatune, {})
+                inst_topology, numatune, flavor, image_meta)
 
     @mock.patch.object(host.Host,
                        'has_min_version', return_value=True)
@@ -2596,16 +2603,212 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         self.assertIsNone(result)
 
     def test_get_guest_memory_backing_config_realtime(self):
-        flavor = {"extra_specs": {
+        extra_specs = {
             "hw:cpu_realtime": "yes",
             "hw:cpu_policy": "dedicated"
-        }}
+        }
+        flavor = objects.Flavor(name='m1.small',
+                                memory_mb=6,
+                                vcpus=28,
+                                root_gb=496,
+                                ephemeral_gb=8128,
+                                swap=33550336,
+                                extra_specs=extra_specs)
+        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
         membacking = drvr._get_guest_memory_backing_config(
-            None, None, flavor)
+            None, None, flavor, image_meta)
         self.assertTrue(membacking.locked)
         self.assertFalse(membacking.sharedpages)
 
+    def _test_sev_enabled(self, expected=None, host_sev_enabled=False,
+                          enc_extra_spec=None, enc_image_prop=None,
+                          hw_machine_type=None, hw_firmware_type=None):
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        drvr._host._supports_amd_sev = host_sev_enabled
+
+        extra_specs = {}
+        if enc_extra_spec is not None:
+            extra_specs['hw:mem_encryption'] = enc_extra_spec
+        flavor = objects.Flavor(name='m1.fake')
+        flavor.extra_specs = extra_specs
+
+        image_props = {}
+        if hw_machine_type is not None:
+            image_props['hw_machine_type'] = hw_machine_type
+        if hw_firmware_type is not None:
+            image_props['hw_firmware_type'] = hw_firmware_type
+        if enc_image_prop is not None:
+            image_props['hw_mem_encryption'] = enc_image_prop
+
+        image_meta = fake_image.fake_image_obj(
+            {'id': '150d530b-1c57-4367-b754-1f1b5237923d'},
+            {}, image_props)
+
+        enabled = drvr._sev_enabled(flavor, image_meta)
+
+        if expected is None:
+            self.fail("_test_sev_enabled called without an expected "
+                      "return value. Maybe you expected an exception?")
+
+        self.assertEqual(expected, enabled)
+
+    def test_sev_enabled_no_host_support(self):
+        self._test_sev_enabled(False)
+
+    def test_sev_enabled_host_support_no_flavor_image(self):
+        self._test_sev_enabled(False, host_sev_enabled=True)
+
+    def test_sev_enabled_no_host_support_flavor_requested(self):
+        self._test_sev_enabled(False, enc_extra_spec=True)
+
+    def test_sev_enabled_no_host_support_image_requested(self):
+        self._test_sev_enabled(False, enc_image_prop=True)
+
+    def test_sev_enabled_host_support_flavor_requested(self):
+        self._test_sev_enabled(True, host_sev_enabled=True,
+                               enc_extra_spec=True, hw_firmware_type='uefi',
+                               hw_machine_type='q35')
+
+    def test_sev_enabled_host_support_image_requested(self):
+        self._test_sev_enabled(True, host_sev_enabled=True,
+                               enc_image_prop=True, hw_firmware_type='uefi',
+                               hw_machine_type='q35')
+
+    # The cases where the flavor and image requests contradict each other
+    # are already covered by test_hardware.MemEncryptionConflictTestCase
+    # so we don't need to test them in great detail here.
+    def test_sev_enabled_host_extra_spec_image_conflict(self):
+        exc = self.assertRaises(exception.FlavorImageConflict,
+                                self._test_sev_enabled,
+                                host_sev_enabled=True, enc_extra_spec=False,
+                                enc_image_prop=True)
+        self.assertEqual(
+            "Flavor m1.fake has hw:mem_encryption extra spec explicitly set "
+            "to False, conflicting with image fake_image which has "
+            "hw_mem_encryption property explicitly set to True", str(exc))
+
+    def test_sev_enabled_host_extra_spec_no_uefi(self):
+        exc = self.assertRaises(exception.FlavorImageConflict,
+                                self._test_sev_enabled,
+                                host_sev_enabled=True, enc_extra_spec=True)
+        self.assertEqual(
+            "Memory encryption requested by hw:mem_encryption extra spec in "
+            "m1.fake flavor but image fake_image doesn't have "
+            "'hw_firmware_type' property set to 'uefi'", str(exc))
+
+    def test_sev_enabled_host_extra_spec_no_machine_type(self):
+        exc = self.assertRaises(exception.InvalidMachineType,
+                                self._test_sev_enabled,
+                                host_sev_enabled=True, enc_extra_spec=True,
+                                hw_firmware_type='uefi')
+        self.assertEqual(
+            "Machine type 'pc' is not compatible with image fake_image "
+            "(150d530b-1c57-4367-b754-1f1b5237923d): q35 type is required "
+            "for SEV to work", str(exc))
+
+    def test_sev_enabled_host_extra_spec_pc(self):
+        exc = self.assertRaises(exception.InvalidMachineType,
+                                self._test_sev_enabled,
+                                host_sev_enabled=True, enc_extra_spec=True,
+                                hw_firmware_type='uefi', hw_machine_type='pc')
+        self.assertEqual(
+            "Machine type 'pc' is not compatible with image fake_image "
+            "(150d530b-1c57-4367-b754-1f1b5237923d): q35 type is required "
+            "for SEV to work", str(exc))
+
+    def _setup_fake_domain_caps(self, fake_domain_caps):
+        sev_feature = vconfig.LibvirtConfigDomainCapsFeatureSev()
+        sev_feature.cbitpos = 47
+        sev_feature.reduced_phys_bits = 1
+        domain_caps = vconfig.LibvirtConfigDomainCaps()
+        domain_caps._features = vconfig.LibvirtConfigDomainCapsFeatures()
+        domain_caps._features.features = [sev_feature]
+        fake_domain_caps.return_value = defaultdict(
+            dict, {'x86_64': {'q35': domain_caps}})
+
+    @mock.patch.object(host.Host, 'get_domain_capabilities')
+    def test_find_sev_feature_missing_arch(self, fake_domain_caps):
+        self._setup_fake_domain_caps(fake_domain_caps)
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        self.assertIsNone(drvr._find_sev_feature('arm1', 'q35'))
+
+    @mock.patch.object(host.Host, 'get_domain_capabilities')
+    def test_find_sev_feature_missing_mach_type(self, fake_domain_caps):
+        self._setup_fake_domain_caps(fake_domain_caps)
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        self.assertIsNone(drvr._find_sev_feature('x86_64', 'g3beige'))
+
+    @mock.patch.object(host.Host, 'get_domain_capabilities')
+    def test_find_sev_feature(self, fake_domain_caps):
+        self._setup_fake_domain_caps(fake_domain_caps)
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        feature = drvr._find_sev_feature('x86_64', 'q35')
+        self.assertIsInstance(feature,
+                              vconfig.LibvirtConfigDomainCapsFeatureSev)
+        self.assertEqual(47, feature.cbitpos)
+        self.assertEqual(1, feature.reduced_phys_bits)
+
+    @mock.patch.object(libvirt_driver.LibvirtDriver,
+                       "_has_uefi_support", new=mock.Mock(return_value=True))
+    def _setup_sev_guest(self):
+        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+        drvr._host._supports_amd_sev = True
+
+        ctxt = context.RequestContext(project_id=123,
+                                      project_name="aubergine",
+                                      user_id=456,
+                                      user_name="pie")
+
+        extra_specs = {
+            "hw:mem_encryption": True,
+        }
+        flavor = objects.Flavor(name='m1.small',
+                                memory_mb=6,
+                                vcpus=28,
+                                root_gb=496,
+                                ephemeral_gb=8128,
+                                swap=33550336,
+                                extra_specs=extra_specs)
+
+        instance_ref = objects.Instance(**self.test_instance)
+        instance_ref.flavor = flavor
+        image_meta = objects.ImageMeta.from_dict({
+            'id': 'd9c6aeee-8258-4bdb-bca4-39940461b182',
+            'name': 'fakeimage',
+            'disk_format': 'raw',
+            'properties': {'hw_firmware_type': 'uefi',
+                           'hw_machine_type': 'q35'}
+        })
+
+        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                            instance_ref,
+                                            image_meta)
+
+        return drvr._get_guest_config(instance_ref,
+                                      _fake_network_info(self, 1),
+                                      image_meta, disk_info,
+                                      context=ctxt)
+
+    def test_get_guest_config_sev_no_feature(self):
+        self.assertRaises(exception.MissingDomainCapabilityFeatureException,
+                          self._setup_sev_guest)
+
+    @mock.patch.object(host.Host, 'get_domain_capabilities')
+    @mock.patch.object(designer, 'set_driver_iommu_for_sev')
+    def test_get_guest_config_sev(self, mock_designer, fake_domain_caps):
+        self._setup_fake_domain_caps(fake_domain_caps)
+        cfg = self._setup_sev_guest()
+
+        # SEV-related tag should be set
+        self.assertIsInstance(cfg.launch_security,
+                              vconfig.LibvirtConfigGuestSEVLaunchSecurity)
+        self.assertIsInstance(cfg.membacking,
+                              vconfig.LibvirtConfigGuestMemoryBacking)
+        self.assertTrue(cfg.membacking.locked)
+
+        mock_designer.assert_called_once_with(cfg)
+
     def test_get_guest_memory_backing_config_file_backed(self):
         self.flags(file_backed_memory=1024, group="libvirt")
 
@@ -5822,6 +6025,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         self.assertEqual(cfg.devices[5].rate_period, 2)
 
     @mock.patch('nova.virt.libvirt.driver.os.path.exists')
+    @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
    def test_get_guest_config_with_rng_backend(self, mock_path):
         self.flags(virt_type='kvm',
                    rng_dev_path='/dev/hw_rng',
@@ -6500,6 +6704,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                        "_get_guest_storage_config")
     @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
     @mock.patch('os.path.exists', return_value=True)
+    @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
     def test_get_guest_config_aarch64(self, mock_path_exists,
                                       mock_numa, mock_storage, mock_get_arch):
         def get_host_capabilities_stub(self):
@@ -6554,6 +6759,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                        "_get_guest_storage_config")
     @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
     @mock.patch('os.path.exists', return_value=True)
+    @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
     def test_get_guest_config_aarch64_with_graphics(self, mock_path_exists,
                                                     mock_numa, mock_storage,
                                                     mock_get_arch):
@@ -6592,26 +6798,33 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         self.assertTrue(usbhost_exists)
         self.assertTrue(keyboard_exists)
 
-    def test_get_guest_config_machine_type_through_image_meta(self):
-        self.flags(virt_type="kvm",
-                   group='libvirt')
+    def _get_guest_config_machine_type_through_image_meta(self, mach_type):
+        self.flags(virt_type="kvm", group='libvirt')
 
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
         instance_ref = objects.Instance(**self.test_instance)
         image_meta = objects.ImageMeta.from_dict({
             "disk_format": "raw",
-            "properties": {"hw_machine_type":
-                           "fake_machine_type"}})
+            "properties": {"hw_machine_type": mach_type}})
 
         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                             instance_ref,
                                             image_meta)
 
-        cfg = drvr._get_guest_config(instance_ref,
-                                     _fake_network_info(self, 1),
-                                     image_meta, disk_info)
+        return drvr._get_guest_config(instance_ref,
+                                      _fake_network_info(self, 1),
+                                      image_meta, disk_info)
+
+    def test_get_guest_config_machine_type_through_image_meta(self):
+        cfg = self._get_guest_config_machine_type_through_image_meta(
+            "fake_machine_type")
         self.assertEqual(cfg.os_mach_type, "fake_machine_type")
 
+    def test_get_guest_config_machine_type_through_image_meta_sev(self):
+        fake_q35 = "fake-q35-2.11"
+        cfg = self._get_guest_config_machine_type_through_image_meta(fake_q35)
+        self.assertEqual(cfg.os_mach_type, fake_q35)
+
     def test_get_guest_config_machine_type_from_config(self):
         self.flags(virt_type='kvm', group='libvirt')
         self.flags(hw_machine_type=['x86_64=fake_machine_type'],

nova/tests/unit/virt/libvirt/test_utils.py (+7, -0)

@@ -993,3 +993,10 @@ sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
             group="libvirt", hw_machine_type=['x86_64=q35', 'foo']))
         self.assertEqual('q35',
                          libvirt_utils.get_default_machine_type('x86_64'))
+
+    def test_get_machine_type_from_image(self):
+        image_meta = objects.ImageMeta.from_dict({
+            "disk_format": "raw", "properties": {"hw_machine_type": "q35"}
+        })
+        os_mach_type = libvirt_utils.get_machine_type(image_meta)
+        self.assertEqual('q35', os_mach_type)

nova/tests/unit/virt/libvirt/test_vif.py (+3, -0)

@@ -596,6 +596,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
                                 is_public=True, vcpu_weight=None,
                                 id=2, disabled=False, rxtx_factor=1.0)
 
+        if image_meta is None:
+            image_meta = objects.ImageMeta.from_dict({})
+
         conf = self._get_conf()
         hostimpl = host.Host("qemu:///system")
         with mock.patch.object(hostimpl, 'has_min_version',

nova/virt/hardware.py (+16, -10)

@@ -1137,7 +1137,7 @@ def _get_flavor_image_meta(key, flavor, image_meta, default=None):
     return flavor_policy, image_policy
 
 
-def get_mem_encryption_constraint(flavor, image_meta):
+def get_mem_encryption_constraint(flavor, image_meta, machine_type=None):
     """Return a boolean indicating whether encryption of guest memory was
     requested, either via the hw:mem_encryption extra spec or the
     hw_mem_encryption image property (or both).
@@ -1156,12 +1156,16 @@ def get_mem_encryption_constraint(flavor, image_meta):
         3) the flavor and/or image request memory encryption, but the
            machine type is set to a value which does not contain 'q35'
 
-    This is called from the API layer, so get_machine_type() cannot be
-    called since it relies on being run from the compute node in order
-    to retrieve CONF.libvirt.hw_machine_type.
+    This can be called from the libvirt driver on the compute node, in
+    which case the driver should pass the result of
+    nova.virt.libvirt.utils.get_machine_type() as the machine_type
+    parameter, or from the API layer, in which case get_machine_type()
+    cannot be called since it relies on being run from the compute
+    node in order to retrieve CONF.libvirt.hw_machine_type.
 
     :param instance_type: Flavor object
     :param image: an ImageMeta object
+    :param machine_type: a string representing the machine type (optional)
     :raises: nova.exception.FlavorImageConflict
     :raises: nova.exception.InvalidMachineType
     :returns: boolean indicating whether encryption of guest memory
@@ -1196,7 +1200,7 @@ def get_mem_encryption_constraint(flavor, image_meta):
                           image_meta.name)
 
     _check_mem_encryption_uses_uefi_image(requesters, image_meta)
-    _check_mem_encryption_machine_type(image_meta)
+    _check_mem_encryption_machine_type(image_meta, machine_type)
 
     LOG.debug("Memory encryption requested by %s", " and ".join(requesters))
     return True
@@ -1236,7 +1240,7 @@ def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
     raise exception.FlavorImageConflict(emsg % data)
 
 
-def _check_mem_encryption_machine_type(image_meta):
+def _check_mem_encryption_machine_type(image_meta, machine_type=None):
     # NOTE(aspiers): As explained in the SEV spec, SEV needs a q35
     # machine type in order to bind all the virtio devices to the PCIe
     # bridge so that they use virtio 1.0 and not virtio 0.9, since
@@ -1247,10 +1251,12 @@ def _check_mem_encryption_machine_type(image_meta):
     # So if the image explicitly requests a machine type which is not
     # in the q35 family, raise an exception.
     #
-    # Note that this check occurs at API-level, therefore we can't
-    # check here what value of CONF.libvirt.hw_machine_type may have
-    # been configured on the compute node.
-    mach_type = image_meta.properties.get('hw_machine_type')
+    # This check can be triggered both at API-level, at which point we
+    # can't check here what value of CONF.libvirt.hw_machine_type may
+    # have been configured on the compute node, and by the libvirt
+    # driver, in which case the driver can check that config option
+    # and will pass the machine_type parameter.
+    mach_type = machine_type or image_meta.properties.get('hw_machine_type')
 
     # If hw_machine_type is not specified on the image and is not
     # configured correctly on SEV compute nodes, then a separate check

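Illustrative sketch, not part of the merged diff, of the two call patterns the updated docstring describes; it assumes a Flavor object and an ImageMeta object are already in hand:

    from nova.virt import hardware
    from nova.virt.libvirt import utils as libvirt_utils

    # API layer: the machine type is unknown here, so only the image's
    # hw_machine_type property can be checked.
    hardware.get_mem_encryption_constraint(flavor, image_meta)

    # Libvirt driver on the compute node: resolve the machine type first so
    # the q35 check also covers CONF.libvirt.hw_machine_type defaults.
    mach_type = libvirt_utils.get_machine_type(image_meta)
    hardware.get_mem_encryption_constraint(flavor, image_meta, mach_type)
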
nova/virt/libvirt/config.py (+2, -1)

@@ -2991,6 +2991,7 @@ class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
         super(LibvirtConfigGuestRng, self).__init__(root_name="rng",
                                                       **kwargs)
 
+        self.device_model = 'virtio'
         self.model = 'random'
         self.backend = None
         self.rate_period = None
@@ -2999,7 +3000,7 @@ class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
 
     def format_dom(self):
         dev = super(LibvirtConfigGuestRng, self).format_dom()
-        dev.set('model', 'virtio')
+        dev.set('model', self.device_model)
 
         backend = etree.Element("backend")
         backend.set("model", self.model)

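Rough illustration, not part of the merged diff: the <rng> element's model attribute now comes from the new device_model field, which defaults to 'virtio', while the existing model field keeps describing the backend. The serialized output shown is approximate:

    from nova.virt.libvirt import config

    rng = config.LibvirtConfigGuestRng()
    rng.backend = '/dev/urandom'
    print(rng.to_xml())
    # Expected to serialize roughly as:
    # <rng model="virtio">
    #   <backend model="random">/dev/urandom</backend>
    # </rng>
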
nova/virt/libvirt/designer.py (+15, -0)

@@ -21,6 +21,7 @@ classes based on common operational needs / policies
 
 
 from nova.pci import utils as pci_utils
+from nova.virt.libvirt import config
 
 MIN_LIBVIRT_ETHERNET_SCRIPT_PATH_NONE = (1, 3, 3)
 
@@ -196,3 +197,17 @@ def set_vcpu_realtime_scheduler(conf, vcpus_rt, priority):
     conf.vcpus = vcpus_rt
     conf.scheduler = "fifo"
     conf.priority = priority
+
+
+def set_driver_iommu_for_sev(conf):
+    virtio_attrs = {
+        config.LibvirtConfigGuestDisk: 'target_bus',
+        config.LibvirtConfigGuestInterface: 'model',
+        config.LibvirtConfigGuestRng: 'device_model',
+        config.LibvirtConfigMemoryBalloon: 'model',
+    }
+
+    for dev in conf.devices:
+        virtio_attr = virtio_attrs.get(dev.__class__)
+        if virtio_attr and getattr(dev, virtio_attr) == 'virtio':
+            dev.driver_iommu = True

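A minimal sketch of the new helper's effect, not part of the merged diff; it assumes driver_iommu is rendered by the config classes as iommu='on' on the device's <driver> element:

    from nova.virt.libvirt import config
    from nova.virt.libvirt import designer

    guest = config.LibvirtConfigGuest()

    disk = config.LibvirtConfigGuestDisk()
    disk.target_bus = "virtio"    # matched via the 'target_bus' attribute
    guest.add_device(disk)

    cdrom = config.LibvirtConfigGuestDisk()
    cdrom.target_bus = "sata"     # not virtio, so left untouched
    guest.add_device(cdrom)

    designer.set_driver_iommu_for_sev(guest)
    assert disk.driver_iommu
    assert not cdrom.driver_iommu
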
nova/virt/libvirt/driver.py (+95, -4)

@@ -4888,7 +4888,7 @@ class LibvirtDriver(driver.ComputeDriver):
             self._add_rng_device(guest, flavor)
 
     def _get_guest_memory_backing_config(
-            self, inst_topology, numatune, flavor):
+            self, inst_topology, numatune, flavor, image_meta):
         wantsmempages = False
         if inst_topology:
             for cell in inst_topology.cells:
@@ -4928,6 +4928,10 @@ class LibvirtDriver(driver.ComputeDriver):
                     MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION,
                     MIN_QEMU_FILE_BACKED_DISCARD_VERSION):
                 membacking.discard = True
+        if self._sev_enabled(flavor, image_meta):
+            if not membacking:
+                membacking = vconfig.LibvirtConfigGuestMemoryBacking()
+            membacking.locked = True
 
         return membacking
 
@@ -5034,7 +5038,8 @@ class LibvirtDriver(driver.ComputeDriver):
         return True
 
     def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
-                                      image_meta, flavor, root_device_name):
+                                      image_meta, flavor, root_device_name,
+                                      sev_enabled):
         if virt_type == "xen":
             if guest.os_type == fields.VMMode.HVM:
                 guest.os_loader = CONF.libvirt.xen_hvmloader_path
@@ -5367,7 +5372,7 @@ class LibvirtDriver(driver.ComputeDriver):
         guest.membacking = self._get_guest_memory_backing_config(
             instance.numa_topology,
             guest_numa_config.numatune,
-            flavor)
+            flavor, image_meta)
 
         guest.metadata.append(self._get_guest_config_meta(instance))
         guest.idmaps = self._get_guest_idmaps()
@@ -5399,9 +5404,11 @@ class LibvirtDriver(driver.ComputeDriver):
                 self._get_guest_os_type(virt_type))
         caps = self._host.get_capabilities()
 
+        sev_enabled = self._sev_enabled(flavor, image_meta)
+
         self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
                                            image_meta, flavor,
-                                           root_device_name)
+                                           root_device_name, sev_enabled)
         if virt_type not in ('lxc', 'uml'):
             self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
                     instance, inst_path, image_meta, disk_info)
@@ -5457,8 +5464,92 @@ class LibvirtDriver(driver.ComputeDriver):
         if mdevs:
             self._guest_add_mdevs(guest, mdevs)
 
+        if sev_enabled:
+            self._guest_configure_sev(guest, caps.host.cpu.arch,
+                                      guest.os_mach_type)
+
         return guest
 
+    def _sev_enabled(self, flavor, image_meta):
+        """To enable AMD SEV, the following should be true:
+
+        a) the supports_amd_sev instance variable in the host is
+           true,
+        b) the instance extra specs and/or image properties request
+           memory encryption to be enabled, and
+        c) there are no conflicts between extra specs, image properties
+           and machine type selection.
+
+        Most potential conflicts in c) should already be caught in the
+        API layer.  However there is still one remaining case which
+        needs to be handled here: when the image does not contain an
+        hw_machine_type property, the machine type will be chosen from
+        CONF.libvirt.hw_machine_type if configured, otherwise falling
+        back to the hardcoded value which is currently 'pc'.  If it
+        ends up being 'pc' or another value not in the q35 family, we
+        need to raise an exception.  So calculate the machine type and
+        pass it to be checked alongside the other sanity checks which
+        are run while determining whether SEV is selected.
+        """
+        if not self._host.supports_amd_sev:
+            return False
+
+        mach_type = libvirt_utils.get_machine_type(image_meta)
+        return hardware.get_mem_encryption_constraint(flavor, image_meta,
+                                                      mach_type)
+
+    def _guest_configure_sev(self, guest, arch, mach_type):
+        sev = self._find_sev_feature(arch, mach_type)
+        if sev is None:
+            # In theory this should never happen because it should
+            # only get called if SEV was requested, in which case the
+            # guest should only get scheduled on this host if it
+            # supports SEV, and SEV support is dependent on the
+            # presence of this <sev> feature.  That said, it's
+            # conceivable that something could get messed up along the
+            # way, e.g. a mismatch in the choice of machine type.  So
+            # make sure that if it ever does happen, we at least get a
+            # helpful error rather than something cryptic like
+            # "AttributeError: 'NoneType' object has no attribute 'cbitpos'
+            raise exception.MissingDomainCapabilityFeatureException(
+                feature='sev')
+
+        designer.set_driver_iommu_for_sev(guest)
+        self._guest_add_launch_security(guest, sev)
+
+    def _guest_add_launch_security(self, guest, sev):
+        launch_security = vconfig.LibvirtConfigGuestSEVLaunchSecurity()
+        launch_security.cbitpos = sev.cbitpos
+        launch_security.reduced_phys_bits = sev.reduced_phys_bits
+        guest.launch_security = launch_security
+
+    def _find_sev_feature(self, arch, mach_type):
+        """Search domain capabilities for the given arch and machine type
+        for the <sev> element under <features>, and return it if found.
+        """
+        domain_caps = self._host.get_domain_capabilities()
+        if arch not in domain_caps:
+            LOG.warning(
+                "Wanted to add SEV to config for guest with arch %(arch)s "
+                "but only had domain capabilities for: %(archs)s",
+                {'arch': arch, 'archs': ' '.join(domain_caps)})
+            return None
+
+        if mach_type not in domain_caps[arch]:
+            LOG.warning(
+                "Wanted to add SEV to config for guest with machine type "
+                "%(mtype)s but for arch %(arch)s only had domain capabilities "
+                "for machine types: %(mtypes)s",
+                {'mtype': mach_type, 'arch': arch,
+                 'mtypes': ' '.join(domain_caps[arch])})
+            return None
+
+        for feature in domain_caps[arch][mach_type].features:
+            if feature.root_name == 'sev':
+                return feature
+
+        return None
+
     def _guest_add_mdevs(self, guest, chosen_mdevs):
         for chosen_mdev in chosen_mdevs:
             mdev = vconfig.LibvirtConfigGuestHostdevMDEV()

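Condensed view, illustrative only and based on test_get_guest_config_sev above (names as in those tests), of what the new driver plumbing produces when SEV is requested and the host advertises the <sev> domain capability:

    cfg = drvr._get_guest_config(instance_ref, network_info,
                                 image_meta, disk_info)

    # launchSecurity element built from the host's <sev> capability
    isinstance(cfg.launch_security, vconfig.LibvirtConfigGuestSEVLaunchSecurity)
    cfg.launch_security.cbitpos            # 47 in the fake capabilities above
    cfg.launch_security.reduced_phys_bits  # 1

    # SEV guests get a locked memory backing, and the virtio devices have
    # driver_iommu switched on via designer.set_driver_iommu_for_sev().
    cfg.membacking.locked                  # True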