Hyper-V PCI Passthrough

Discrete Device Assignment is a new feature in Windows Server 2016,
allowing users to take some of the PCI Express devices in their
systems and pass them through directly to a guest VM.

DocImpact: The compute-pci-passthrough page in the admin-guide will
have to be updated to include details regarding PCI passthrough on
Hyper-V.

Co-Authored-By: Iulia Toader <itoader@cloudbasesolutions.com>

Depends-On: I8e7782d3e1e9f8e92406604f05504a7754ffa3c2

Change-Id: I5a243213ff4241b6f70d21a02c606e8fc96ce6e6
Implements: blueprint hyper-v-pci-passthrough
Claudiu Belu 2017-01-16 11:52:35 +02:00
parent cee14139bd
commit 9d6f9e9cd5
5 changed files with 136 additions and 4 deletions


@@ -21,6 +21,7 @@ from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
from nova.objects import fields as obj_fields
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostops
@@ -151,6 +152,7 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
mock_NUMATopology.assert_called_once_with(
cells=[mock_NUMACell.return_value])
@mock.patch.object(hostops.HostOps, '_get_pci_passthrough_devices')
@mock.patch.object(hostops.HostOps, '_get_host_numa_topology')
@mock.patch.object(hostops.HostOps, '_get_remotefx_gpu_info')
@mock.patch.object(hostops.HostOps, '_get_cpu_info')
@@ -162,7 +164,8 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_storage_info_gb,
mock_get_hypervisor_version,
mock_get_memory_info, mock_get_cpu_info,
mock_get_gpu_info, mock_get_numa_topology):
mock_get_gpu_info, mock_get_numa_topology,
mock_get_pci_devices):
mock_get_storage_info_gb.return_value = (mock.sentinel.LOCAL_GB,
mock.sentinel.LOCAL_GB_FREE,
mock.sentinel.LOCAL_GB_USED)
@@ -174,6 +177,7 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_hypervisor_version.return_value = mock.sentinel.VERSION
mock_get_numa_topology.return_value._to_json.return_value = (
mock.sentinel.numa_topology_json)
mock_get_pci_devices.return_value = mock.sentinel.pcis
mock_gpu_info = self._get_mock_gpu_info()
mock_get_gpu_info.return_value = mock_gpu_info
@@ -183,6 +187,7 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_memory_info.assert_called_once_with()
mock_get_cpu_info.assert_called_once_with()
mock_get_hypervisor_version.assert_called_once_with()
mock_get_pci_devices.assert_called_once_with()
expected = {'supported_instances': [("i686", "hyperv", "hvm"),
("x86_64", "hyperv", "hvm")],
'hypervisor_hostname': mock_node(),
@@ -199,9 +204,32 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
'remotefx_available_video_ram': 2048,
'remotefx_gpu_info': mock.sentinel.FAKE_GPU_INFO,
'remotefx_total_video_ram': 4096,
'pci_passthrough_devices': mock.sentinel.pcis,
}
self.assertEqual(expected, response)
@mock.patch.object(hostops.jsonutils, 'dumps')
def test_get_pci_passthrough_devices(self, mock_jsonutils_dumps):
mock_pci_dev = {'vendor_id': 'fake_vendor_id',
'product_id': 'fake_product_id',
'dev_id': 'fake_dev_id',
'address': 'fake_address'}
mock_get_pcis = self._hostops._hostutils.get_pci_passthrough_devices
mock_get_pcis.return_value = [mock_pci_dev]
expected_label = 'label_%(vendor_id)s_%(product_id)s' % {
'vendor_id': mock_pci_dev['vendor_id'],
'product_id': mock_pci_dev['product_id']}
expected_pci_dev = mock_pci_dev.copy()
expected_pci_dev.update(dev_type=obj_fields.PciDeviceType.STANDARD,
label=expected_label,
numa_node=None)
result = self._hostops._get_pci_passthrough_devices()
self.assertEqual(mock_jsonutils_dumps.return_value, result)
mock_jsonutils_dumps.assert_called_once_with([expected_pci_dev])
def _test_host_power_action(self, action):
self._hostops._hostutils.host_power_action = mock.Mock()


@@ -528,6 +528,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self.assertEqual([], events)
mock_is_neutron.assert_called_once_with()
@mock.patch.object(vmops.VMOps, '_attach_pci_devices')
@mock.patch.object(vmops.VMOps, '_requires_secure_boot')
@mock.patch.object(vmops.VMOps, '_requires_certificate')
@mock.patch.object(vmops.VMOps, '_get_instance_vnuma_config')
@@ -547,9 +548,11 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_vnuma_config,
mock_requires_certificate,
mock_requires_secure_boot,
mock_attach_pci_devices,
enable_instance_metrics,
vm_gen=constants.VM_GEN_1,
vnuma_enabled=False):
vnuma_enabled=False,
pci_requests=None):
self.flags(dynamic_memory_ratio=2.0, group='hyperv')
self.flags(enable_instance_metrics_collection=enable_instance_metrics,
group='hyperv')
@@ -563,6 +566,11 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
flavor = flavor_obj.Flavor(**test_flavor.fake_flavor)
mock_instance.flavor = flavor
instance_pci_requests = objects.InstancePCIRequests(
requests=pci_requests or [], instance_uuid=mock_instance.uuid)
mock_instance.pci_requests = instance_pci_requests
host_shutdown_action = (os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN
if pci_requests else None)
if vnuma_enabled:
mock_get_vnuma_config.return_value = (
@@ -590,7 +598,8 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self._vmops._vmutils.update_vm.assert_called_once_with(
mock_instance.name, mock_instance.flavor.memory_mb, mem_per_numa,
mock_instance.flavor.vcpus, cpus_per_numa,
CONF.hyperv.limit_cpu_features, dynamic_memory_ratio)
CONF.hyperv.limit_cpu_features, dynamic_memory_ratio,
host_shutdown_action=host_shutdown_action)
mock_configure_remotefx.assert_called_once_with(mock_instance, vm_gen)
mock_create_scsi_ctrl = self._vmops._vmutils.create_scsi_controller
@@ -617,6 +626,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
enable_secure_boot.assert_called_once_with(
mock_instance.name,
msft_ca_required=mock_requires_certificate.return_value)
mock_attach_pci_devices.assert_called_once_with(mock_instance)
def test_create_instance(self):
self._test_create_instance(enable_instance_metrics=True)
@@ -632,6 +642,29 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self._test_create_instance(enable_instance_metrics=False,
vnuma_enabled=True)
def test_create_instance_pci_requested(self):
vendor_id = 'fake_vendor_id'
product_id = 'fake_product_id'
spec = {'vendor_id': vendor_id, 'product_id': product_id}
request = objects.InstancePCIRequest(count=1, spec=[spec])
self._test_create_instance(enable_instance_metrics=False,
pci_requests=[request])
def test_attach_pci_devices(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
vendor_id = 'fake_vendor_id'
product_id = 'fake_product_id'
spec = {'vendor_id': vendor_id, 'product_id': product_id}
request = objects.InstancePCIRequest(count=2, spec=[spec])
instance_pci_requests = objects.InstancePCIRequests(
requests=[request], instance_uuid=mock_instance.uuid)
mock_instance.pci_requests = instance_pci_requests
self._vmops._attach_pci_devices(mock_instance)
self._vmops._vmutils.add_pci_device.assert_has_calls(
[mock.call(mock_instance.name, vendor_id, product_id)] * 2)
@mock.patch.object(vmops.hardware, 'numa_get_constraints')
def _check_get_instance_vnuma_config_exception(self, mock_get_numa,
numa_cells):


@@ -176,12 +176,38 @@ class HostOps(object):
obj_fields.HVType.HYPERV,
obj_fields.VMMode.HVM)],
'numa_topology': self._get_host_numa_topology()._to_json(),
'pci_passthrough_devices': self._get_pci_passthrough_devices(),
}
gpu_info = self._get_remotefx_gpu_info()
dic.update(gpu_info)
return dic
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
Obtains PCI devices information and returns it as a JSON string.
:returns: a JSON string containing a list of the assignable PCI
devices information.
"""
pci_devices = self._hostutils.get_pci_passthrough_devices()
for pci_dev in pci_devices:
# NOTE(claudiub): These fields are required by the PCI tracker.
dev_label = 'label_%(vendor_id)s_%(product_id)s' % {
'vendor_id': pci_dev['vendor_id'],
'product_id': pci_dev['product_id']}
# TODO(claudiub): Find a way to associate the PCI devices with
# the NUMA nodes they are in.
pci_dev.update(dev_type=obj_fields.PciDeviceType.STANDARD,
label=dev_label,
numa_node=None)
return jsonutils.dumps(pci_devices)
def host_power_action(self, action):
"""Reboots, shuts down or powers up the host."""
if action in [constants.HOST_POWER_ACTION_SHUTDOWN,

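For reference, a minimal runnable sketch (not part of the commit) of what
_get_pci_passthrough_devices() produces; the vendor/product IDs below are
made up, and the first four fields mirror what os-win's
get_pci_passthrough_devices() returns, as exercised by the unit test above::

    from oslo_serialization import jsonutils

    # Example input, as reported by os-win (values are illustrative).
    pci_devices = [{'vendor_id': '10de', 'product_id': '13f2',
                    'dev_id': 'fake_dev_id', 'address': '0000:81:00.0'}]
    for dev in pci_devices:
        # obj_fields.PciDeviceType.STANDARD resolves to 'type-PCI'.
        dev.update(dev_type='type-PCI',
                   label='label_%(vendor_id)s_%(product_id)s' % dev,
                   numa_node=None)
    print(jsonutils.dumps(pci_devices))
    # e.g. (key order may vary):
    # [{"vendor_id": "10de", "product_id": "13f2", "dev_id": "fake_dev_id",
    #   "address": "0000:81:00.0", "dev_type": "type-PCI",
    #   "label": "label_10de_13f2", "numa_node": null}]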

@@ -367,6 +367,13 @@ class VMOps(object):
dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
vnuma_enabled = False
if instance.pci_requests.requests:
# NOTE(claudiub): if the instance requires PCI devices, its
# host shutdown action MUST be shutdown.
host_shutdown_action = os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN
else:
host_shutdown_action = None
self._vmutils.create_vm(instance_name,
vnuma_enabled,
vm_gen,
@@ -379,7 +386,8 @@ class VMOps(object):
instance.flavor.vcpus,
cpus_per_numa_node,
CONF.hyperv.limit_cpu_features,
dynamic_memory_ratio)
dynamic_memory_ratio,
host_shutdown_action=host_shutdown_action)
self._configure_remotefx(instance, vm_gen)
@@ -415,6 +423,16 @@ class VMOps(object):
self._vmutils.enable_secure_boot(
instance.name, msft_ca_required=certificate_required)
self._attach_pci_devices(instance)
def _attach_pci_devices(self, instance):
for pci_request in instance.pci_requests.requests:
spec = pci_request.spec[0]
for counter in range(pci_request.count):
self._vmutils.add_pci_device(instance.name,
spec['vendor_id'],
spec['product_id'])
def _get_instance_vnuma_config(self, instance, image_meta):
"""Returns the appropriate NUMA configuration for Hyper-V instances,
given the desired instance NUMA topology.


@@ -0,0 +1,27 @@
---
features:
- |
The nova Hyper-V driver now supports adding PCI passthrough devices to
Hyper-V instances (discrete device assignment). This feature was
introduced in Windows / Hyper-V Server 2016 and makes it possible to
attach some of the host's PCI devices (e.g. GPU devices) directly to
Hyper-V instances.

In order to benefit from this feature, Hyper-V compute nodes must
support SR-IOV and must have assignable PCI devices. This can easily be
checked by running the following PowerShell commands::

    Start-BitsTransfer https://raw.githubusercontent.com/Microsoft/Virtualization-Documentation/master/hyperv-samples/benarm-powershell/DDA/survey-dda.ps1
    .\survey-dda.ps1

The script above will print a list of the assignable PCI devices
available on the host, and whether the host supports SR-IOV.

If the host supports this feature and has at least one assignable PCI
device, the host must be configured to allow those PCI devices to be
assigned to VMs. For information on how to do this, see the guide at
[1].

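As a short sketch of those host-side steps (the friendly name below is a
placeholder for the actual device; see [1] for the full procedure and its
caveats)::

    # Sketch only: disable the device on the host, then dismount it so
    # it becomes assignable to VMs.
    $dev = Get-PnpDevice -FriendlyName "Example GPU"
    Disable-PnpDevice -InstanceId $dev.InstanceId -Confirm:$false
    $locationPath = ($dev |
        Get-PnpDeviceProperty DEVPKEY_Device_LocationPaths).Data[0]
    Dismount-VMHostAssignableDevice -LocationPath $locationPath -Force
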
After the compute nodes have been configured, the nova-api,
nova-scheduler, and nova-compute services will have to be configured as
well [2].

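As a sketch only (the vendor and product IDs below are placeholders and
must match the devices reported by the compute nodes; see [2] for the
authoritative option reference), the devices are whitelisted and aliased
in nova.conf::

    [DEFAULT]
    pci_passthrough_whitelist = {"vendor_id": "10de", "product_id": "13f2"}
    pci_alias = {"vendor_id": "10de", "product_id": "13f2", "device_type": "type-PCI", "name": "gpu"}

A flavor can then request such a device through the alias::

    openstack flavor set m1.large --property "pci_passthrough:alias"="gpu:1"
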
[1] https://blogs.technet.microsoft.com/heyscriptingguy/2016/07/14/passing-through-devices-to-hyper-v-vms-by-using-discrete-device-assignment/
[2] http://docs.openstack.org/admin-guide/compute-pci-passthrough.html