Hyper-V: Adds vNUMA implementation

vNUMA can improve the performance of workloads running on virtual machines
that are configured with large amounts of memory. This feature is useful
for high-performance NUMA-aware applications, such as database or web
servers.

Returns Hyper-V host NUMA node information during get_available_resource.
Adds validation for instances requiring a NUMA topology (asymmetric
topologies and CPU pinning are not supported).
Creates NUMA-aware instances when a NUMA topology is requested (see the
illustrative sketch below).
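
For illustration only, the symmetric split that this validation enforces can
be sketched as follows; the helper name and the flavor values are made up for
the example and are not part of the driver:

# Hypothetical sketch: divide a flavor's vCPUs and memory evenly across the
# requested number of virtual NUMA nodes; an uneven division would yield an
# asymmetric topology, which Hyper-V cannot express.
def split_across_numa_nodes(vcpus, memory_mb, numa_nodes):
    if vcpus % numa_nodes or memory_mb % numa_nodes:
        raise ValueError("Hyper-V requires a symmetric NUMA topology")
    return vcpus // numa_nodes, memory_mb // numa_nodes

# e.g. a flavor with 4 vCPUs, 4096 MB and hw:numa_nodes=2 gives two virtual
# NUMA nodes with 2 vCPUs and 2048 MB each.
print(split_across_numa_nodes(vcpus=4, memory_mb=4096, numa_nodes=2))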

The compute-cpu-topologies page in the admin-guide will have to be
updated to cover Hyper-V NUMA topology usage and configuration.

DocImpact

Change-Id: Iba2110e95e80b9511698cb7df2963fd218264c8e
Implements: blueprint hyper-v-vnuma-enable
Claudiu Belu 2016-02-19 18:12:57 +02:00 committed by Matt Riedemann
parent 07b6580a16
commit 2195e4d684
5 changed files with 198 additions and 12 deletions


@@ -133,6 +133,25 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
self.assertEqual(0, ret_val['used_video_ram'])
self._hostops._hostutils.get_remotefx_gpu_info.assert_not_called()
@mock.patch.object(hostops.objects, 'NUMACell')
@mock.patch.object(hostops.objects, 'NUMATopology')
def test_get_host_numa_topology(self, mock_NUMATopology, mock_NUMACell):
numa_node = {'id': mock.sentinel.id, 'memory': mock.sentinel.memory,
'memory_usage': mock.sentinel.memory_usage,
'cpuset': mock.sentinel.cpuset,
'cpu_usage': mock.sentinel.cpu_usage}
self._hostops._hostutils.get_numa_nodes.return_value = [
numa_node.copy()]
result = self._hostops._get_host_numa_topology()
self.assertEqual(mock_NUMATopology.return_value, result)
mock_NUMACell.assert_called_once_with(
pinned_cpus=set([]), mempages=[], siblings=[], **numa_node)
mock_NUMATopology.assert_called_once_with(
cells=[mock_NUMACell.return_value])
@mock.patch.object(hostops.HostOps, '_get_host_numa_topology')
@mock.patch.object(hostops.HostOps, '_get_remotefx_gpu_info')
@mock.patch.object(hostops.HostOps, '_get_cpu_info')
@mock.patch.object(hostops.HostOps, '_get_memory_info')
@@ -143,7 +162,7 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_storage_info_gb,
mock_get_hypervisor_version,
mock_get_memory_info, mock_get_cpu_info,
mock_get_gpu_info):
mock_get_gpu_info, mock_get_numa_topology):
mock_get_storage_info_gb.return_value = (mock.sentinel.LOCAL_GB,
mock.sentinel.LOCAL_GB_FREE,
mock.sentinel.LOCAL_GB_USED)
@@ -153,6 +172,8 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
mock_cpu_info = self._get_mock_cpu_info()
mock_get_cpu_info.return_value = mock_cpu_info
mock_get_hypervisor_version.return_value = mock.sentinel.VERSION
mock_get_numa_topology.return_value._to_json.return_value = (
mock.sentinel.numa_topology_json)
mock_gpu_info = self._get_mock_gpu_info()
mock_get_gpu_info.return_value = mock_gpu_info
@@ -174,7 +195,7 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
'vcpus': self.FAKE_NUM_CPUS,
'vcpus_used': 0,
'hypervisor_type': 'hyperv',
'numa_topology': None,
'numa_topology': mock.sentinel.numa_topology_json,
'remotefx_available_video_ram': 2048,
'remotefx_gpu_info': mock.sentinel.FAKE_GPU_INFO,
'remotefx_total_video_ram': 4096,


@@ -530,6 +530,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
@mock.patch.object(vmops.VMOps, '_requires_secure_boot')
@mock.patch.object(vmops.VMOps, '_requires_certificate')
@mock.patch.object(vmops.VMOps, '_get_instance_vnuma_config')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.attach_volumes')
@mock.patch.object(vmops.VMOps, '_set_instance_disk_qos_specs')
@@ -543,10 +544,13 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_create_pipes,
mock_set_qos_specs,
mock_attach_volumes,
mock_get_vnuma_config,
mock_requires_certificate,
mock_requires_secure_boot,
enable_instance_metrics,
vm_gen=constants.VM_GEN_1):
vm_gen=constants.VM_GEN_1,
vnuma_enabled=False):
self.flags(dynamic_memory_ratio=2.0, group='hyperv')
self.flags(enable_instance_metrics_collection=enable_instance_metrics,
group='hyperv')
root_device_info = mock.sentinel.ROOT_DEV_INFO
@@ -560,17 +564,33 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
flavor = flavor_obj.Flavor(**test_flavor.fake_flavor)
mock_instance.flavor = flavor
if vnuma_enabled:
mock_get_vnuma_config.return_value = (
mock.sentinel.mem_per_numa, mock.sentinel.cpus_per_numa)
cpus_per_numa = mock.sentinel.cpus_per_numa
mem_per_numa = mock.sentinel.mem_per_numa
dynamic_memory_ratio = 1.0
else:
mock_get_vnuma_config.return_value = (None, None)
mem_per_numa, cpus_per_numa = (None, None)
dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
self._vmops.create_instance(instance=mock_instance,
network_info=[fake_network_info],
root_device=root_device_info,
block_device_info=block_device_info,
vm_gen=vm_gen,
image_meta=mock.sentinel.image_meta)
mock_get_vnuma_config.assert_called_once_with(mock_instance,
mock.sentinel.image_meta)
self._vmops._vmutils.create_vm.assert_called_once_with(
mock_instance.name, mock_instance.flavor.memory_mb,
mock_instance.flavor.vcpus, CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio, vm_gen, instance_path,
[mock_instance.uuid])
mock_instance.name, vnuma_enabled, vm_gen,
instance_path, [mock_instance.uuid])
self._vmops._vmutils.update_vm.assert_called_once_with(
mock_instance.name, mock_instance.flavor.memory_mb, mem_per_numa,
mock_instance.flavor.vcpus, cpus_per_numa,
CONF.hyperv.limit_cpu_features, dynamic_memory_ratio)
mock_configure_remotefx.assert_called_once_with(mock_instance, vm_gen)
mock_create_scsi_ctrl = self._vmops._vmutils.create_scsi_controller
@@ -608,6 +628,59 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self._test_create_instance(enable_instance_metrics=False,
vm_gen=constants.VM_GEN_2)
def test_create_instance_vnuma_enabled(self):
self._test_create_instance(enable_instance_metrics=False,
vnuma_enabled=True)
@mock.patch.object(vmops.hardware, 'numa_get_constraints')
def _check_get_instance_vnuma_config_exception(self, mock_get_numa,
numa_cells):
flavor = {'extra_specs': {}}
mock_instance = mock.MagicMock(flavor=flavor)
image_meta = mock.MagicMock(properties={})
mock_get_numa.return_value.cells = numa_cells
self.assertRaises(exception.InstanceUnacceptable,
self._vmops._get_instance_vnuma_config,
mock_instance, image_meta)
def test_get_instance_vnuma_config_bad_cpuset(self):
cell1 = mock.MagicMock(cpuset=set([0]), memory=1024)
cell2 = mock.MagicMock(cpuset=set([1, 2]), memory=1024)
self._check_get_instance_vnuma_config_exception(
numa_cells=[cell1, cell2])
def test_get_instance_vnuma_config_bad_memory(self):
cell1 = mock.MagicMock(cpuset=set([0]), memory=1024)
cell2 = mock.MagicMock(cpuset=set([1]), memory=2048)
self._check_get_instance_vnuma_config_exception(
numa_cells=[cell1, cell2])
@mock.patch.object(vmops.hardware, 'numa_get_constraints')
def _check_get_instance_vnuma_config(
self, mock_get_numa, numa_topology=None,
expected_mem_per_numa=None, expected_cpus_per_numa=None):
mock_instance = mock.MagicMock()
image_meta = mock.MagicMock()
mock_get_numa.return_value = numa_topology
result_memory_per_numa, result_cpus_per_numa = (
self._vmops._get_instance_vnuma_config(mock_instance, image_meta))
self.assertEqual(expected_cpus_per_numa, result_cpus_per_numa)
self.assertEqual(expected_mem_per_numa, result_memory_per_numa)
def test_get_instance_vnuma_config(self):
cell1 = mock.MagicMock(cpuset=set([0]), memory=2048, cpu_pinning=None)
cell2 = mock.MagicMock(cpuset=set([1]), memory=2048, cpu_pinning=None)
mock_topology = mock.MagicMock(cells=[cell1, cell2])
self._check_get_instance_vnuma_config(numa_topology=mock_topology,
expected_cpus_per_numa=1,
expected_mem_per_numa=2048)
def test_get_instance_vnuma_config_no_topology(self):
self._check_get_instance_vnuma_config()
@mock.patch.object(vmops.volumeops.VolumeOps, 'attach_volume')
def test_attach_root_device_volume(self, mock_attach_volume):
mock_instance = fake_instance.fake_instance_obj(self.context)


@@ -28,6 +28,7 @@ from oslo_utils import units
import nova.conf
from nova.i18n import _
from nova import objects
from nova.objects import fields as obj_fields
from nova.virt.hyperv import constants
from nova.virt.hyperv import pathutils
@@ -120,6 +121,18 @@ class HostOps(object):
'used_video_ram': total_video_ram - available_video_ram,
'gpu_info': jsonutils.dumps(gpus)}
def _get_host_numa_topology(self):
numa_nodes = self._hostutils.get_numa_nodes()
cells = []
for numa_node in numa_nodes:
# Hyper-V does not support CPU pinning / mempages, so initialize
# the rest of the fields with empty values.
numa_node.update(pinned_cpus=set(), mempages=[], siblings=[])
cell = objects.NUMACell(**numa_node)
cells.append(cell)
return objects.NUMATopology(cells=cells)
def get_available_resource(self):
"""Retrieve resource info.
@@ -162,7 +175,7 @@
(obj_fields.Architecture.X86_64,
obj_fields.HVType.HYPERV,
obj_fields.VMMode.HVM)],
'numa_topology': None,
'numa_topology': self._get_host_numa_topology()._to_json(),
}
gpu_info = self._get_remotefx_gpu_info()


@@ -349,15 +349,38 @@ class VMOps(object):
secure_boot_enabled = self._requires_secure_boot(instance, image_meta,
vm_gen)
memory_per_numa_node, cpus_per_numa_node = (
self._get_instance_vnuma_config(instance, image_meta))
if memory_per_numa_node:
LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning "
"has to be disabled in order for the instance to "
"benefit from it.", instance=instance)
if CONF.hyperv.dynamic_memory_ratio > 1.0:
LOG.warning(_LW(
"Instance vNUMA topology requested, but dynamic memory "
"ratio is higher than 1.0 in nova.conf. Ignoring dynamic "
"memory ratio option."), instance=instance)
dynamic_memory_ratio = 1.0
vnuma_enabled = True
else:
dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
vnuma_enabled = False
self._vmutils.create_vm(instance_name,
instance.flavor.memory_mb,
instance.flavor.vcpus,
CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio,
vnuma_enabled,
vm_gen,
instance_path,
[instance.uuid])
self._vmutils.update_vm(instance_name,
instance.flavor.memory_mb,
memory_per_numa_node,
instance.flavor.vcpus,
cpus_per_numa_node,
CONF.hyperv.limit_cpu_features,
dynamic_memory_ratio)
self._configure_remotefx(instance, vm_gen)
self._vmutils.create_scsi_controller(instance_name)
@@ -392,6 +415,47 @@ class VMOps(object):
self._vmutils.enable_secure_boot(
instance.name, msft_ca_required=certificate_required)
def _get_instance_vnuma_config(self, instance, image_meta):
"""Returns the appropriate NUMA configuration for Hyper-V instances,
given the desired instance NUMA topology.
:param instance: instance containing the flavor and its extra_specs,
where the NUMA topology is defined.
:param image_meta: image's metadata, containing properties related to
the instance's NUMA topology.
:returns: memory amount and number of vCPUs per NUMA node or
(None, None), if instance NUMA topology was not requested.
:raises exception.InstanceUnacceptable:
If the given instance NUMA topology is not possible on Hyper-V.
"""
instance_topology = hardware.numa_get_constraints(instance.flavor,
image_meta)
if not instance_topology:
# instance NUMA topology was not requested.
return None, None
memory_per_numa_node = instance_topology.cells[0].memory
cpus_per_numa_node = len(instance_topology.cells[0].cpuset)
# Validate that the requested NUMA topology is not asymmetric: all
# cells must have the same number of CPUs and the same amount of memory.
for cell in instance_topology.cells:
if len(cell.cpuset) != cpus_per_numa_node:
reason = _("Hyper-V does not support NUMA topologies with "
"uneven number of processors. (%(a)s != %(b)s)") % {
'a': len(cell.cpuset), 'b': cpus_per_numa_node}
raise exception.InstanceUnacceptable(reason=reason,
instance_id=instance.uuid)
if cell.memory != memory_per_numa_node:
reason = _("Hyper-V does not support NUMA topologies with "
"uneven amounts of memory. (%(a)s != %(b)s)") % {
'a': cell.memory, 'b': memory_per_numa_node}
raise exception.InstanceUnacceptable(reason=reason,
instance_id=instance.uuid)
return memory_per_numa_node, cpus_per_numa_node
def _configure_remotefx(self, instance, vm_gen):
extra_specs = instance.flavor.extra_specs
remotefx_max_resolution = extra_specs.get(


@@ -0,0 +1,15 @@
---
features:
- The nova Hyper-V driver now supports symmetric NUMA topologies. This means
that all NUMA nodes in the topology must have the same number of vCPUs and
the same amount of memory. A NUMA topology can be requested through the
flavor extra spec "hw:numa_nodes" or the image property "hw_numa_nodes".
An instance with a NUMA topology cannot have dynamic memory enabled. Thus,
if an instance requires a NUMA topology, it will be spawned without dynamic
memory, regardless of the value set in the "dynamic_memory_ratio" config
option in the compute node's "nova.conf" file (as sketched below).
In order to benefit from this feature, the host's NUMA spanning must be
disabled.
Hyper-V does not guarantee CPU pinning; therefore, the nova Hyper-V driver
will not spawn instances whose flavor extra spec "hw:cpu_policy" or image
property "hw_cpu_policy" is set to "dedicated".