libvirt: add realtime support
Updates the CPU scheduler and memory locking so that a guest is able to handle realtime requests. DocImpact: new image property cpu_realtime_mask and new option realtime_scheduler_priority. Blueprint: libvirt-real-time Change-Id: Ia6d8919fc9a5c520ab8eba35f040cdc450b23bd6
This commit is contained in:
parent
b3879bd199
commit
f9237774bf
|
@ -2025,3 +2025,13 @@ class NMINotSupported(Invalid):
|
|||
|
||||
class UnsupportedHostCPUControlPolicy(Invalid):
    """Raised when the host cannot apply the requested CPU control policy."""
    msg_fmt = _("Requested CPU control policy not supported by host")
|
||||
|
||||
|
||||
class RealtimePolicyNotSupported(Invalid):
    """Raised when the hypervisor cannot honor a realtime CPU policy."""
    msg_fmt = _("Realtime policy not supported by hypervisor")
|
||||
|
||||
|
||||
class RealtimeMaskNotFoundOrInvalid(Invalid):
    """Raised when hw:cpu_realtime is requested but the vCPU mask is absent
    or does not leave at least one realtime and one ordinary vCPU.
    """
    msg_fmt = _("Realtime policy needs vCPU(s) mask configured with at least "
                "1 RT vCPU and 1 ordinary vCPU. See hw:cpu_realtime_mask "
                "or hw_cpu_realtime_mask")
|
||||
|
|
|
@ -139,7 +139,8 @@ class ImageMetaProps(base.NovaObject):
|
|||
# Version 1.7: added img_config_drive field
|
||||
# Version 1.8: Added 'lxd' to hypervisor types
|
||||
# Version 1.9: added hw_cpu_thread_policy field
|
||||
VERSION = '1.9'
|
||||
# Version 1.10: added hw_cpu_realtime_mask field
|
||||
VERSION = '1.10'
|
||||
|
||||
def obj_make_compatible(self, primitive, target_version):
|
||||
super(ImageMetaProps, self).obj_make_compatible(primitive,
|
||||
|
@ -209,6 +210,11 @@ class ImageMetaProps(base.NovaObject):
|
|||
# CPU thread allocation policy
|
||||
'hw_cpu_thread_policy': fields.CPUThreadAllocationPolicyField(),
|
||||
|
||||
# CPU mask indicates which vCPUs will have realtime enabled,
|
||||
# example ^0-1 means that all vCPUs except 0 and 1 will have a
|
||||
# realtime policy.
|
||||
'hw_cpu_realtime_mask': fields.StringField(),
|
||||
|
||||
# preferred number of CPU threads per core
|
||||
'hw_cpu_threads': fields.IntegerField(),
|
||||
|
||||
|
|
|
@ -0,0 +1,137 @@
|
|||
# Copyright (C) 2015 Red Hat, Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
import fixtures
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from nova.tests.functional.api import client
|
||||
from nova.tests.functional.test_servers import ServersTestBase
|
||||
from nova.tests.unit import fake_network
|
||||
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
|
||||
from nova.tests.unit.virt.libvirt import fakelibvirt
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NumaHostInfo(fakelibvirt.HostInfo):
    """Fake host info that generates (or accepts) a NUMA topology lazily."""

    def __init__(self, **kwargs):
        super(NumaHostInfo, self).__init__(**kwargs)
        # Filled in by tests that need per-node mempages information.
        self.numa_mempages_list = []

    def get_numa_topology(self):
        """Return the host NUMA topology, generating it on first call.

        Also updates ``self.cpus`` to the number of active (non-disabled)
        CPUs implied by the generated topology.
        """
        if self.numa_topology:
            return self.numa_topology

        topology = self._gen_numa_topology(self.cpu_nodes, self.cpu_sockets,
                                           self.cpu_cores, self.cpu_threads,
                                           self.kB_mem)
        self.numa_topology = topology

        # update number of active cpus
        cpu_count = len(topology.cells) * len(topology.cells[0].cpus)
        self.cpus = cpu_count - len(self.disabled_cpus_list)
        return topology

    def set_custom_numa_toplogy(self, topology):
        # Override the topology instead of generating one.
        # NOTE(review): method name has a typo ("toplogy"); kept as-is since
        # it is part of the public interface used by callers.
        self.numa_topology = topology
|
||||
|
||||
|
||||
class RealTimeServersTest(ServersTestBase):
    """Functional tests for booting servers with a realtime CPU policy."""

    def setUp(self):
        super(RealTimeServersTest, self).setUp()

        # Replace libvirt with fakelibvirt
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.host.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.guest.libvirt',
            fakelibvirt))
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

    def _setup_compute_service(self):
        # Only configure the driver here; the compute service is started
        # inside each test so it can install its own fake connection first.
        self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver')

    def test_no_dedicated_cpu(self):
        # hw:cpu_realtime without hw:cpu_policy=dedicated must be rejected
        # at the API layer.
        flavor = self._create_flavor(extra_spec={'hw:cpu_realtime': 'yes'})
        server = self._build_server(flavor)

        # Cannot set realtime policy in a non dedicated cpu pinning policy
        self.assertRaises(
            client.OpenStackApiException,
            self.api.post_server, {'server': server})

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_invalid_libvirt_version(self, img_mock):
        # version=1002007 (1.2.7) is below MIN_LIBVIRT_REALTIME_VERSION
        # (1.2.13), so the boot must fail.
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002007,
                                                 hv_version=2001000,
                                                 host_info=host_info)
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection):
            self.compute = self.start_service('compute', host='test_compute0')
            fake_network.set_stub_network_methods(self.stubs)

            flavor = self._create_flavor(extra_spec={
                'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated'})
            server = self._build_server(flavor)
            created = self.api.post_server({'server': server})

            instance = self.api.get_server(created['id'])
            instance = self._wait_for_state_change(instance, 'BUILD')

            # Realtime policy not supported by hypervisor
            self.assertEqual('ERROR', instance['status'])
            self._delete_server(instance['id'])

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_success(self, img_mock):
        # version=1002013 (1.2.13) meets the realtime minimum; a dedicated
        # flavor with a valid realtime mask must boot to ACTIVE.
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002013,
                                                 hv_version=2001000,
                                                 host_info=host_info)
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection):
            self.compute = self.start_service('compute', host='test_compute0')
            fake_network.set_stub_network_methods(self.stubs)

            flavor = self._create_flavor(extra_spec={
                'hw:cpu_realtime': 'yes',
                'hw:cpu_policy': 'dedicated',
                'hw:cpu_realtime_mask': '^1'})
            server = self._build_server(flavor)
            created = self.api.post_server({'server': server})

            instance = self.api.get_server(created['id'])
            instance = self._wait_for_state_change(instance, 'BUILD')

            self.assertEqual('ACTIVE', instance['status'])
            self._delete_server(instance['id'])
|
@ -1133,7 +1133,7 @@ object_data = {
|
|||
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
|
||||
'HVSpec': '1.2-db672e73304da86139086d003f3977e7',
|
||||
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
|
||||
'ImageMetaProps': '1.9-cfeffdf93c0ec4745d66dc286559db91',
|
||||
'ImageMetaProps': '1.10-0f1c0f7d7d4cca0facd47524633ca9d1',
|
||||
'Instance': '2.0-ff56804dce87d81d9a04834d4bd1e3d2',
|
||||
'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
|
||||
'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
|
||||
|
|
|
@ -1469,7 +1469,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
|
|||
drvr, "_get_host_numa_topology",
|
||||
return_value=host_topology):
|
||||
return drvr._get_guest_memory_backing_config(
|
||||
inst_topology, numatune)
|
||||
inst_topology, numatune, {})
|
||||
|
||||
@mock.patch.object(host.Host,
|
||||
'has_min_version', return_value=True)
|
||||
|
@ -1527,6 +1527,17 @@ class LibvirtConnTestCase(test.NoDBTestCase):
|
|||
host_topology, inst_topology, numa_tune)
|
||||
self.assertIsNone(result)
|
||||
|
||||
    def test_get_guest_memory_backing_config_realtime(self):
        """A realtime flavor must yield locked, non-shared memory backing."""
        flavor = {"extra_specs": {
            "hw:cpu_realtime": "yes",
            "hw:cpu_policy": "dedicated"
        }}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # No instance topology or numatune needed: realtime alone triggers
        # the memory backing config.
        membacking = drvr._get_guest_memory_backing_config(
            None, None, flavor)
        self.assertTrue(membacking.locked)
        self.assertFalse(membacking.sharedpages)
|
||||
|
||||
@mock.patch.object(
|
||||
host.Host, "is_cpu_control_policy_capable", return_value=True)
|
||||
def test_get_guest_config_numa_host_instance_pci_no_numa_info(
|
||||
|
@ -2161,6 +2172,85 @@ class LibvirtConnTestCase(test.NoDBTestCase):
|
|||
self.assertEqual([instance_cell.id], memnode.nodeset)
|
||||
self.assertEqual("strict", memnode.mode)
|
||||
|
||||
self.assertEqual(0, len(cfg.cputune.vcpusched))
|
||||
self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
|
||||
|
||||
    def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
        """Realtime + dedicated pinning produces a fifo vcpusched element.

        With mask ^0-1, vCPUs 2-3 get the realtime fifo scheduler and
        vCPUs 0-1 become the emulator pin set.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={
                                    "hw:cpu_realtime": "yes",
                                    "hw:cpu_policy": "dedicated",
                                    "hw:cpu_realtime_mask": "^0-1"
                                })
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

        # Each instance cell must map onto a guest NUMA cell in order.
        for instance_cell, numa_cfg_cell, index in zip(
                instance_topology.cells,
                cfg.cpu.numa.cells,
                range(len(instance_topology.cells))):
            self.assertEqual(index, numa_cfg_cell.id)
            self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
            self.assertEqual(instance_cell.memory * units.Ki,
                             numa_cfg_cell.memory)
            self.assertEqual("shared", numa_cfg_cell.memAccess)

        allnodes = [cell.id for cell in instance_topology.cells]
        self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
        self.assertEqual("strict", cfg.numatune.memory.mode)

        for instance_cell, memnode, index in zip(
                instance_topology.cells,
                cfg.numatune.memnodes,
                range(len(instance_topology.cells))):
            self.assertEqual(index, memnode.cellid)
            self.assertEqual([instance_cell.id], memnode.nodeset)
            self.assertEqual("strict", memnode.mode)

        # Realtime vCPUs (2, 3) get a fifo scheduler; the masked-out
        # vCPUs (0, 1) are used to pin the emulator threads.
        self.assertEqual(1, len(cfg.cputune.vcpusched))
        self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
        self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
        self.assertEqual(set([0, 1]), cfg.cputune.emulatorpin.cpuset)
|
||||
|
||||
def test_get_cpu_numa_config_from_instance(self):
|
||||
topology = objects.InstanceNUMATopology(cells=[
|
||||
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
|
||||
|
|
|
@ -2329,3 +2329,34 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
|
|||
self.assertRaises(exception.CPUPinningInvalid,
|
||||
hw.numa_usage_from_instances, host_pin,
|
||||
[inst_pin_1, inst_pin_2])
|
||||
|
||||
|
||||
class CPURealtimeTestCase(test.NoDBTestCase):
    """Unit tests for hw.vcpus_realtime_topology mask handling."""

    def test_success_flavor(self):
        # Mask from the flavor: ^1 excludes vCPU 1 from the realtime set.
        flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^1"}}
        image = objects.ImageMeta.from_dict({})
        rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image)
        self.assertEqual(set([0, 2]), rt)
        self.assertEqual(set([1]), em)

    def test_success_image(self):
        # Mask from the image: ^0-1 excludes vCPUs 0 and 1.
        flavor = {"extra_specs": {}}
        image = objects.ImageMeta.from_dict(
            {"properties": {"hw_cpu_realtime_mask": "^0-1"}})
        rt, em = hw.vcpus_realtime_topology(set([0, 1, 2]), flavor, image)
        self.assertEqual(set([2]), rt)
        self.assertEqual(set([0, 1]), em)

    def test_no_mask_configured(self):
        # Neither flavor nor image carries a mask.
        flavor = {"extra_specs": {}}
        image = objects.ImageMeta.from_dict({"properties": {}})
        self.assertRaises(
            exception.RealtimeMaskNotFoundOrInvalid,
            hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image)

    def test_mask_badly_configured(self):
        # ^0-2 excludes every vCPU, leaving no realtime vCPU.
        flavor = {"extra_specs": {"hw:cpu_realtime_mask": "^0-2"}}
        image = objects.ImageMeta.from_dict({"properties": {}})
        self.assertRaises(
            exception.RealtimeMaskNotFoundOrInvalid,
            hw.vcpus_realtime_topology, set([0, 1, 2]), flavor, image)
|
||||
|
|
|
@ -983,6 +983,29 @@ def is_realtime_enabled(flavor):
|
|||
return strutils.bool_from_string(flavor_rt)
|
||||
|
||||
|
||||
def vcpus_realtime_topology(vcpus_set, flavor, image):
    """Split a vCPU set into realtime and ordinary ('emulator') vCPUs.

    The mask is taken from the image property ``hw_cpu_realtime_mask`` if
    present, otherwise from the flavor extra spec ``hw:cpu_realtime_mask``.

    :param vcpus_set: set of all guest vCPU ids
    :param flavor: flavor dict carrying ``extra_specs``
    :param image: ImageMeta object with a ``properties`` mapping
    :returns: tuple ``(realtime_vcpus, ordinary_vcpus)`` of sets
    :raises exception.RealtimeMaskNotFoundOrInvalid: if no mask is
        configured, or the split does not leave at least one vCPU on
        each side
    """
    flavor_mask = flavor.get('extra_specs', {}).get("hw:cpu_realtime_mask")
    image_mask = image.properties.get("hw_cpu_realtime_mask")

    # The image-supplied mask takes precedence over the flavor's.
    mask = image_mask or flavor_mask
    if not mask:
        raise exception.RealtimeMaskNotFoundOrInvalid()

    # Apply the mask on top of the full vCPU spec to obtain the RT set;
    # whatever remains runs as an ordinary vCPU.
    vcpus_rt = parse_cpu_spec(format_cpu_spec(vcpus_set) + ", " + mask)
    vcpus_em = vcpus_set - vcpus_rt
    if not vcpus_rt or not vcpus_em:
        raise exception.RealtimeMaskNotFoundOrInvalid()

    return vcpus_rt, vcpus_em
|
||||
|
||||
|
||||
def _numa_get_constraints_auto(nodes, flavor):
|
||||
if ((flavor.vcpus % nodes) > 0 or
|
||||
(flavor.memory_mb % nodes) > 0):
|
||||
|
|
|
@ -271,7 +271,12 @@ libvirt_opts = [
|
|||
default=[],
|
||||
help='List of guid targets and ranges.'
|
||||
'Syntax is guest-gid:host-gid:count'
|
||||
'Maximum of 5 allowed.')
|
||||
'Maximum of 5 allowed.'),
|
||||
cfg.IntOpt('realtime_scheduler_priority',
|
||||
default=1,
|
||||
help='In a realtime host context vCPUs for guest will run in '
|
||||
'that scheduling priority. Priority depends on the host '
|
||||
'kernel (usually 1-99)')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
@ -430,6 +435,9 @@ MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0)
|
|||
# Names of the types that do not get compressed during migration
|
||||
NO_COMPRESSION_TYPES = ('qcow2',)
|
||||
|
||||
# realtime support
|
||||
MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13)
|
||||
|
||||
|
||||
class LibvirtDriver(driver.ComputeDriver):
|
||||
capabilities = {
|
||||
|
@ -3629,7 +3637,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
return False
|
||||
|
||||
def _get_guest_numa_config(self, instance_numa_topology, flavor, pci_devs,
|
||||
allowed_cpus=None):
|
||||
allowed_cpus=None, image_meta=None):
|
||||
"""Returns the config objects for the guest NUMA specs.
|
||||
|
||||
Determines the CPUs that the guest can be pinned to if the guest
|
||||
|
@ -3745,6 +3753,23 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
# Sort the vcpupin list per vCPU id for human-friendlier XML
|
||||
guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
|
||||
|
||||
if hardware.is_realtime_enabled(flavor):
|
||||
if not self._host.has_min_version(
|
||||
MIN_LIBVIRT_REALTIME_VERSION):
|
||||
raise exception.RealtimePolicyNotSupported()
|
||||
|
||||
vcpus_rt, vcpus_em = hardware.vcpus_realtime_topology(
|
||||
set(cpu.id for cpu in guest_cpu_tune.vcpupin),
|
||||
flavor, image_meta)
|
||||
|
||||
vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
|
||||
vcpusched.vcpus = vcpus_rt
|
||||
vcpusched.scheduler = "fifo"
|
||||
vcpusched.priority = (
|
||||
CONF.libvirt.realtime_scheduler_priority)
|
||||
guest_cpu_tune.vcpusched.append(vcpusched)
|
||||
guest_cpu_tune.emulatorpin.cpuset = vcpus_em
|
||||
|
||||
guest_numa_tune.memory = numa_mem
|
||||
guest_numa_tune.memnodes = numa_memnodes
|
||||
|
||||
|
@ -3985,7 +4010,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
if rng_is_virtio and rng_allowed:
|
||||
self._add_rng_device(guest, flavor)
|
||||
|
||||
def _get_guest_memory_backing_config(self, inst_topology, numatune):
|
||||
def _get_guest_memory_backing_config(
|
||||
self, inst_topology, numatune, flavor):
|
||||
wantsmempages = False
|
||||
if inst_topology:
|
||||
for cell in inst_topology.cells:
|
||||
|
@ -3993,6 +4019,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
wantsmempages = True
|
||||
break
|
||||
|
||||
wantsrealtime = hardware.is_realtime_enabled(flavor)
|
||||
|
||||
membacking = None
|
||||
if wantsmempages:
|
||||
pages = self._get_memory_backing_hugepages_support(
|
||||
|
@ -4000,6 +4028,11 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
if pages:
|
||||
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
|
||||
membacking.hugepages = pages
|
||||
if wantsrealtime:
|
||||
if not membacking:
|
||||
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
|
||||
membacking.locked = True
|
||||
membacking.sharedpages = False
|
||||
|
||||
return membacking
|
||||
|
||||
|
@ -4186,7 +4219,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
|
||||
|
||||
guest_numa_config = self._get_guest_numa_config(
|
||||
instance.numa_topology, flavor, pci_devs, allowed_cpus)
|
||||
instance.numa_topology, flavor, pci_devs, allowed_cpus, image_meta)
|
||||
|
||||
guest.cpuset = guest_numa_config.cpuset
|
||||
guest.cputune = guest_numa_config.cputune
|
||||
|
@ -4194,7 +4227,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
|
||||
guest.membacking = self._get_guest_memory_backing_config(
|
||||
instance.numa_topology,
|
||||
guest_numa_config.numatune)
|
||||
guest_numa_config.numatune,
|
||||
flavor)
|
||||
|
||||
guest.metadata.append(self._get_guest_config_meta(context,
|
||||
instance))
|
||||
|
|
Loading…
Reference in New Issue