Add default cpu model for AArch64
Unlike x86, AArch64 has no default CPU model. Normally, when the libvirt driver is used with cpu_mode set to custom, nova asks libvirt for the default models. On aarch64, however, the supported CPU models vary with the machine type. AArch64 uses "virt" as the default machine type, and with it QEMU supports several models; "max" should be chosen as the default.

Closes-Bug: #1864588
Change-Id: Ib2df50bda991a659fe10ef1dd9e7ab56800c34fb
Signed-off-by: Kevin Zhao <kevin.zhao@linaro.org>
commit 5d4f82a15c
parent df49ad9b29
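The behaviour this commit introduces can be illustrated with a minimal sketch (this is not Nova's actual driver code; the helper name pick_cpu_models and its signature are invented here for illustration): with cpu_mode "custom" and no cpu_models configured, an AArch64 host falls back to the "max" model, while other architectures keep whatever was configured.

# Minimal sketch of the fallback behaviour, assuming a simplified helper
# rather than Nova's real API.
AARCH64 = 'aarch64'

def pick_cpu_models(host_arch, cpu_mode, configured_models):
    """Return the list of CPU models to use for a guest."""
    if cpu_mode != 'custom':
        return configured_models
    if host_arch == AARCH64 and not configured_models:
        # Neither libvirt nor QEMU can report a host CPU model on
        # AArch64, so 'max' is used as the default.
        return ['max']
    return configured_models

# pick_cpu_models('aarch64', 'custom', [])          -> ['max']
# pick_cpu_models('x86_64', 'custom', ['Haswell'])  -> ['Haswell']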
@@ -1354,6 +1354,23 @@ class LibvirtConnTestCase(test.NoDBTestCase,
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")

    def test__check_cpu_compatibility_aarch64_qemu_custom_start_OK(self):
        """Test that init_host succeeds on AArch64 with virt_type 'qemu',
        cpu_mode 'custom' and the 'max' CPU model.
        """
        self.flags(cpu_mode='custom',
                   cpu_models=['max'],
                   virt_type='qemu',
                   group='libvirt')
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.AARCH64
        with mock.patch.object(host.Host, "get_capabilities",
                               return_value=caps):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            drvr.init_host("dummyhost")

    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_check_cpu_set_configuration__no_configuration(self, mock_log):
        """Test that configuring no CPU option results in no errors or logs.
@@ -7457,6 +7474,36 @@ class LibvirtConnTestCase(test.NoDBTestCase,
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)

    def test_get_guest_cpu_config_qemu_custom_aarch64(self):
        self.flags(cpu_mode="custom", group='libvirt',
                   cpu_models=["max"])
        expected = {
            fields.Architecture.AARCH64: "custom",
        }

        for guestarch, expect_mode in expected.items():
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = vconfig.LibvirtConfigCPU()
            caps.host.cpu.arch = guestarch
            with mock.patch.object(host.Host, "get_capabilities",
                                   return_value=caps):
                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
                if caps.host.cpu.arch == fields.Architecture.AARCH64:
                    drvr._has_uefi_support = mock.Mock(return_value=True)
                instance_ref = objects.Instance(**self.test_instance)
                image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta)
                conf = drvr._get_guest_config(instance_ref,
                                              _fake_network_info(self),
                                              image_meta, disk_info)
                self.assertIsInstance(conf.cpu,
                                      vconfig.LibvirtConfigGuestCPU)
                self.assertEqual(conf.cpu.mode, expect_mode)

    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_get_guest_cpu_config_custom_with_extra_flags(self,
                                                          mock_warn):
@@ -19678,6 +19725,35 @@ class LibvirtConnTestCase(test.NoDBTestCase,
            vpmem_amount += 1
        self.assertEqual(2, vpmem_amount)

    @mock.patch.object(host.Host, "get_capabilities")
    def test_get_cpu_model_mapping(self, mock_cap):
        expected = {
            fields.Architecture.X86_64: ["Haswell", "IvyBridge"],
            fields.Architecture.I686: ["Haswell"],
            fields.Architecture.PPC: ["601_v1"],
            fields.Architecture.PPC64: ["power7"],
            fields.Architecture.PPC64LE: ["power8"],
            fields.Architecture.AARCH64: None,
        }
        for guestarch, expect_model in expected.items():
            if guestarch == fields.Architecture.AARCH64:
                self.flags(cpu_models="max", group='libvirt')
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = vconfig.LibvirtConfigCPU()
            caps.host.cpu.arch = guestarch
            mock_cap.return_value = caps

            with mock.patch.object(host.Host,
                                   "get_cpu_model_names",
                                   return_value=expect_model):
                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
                if guestarch == fields.Architecture.AARCH64:
                    drvr._get_cpu_model_mapping(None)
                else:
                    cpu_model = drvr._get_cpu_model_mapping(expect_model[0])
                    self.assertEqual(cpu_model, expect_model[0])


class TestGuestConfigSysinfoSerialOS(test.NoDBTestCase):

    def setUp(self):
@@ -743,12 +743,6 @@ class LibvirtDriver(driver.ComputeDriver):
            cpu = vconfig.LibvirtConfigGuestCPU()
            for model in models:
                cpu.model = self._get_cpu_model_mapping(model)
                if not cpu.model:
                    msg = (_("Configured CPU model: %(model)s is not correct, "
                             "or your host CPU arch does not support this "
                             "model. Please correct your config and try "
                             "again.") % {'model': model})
                    raise exception.InvalidCPUInfo(msg)
                try:
                    self._compare_cpu(cpu, self._get_cpu_info(), None)
                except exception.InvalidCPUInfo as e:
@@ -4255,14 +4249,32 @@ class LibvirtDriver(driver.ComputeDriver):
           name.

        :param model: Case-insensitive CPU model name.
        :return: Case-sensitive CPU model name, or None(Only when configured
                 CPU model name not correct)
        :return: It will validate and return the case-sensitive CPU model name
                 if on a supported platform, otherwise it will just return
                 what was provided
        :raises: exception.InvalidCPUInfo if the CPU model is not supported.
        """
        cpu_info = self._get_cpu_info()
        if cpu_info['arch'] not in (fields.Architecture.I686,
                                    fields.Architecture.X86_64,
                                    fields.Architecture.PPC64,
                                    fields.Architecture.PPC64LE,
                                    fields.Architecture.PPC):
            return model

        if not self.cpu_models_mapping:
            cpu_models = self._host.get_cpu_model_names()
            for cpu_model in cpu_models:
                self.cpu_models_mapping[cpu_model.lower()] = cpu_model
        return self.cpu_models_mapping.get(model.lower(), None)

        if model.lower() not in self.cpu_models_mapping:
            msg = (_("Configured CPU model: %(model)s is not correct, "
                     "or your host CPU arch does not support this "
                     "model. Please correct your config and try "
                     "again.") % {'model': model})
            raise exception.InvalidCPUInfo(msg)

        return self.cpu_models_mapping.get(model.lower())

    def _get_guest_cpu_model_config(self, flavor=None):
        mode = CONF.libvirt.cpu_mode
@@ -4273,8 +4285,8 @@ class LibvirtDriver(driver.ComputeDriver):

        if (CONF.libvirt.virt_type == "kvm" or
            CONF.libvirt.virt_type == "qemu"):
            caps = self._host.get_capabilities()
            if mode is None:
                caps = self._host.get_capabilities()
                # AArch64 lacks 'host-model' support because neither libvirt
                # nor QEMU are able to tell what the host CPU model exactly is.
                # And there is no CPU description code for ARM(64) at this
@@ -4293,6 +4305,13 @@ class LibvirtDriver(driver.ComputeDriver):
                    mode = "host-model"
            if mode == "none":
                return vconfig.LibvirtConfigGuestCPU()
            # On AArch64, _get_cpu_model_mapping does not return a default
            # CPU model, so fall back to 'max' for custom mode.
            if mode == "custom":
                if caps.host.cpu.arch == fields.Architecture.AARCH64:
                    if not models:
                        models = ['max']

        else:
            if mode is None or mode == "none":
                return None
@@ -10529,6 +10548,11 @@ class LibvirtDriver(driver.ComputeDriver):
        else:
            models = [self._get_cpu_model_mapping(model)
                      for model in CONF.libvirt.cpu_models]

        # The AArch64 platform doesn't return default CPU models
        if caps.host.cpu.arch == fields.Architecture.AARCH64:
            if not models:
                models = ['max']
        # For custom mode, iterate through cpu models
        for model in models:
            caps.host.cpu.model = model
@@ -537,6 +537,10 @@ def get_cpu_model_from_arch(arch):
        mode = 'qemu32'
    elif arch == obj_fields.Architecture.PPC64LE:
        mode = 'POWER8'
    # NOTE(kevinz): On aarch64, the CPU model 'max' offers all of the
    # capabilities QEMU can currently emulate, for both "TCG" and "KVM"
    elif arch == obj_fields.Architecture.AARCH64:
        mode = 'max'
    return mode
releasenotes/notes/bug-1864588-737c29560effd16e.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
fixes:
  - |
    For AArch64, Nova now sets ``max`` as the default CPU model. This does
    the right thing for both QEMU TCG (plain emulation) and KVM (hardware
    acceleration).