Merge "Have host look for CPU controller of cgroupsv2 location." into stable/xena
This commit is contained in:
commit a3e1707eaf
nova/tests/fixtures/nova.py
@@ -1264,6 +1264,77 @@ class PrivsepFixture(fixtures.Fixture):
                 nova.privsep.sys_admin_pctxt, 'client_mode', False))
 
 
+class CGroupsFixture(fixtures.Fixture):
+    """Mocks checks made for available subsystems on the host's control group.
+
+    The fixture mocks all calls made on the host to verify the capabilities
+    provided by its kernel. Through this, one can simulate the underlying
+    system that hosts run on top of and have tests react to the expected
+    outcomes of such checks.
+
+    Usage sample:
+        >>> cgroups = self.useFixture(CGroupsFixture())
+        >>> cgroups = self.useFixture(CGroupsFixture(version=2))
+        >>> cgroups = self.useFixture(CGroupsFixture())
+        ... cgroups.version = 2
+
+    :attr version: Arranges mocks to simulate the host interacting with nova
+        following the given version of cgroups.
+        Available values are:
+            - 0: All checks related to cgroups will return False.
+            - 1: Checks related to cgroups v1 will return True.
+            - 2: Checks related to cgroups v2 will return True.
+        Defaults to 1.
+    """
+
+    def __init__(self, version=1):
+        self._cpuv1 = None
+        self._cpuv2 = None
+
+        self._version = version
+
+    @property
+    def version(self):
+        return self._version
+
+    @version.setter
+    def version(self, value):
+        self._version = value
+        self._update_mocks()
+
+    def setUp(self):
+        super().setUp()
+        self._cpuv1 = self.useFixture(fixtures.MockPatch(
+            'nova.virt.libvirt.host.Host._has_cgroupsv1_cpu_controller')).mock
+        self._cpuv2 = self.useFixture(fixtures.MockPatch(
+            'nova.virt.libvirt.host.Host._has_cgroupsv2_cpu_controller')).mock
+        self._update_mocks()
+
+    def _update_mocks(self):
+        if not self._cpuv1:
+            return
+
+        if not self._cpuv2:
+            return
+
+        if self.version == 0:
+            self._cpuv1.return_value = False
+            self._cpuv2.return_value = False
+            return
+
+        if self.version == 1:
+            self._cpuv1.return_value = True
+            self._cpuv2.return_value = False
+            return
+
+        if self.version == 2:
+            self._cpuv1.return_value = False
+            self._cpuv2.return_value = True
+            return
+
+        raise ValueError(f"Unknown cgroups version: '{self.version}'.")
+
+
 class NoopQuotaDriverFixture(fixtures.Fixture):
     """A fixture to run tests using the NoopQuotaDriver.
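
Editor's illustration, not part of the commit above: a minimal sketch of how a
test case might consume CGroupsFixture, assuming nova's usual test base class
and the import path used by the existing tests (from nova.tests import
fixtures as nova_fixtures). The class and test names are hypothetical.

    from nova import test
    from nova.tests import fixtures as nova_fixtures


    class CGroupsFixtureUsageSketch(test.NoDBTestCase):
        def setUp(self):
            super().setUp()
            # Default version=1: only the cgroups v1 CPU-controller check
            # reports True.
            self.cgroups = self.useFixture(nova_fixtures.CGroupsFixture())

        def test_host_looks_like_cgroups_v2(self):
            # The version setter re-arms both mocks via _update_mocks(), so
            # the host now appears to run on the unified (v2) hierarchy.
            self.cgroups.version = 2

        def test_host_without_cpu_controller(self):
            # version=0 makes both controller checks report False.
            self.cgroups.version = 0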
@@ -42,6 +42,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
         super(ServersTestBase, self).setUp()
 
         self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+        self.useFixture(nova_fixtures.CGroupsFixture())
         self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
         self.useFixture(nova_fixtures.OSBrickFixture())
 
@@ -421,6 +421,7 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
         self.useFixture(nova_fixtures.NeutronFixture(self))
         self.useFixture(nova_fixtures.GlanceFixture(self))
         self.useFixture(func_fixtures.PlacementFixture())
+        self.useFixture(nova_fixtures.CGroupsFixture())
         fake_network.set_stub_network_methods(self)
 
         api_fixture = self.useFixture(
@@ -75,6 +75,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
             'nova.privsep.libvirt.get_pmem_namespaces',
             return_value=self.fake_pmem_namespaces))
         self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+        self.useFixture(nova_fixtures.CGroupsFixture())
         self.useFixture(fixtures.MockPatch(
             'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
             return_value={'total': 128,
@@ -47,6 +47,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
             'nova.virt.libvirt.guest.libvirt',
             fakelibvirt))
         self.useFixture(nova_fixtures.LibvirtFixture())
+        self.useFixture(nova_fixtures.CGroupsFixture())
 
         self.admin_api = api_fixture.admin_api
         self.api = api_fixture.api
@@ -741,6 +741,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                     imagebackend.Image._get_driver_format)
 
         self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
+        self.cgroups = self.useFixture(nova_fixtures.CGroupsFixture())
 
         # ensure tests perform the same on all host architectures; this is
         # already done by the fakelibvirt fixture but we want to change the
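
Editor's note on the hunks that follow, not part of the commit itself: with
self.cgroups wired into setUp() above, the per-test
@mock.patch.object(host.Host, "is_cpu_control_policy_capable", ...) decorators
become redundant and are dropped below. A test that needs a host without any
CPU controller flips the fixture instead, for example (hypothetical fragment
using names from this test class):

    def test_something_without_cpu_controller(self):
        # Both cgroups controller checks now report False for every Host.
        self.cgroups.version = 0
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr._host.is_cpu_control_policy_capable())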
@@ -2922,9 +2923,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                 'fake-instance-numa-topology',
                 'fake-flavor', 'fake-image-meta').obj_to_primitive())
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_numa_host_instance_fits(self, is_able):
+    def test_get_guest_config_numa_host_instance_fits(self):
         self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
                    group='compute')
         instance_ref = objects.Instance(**self.test_instance)
@@ -2961,9 +2960,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
 
     @mock.patch('nova.privsep.utils.supports_direct_io',
                 new=mock.Mock(return_value=True))
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
+    def test_get_guest_config_numa_host_instance_no_fit(self):
         instance_ref = objects.Instance(**self.test_instance)
         image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
         flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
@@ -3354,10 +3351,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                           self._test_get_guest_memory_backing_config,
                           host_topology, inst_topology, numa_tune)
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_numa_host_instance_pci_no_numa_info(
-            self, is_able):
+    def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
         self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
                    group='compute')
 
@@ -3406,9 +3400,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
 
     @mock.patch('nova.privsep.utils.supports_direct_io',
                 new=mock.Mock(return_value=True))
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
+    def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
         self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
                    group='compute')
         instance_ref = objects.Instance(**self.test_instance)
@@ -3516,10 +3508,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                           exception.NUMATopologyUnsupported,
                           None)
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
-            self, is_able):
+    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
         self.flags(cpu_shared_set='2-3', cpu_dedicated_set=None,
                    group='compute')
 
@@ -3557,9 +3546,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         self.assertEqual(0, len(cfg.cputune.vcpupin))
         self.assertIsNone(cfg.cpu.numa)
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
+    def test_get_guest_config_non_numa_host_instance_topo(self):
         instance_topology = objects.InstanceNUMATopology(cells=[
             objects.InstanceNUMACell(
                 id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
@@ -3606,9 +3593,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
             self.assertEqual(instance_cell.memory * units.Ki,
                              numa_cfg_cell.memory)
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_numa_host_instance_topo(self, is_able):
+    def test_get_guest_config_numa_host_instance_topo(self):
         self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
                    group='compute')
 
@@ -6992,9 +6977,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                           [],
                           image_meta, disk_info)
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_guest_cpu_shares_with_multi_vcpu(self, is_able):
+    def test_guest_cpu_shares_with_multi_vcpu(self):
         self.flags(virt_type='kvm', group='libvirt')
 
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7012,9 +6995,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
 
         self.assertEqual(4096, cfg.cputune.shares)
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_with_cpu_quota(self, is_able):
+    def test_get_guest_config_with_cpu_quota(self):
         self.flags(virt_type='kvm', group='libvirt')
 
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7350,9 +7331,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         self.flags(images_type='rbd', group='libvirt')
         self._test_get_guest_config_disk_cachemodes('rbd')
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=True)
-    def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
+    def test_get_guest_config_with_bogus_cpu_quota(self):
         self.flags(virt_type='kvm', group='libvirt')
 
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7370,9 +7349,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
                           drvr._get_guest_config,
                           instance_ref, [], image_meta, disk_info)
 
-    @mock.patch.object(
-        host.Host, "is_cpu_control_policy_capable", return_value=False)
-    def test_get_update_guest_cputune(self, is_able):
+    def test_get_update_guest_cputune(self):
+        # No CPU controller on the host
+        self.cgroups.version = 0
+
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
         instance_ref = objects.Instance(**self.test_instance)
         instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -21504,6 +21484,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
         self.flags(sysinfo_serial="none", group="libvirt")
         self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
         self.useFixture(nova_fixtures.LibvirtFixture())
+        self.useFixture(nova_fixtures.CGroupsFixture())
         os_vif.initialize()
 
         self.drvr = libvirt_driver.LibvirtDriver(
@@ -1475,25 +1475,59 @@ Active: 8381604 kB
         self.host.compare_cpu("cpuxml")
         mock_compareCPU.assert_called_once_with("cpuxml", 0)
 
-    def test_is_cpu_control_policy_capable_ok(self):
-        m = mock.mock_open(
-            read_data="""cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
-        with mock.patch('builtins.open', m, create=True):
-            self.assertTrue(self.host.is_cpu_control_policy_capable())
-
-    def test_is_cpu_control_policy_capable_ko(self):
-        m = mock.mock_open(
-            read_data="""cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
-        with mock.patch('builtins.open', m, create=True):
-            self.assertFalse(self.host.is_cpu_control_policy_capable())
-
-    @mock.patch('builtins.open', side_effect=IOError)
-    def test_is_cpu_control_policy_capable_ioerror(self, mock_open):
-        self.assertFalse(self.host.is_cpu_control_policy_capable())
+    def test_is_cpu_control_policy_capable_via_neither(self):
+        self.useFixture(nova_fixtures.CGroupsFixture(version=0))
+        self.assertFalse(self.host.is_cpu_control_policy_capable())
+
+    def test_is_cpu_control_policy_capable_via_cgroupsv1(self):
+        self.useFixture(nova_fixtures.CGroupsFixture(version=1))
+        self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+    def test_is_cpu_control_policy_capable_via_cgroupsv2(self):
+        self.useFixture(nova_fixtures.CGroupsFixture(version=2))
+        self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+    def test_has_cgroupsv1_cpu_controller_ok(self):
+        m = mock.mock_open(
+            read_data=(
+                "cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0"
+                "cg /cgroup/memory cg opt1,opt2 0 0"
+            )
+        )
+        with mock.patch("builtins.open", m, create=True):
+            self.assertTrue(self.host._has_cgroupsv1_cpu_controller())
+
+    def test_has_cgroupsv1_cpu_controller_ko(self):
+        m = mock.mock_open(
+            read_data=(
+                "cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0"
+                "cg /cgroup/memory cg opt1,opt2 0 0"
+            )
+        )
+        with mock.patch("builtins.open", m, create=True):
+            self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+    @mock.patch("builtins.open", side_effect=IOError)
+    def test_has_cgroupsv1_cpu_controller_ioerror(self, _):
+        self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+    def test_has_cgroupsv2_cpu_controller_ok(self):
+        m = mock.mock_open(
+            read_data="cpuset cpu io memory hugetlb pids rdma misc"
+        )
+        with mock.patch("builtins.open", m, create=True):
+            self.assertTrue(self.host._has_cgroupsv2_cpu_controller())
+
+    def test_has_cgroupsv2_cpu_controller_ko(self):
+        m = mock.mock_open(
+            read_data="memory pids"
+        )
+        with mock.patch("builtins.open", m, create=True):
+            self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
+
+    @mock.patch("builtins.open", side_effect=IOError)
+    def test_has_cgroupsv2_cpu_controller_ioerror(self, _):
+        self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
 
     def test_get_canonical_machine_type(self):
         # this test relies on configuration from the FakeLibvirtFixture
@@ -831,6 +831,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
         # This is needed for the live migration tests which spawn off the
         # operation for monitoring.
         self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
+        self.useFixture(nova_fixtures.CGroupsFixture())
         # When destroying an instance, os-vif will try to execute some commands
         # which hang tests so let's just stub out the unplug call to os-vif
         # since we don't care about it.
@@ -1462,15 +1462,44 @@ class Host(object):
         CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
         improve scheduler latency.
         """
+        return self._has_cgroupsv1_cpu_controller() or \
+            self._has_cgroupsv2_cpu_controller()
+
+    def _has_cgroupsv1_cpu_controller(self):
+        LOG.debug(f"Searching host: '{self.get_hostname()}' "
+                  "for CPU controller through CGroups V1...")
         try:
             with open("/proc/self/mounts", "r") as fd:
                 for line in fd.readlines():
                     # mount options and split options
                     bits = line.split()[3].split(",")
                     if "cpu" in bits:
+                        LOG.debug("CPU controller found on host.")
                         return True
+                LOG.debug("CPU controller missing on host.")
                 return False
-        except IOError:
+        except IOError as ex:
+            LOG.debug(f"Search failed due to: '{ex}'. "
+                      "Maybe the host is not running under CGroups V1. "
+                      "Deemed host to be missing controller by this approach.")
+            return False
+
+    def _has_cgroupsv2_cpu_controller(self):
+        LOG.debug(f"Searching host: '{self.get_hostname()}' "
+                  "for CPU controller through CGroups V2...")
+        try:
+            with open("/sys/fs/cgroup/cgroup.controllers", "r") as fd:
+                for line in fd.readlines():
+                    bits = line.split()
+                    if "cpu" in bits:
+                        LOG.debug("CPU controller found on host.")
+                        return True
+                LOG.debug("CPU controller missing on host.")
+                return False
+        except IOError as ex:
+            LOG.debug(f"Search failed due to: '{ex}'. "
+                      "Maybe the host is not running under CGroups V2. "
+                      "Deemed host to be missing controller by this approach.")
             return False
 
     def get_canonical_machine_type(self, arch, machine) -> str:
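
Editor's illustration, not part of the commit: the same two probes can be run
outside nova. This standalone sketch mirrors the helpers above, assuming the
standard cgroup locations they read; the function name is illustrative.

    def host_has_cpu_controller():
        # cgroups v1: a mount entry in /proc/self/mounts whose option list
        # (fourth field) contains the "cpu" controller.
        try:
            with open("/proc/self/mounts") as fd:
                for line in fd:
                    options = line.split()[3].split(",")
                    if "cpu" in options:
                        return True
        except IOError:
            pass

        # cgroups v2: the unified hierarchy lists all enabled controllers in
        # a single space-separated file.
        try:
            with open("/sys/fs/cgroup/cgroup.controllers") as fd:
                return "cpu" in fd.read().split()
        except IOError:
            return False


    print("CPU controller available:", host_has_cpu_controller())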