Use node's enable_cpu_pinning for scheduling
Previously, the scheduler made decisions based on CONF.enable_cpu_pinning on the controller node. Toggling this config on the controller node would turn CPU pinning on or off for *all* compute nodes at once. This commit fixes that by reading enable_cpu_pinning from the database table instead. As a result, each compute node can individually turn CPU pinning on or off. Change-Id: I466562138205d11054d3bc428247116160f337c8
This commit is contained in:
parent
6d880e6f15
commit
40b99c9d2f
|
@ -36,7 +36,7 @@ class CpuSetFilter(filters.BaseHostFilter):
|
|||
else:
|
||||
container_memory = int(container.memory)
|
||||
if container.cpu_policy == 'dedicated':
|
||||
if CONF.compute.enable_cpu_pinning:
|
||||
if host_state.enable_cpu_pinning:
|
||||
for numa_node in host_state.numa_topology.nodes:
|
||||
if len(numa_node.cpuset) - len(
|
||||
numa_node.pinned_cpus) >= container.cpu and \
|
||||
|
|
|
@ -42,6 +42,7 @@ class HostState(object):
|
|||
self.pci_stats = None
|
||||
self.disk_quota_supported = False
|
||||
self.runtimes = []
|
||||
self.enable_cpu_pinning = False
|
||||
|
||||
# Resource oversubscription values for the compute host:
|
||||
self.limits = {}
|
||||
|
@ -76,6 +77,7 @@ class HostState(object):
|
|||
stats=compute_node.pci_device_pools)
|
||||
self.disk_quota_supported = compute_node.disk_quota_supported
|
||||
self.runtimes = compute_node.runtimes
|
||||
self.enable_cpu_pinning = compute_node.enable_cpu_pinning
|
||||
|
||||
def __repr__(self):
|
||||
return ("%(host)s ram: %(free_ram)sMB "
|
||||
|
|
|
@ -29,7 +29,6 @@ class TestCpuSetFilter(base.TestCase):
|
|||
self.context = context.RequestContext('fake_user', 'fake_project')
|
||||
|
||||
def test_cpuset_filter_pass_dedicated(self):
|
||||
CONF.set_override('enable_cpu_pinning', True, 'compute')
|
||||
self.filt_cls = cpuset_filter.CpuSetFilter()
|
||||
container = objects.Container(self.context)
|
||||
container.cpu_policy = 'dedicated'
|
||||
|
@ -44,11 +43,11 @@ class TestCpuSetFilter(base.TestCase):
|
|||
objects.NUMANode(id=1, cpuset=set([4, 5, 6]), pinned_cpus=set([]),
|
||||
mem_total=32739, mem_available=32739)]
|
||||
)
|
||||
host.enable_cpu_pinning = True
|
||||
extra_spec = {}
|
||||
self.assertTrue(self.filt_cls.host_passes(host, container, extra_spec))
|
||||
|
||||
def test_cpuset_filter_fail_dedicated_1(self):
|
||||
CONF.set_override('enable_cpu_pinning', True, 'compute')
|
||||
self.filt_cls = cpuset_filter.CpuSetFilter()
|
||||
container = objects.Container(self.context)
|
||||
container.cpu_policy = 'dedicated'
|
||||
|
@ -63,12 +62,12 @@ class TestCpuSetFilter(base.TestCase):
|
|||
objects.NUMANode(id=1, cpuset=set([4, 5, 6]), pinned_cpus=set([]),
|
||||
mem_total=32739, mem_available=32739)]
|
||||
)
|
||||
host.enable_cpu_pinning = True
|
||||
extra_spec = {}
|
||||
self.assertFalse(self.filt_cls.host_passes(host,
|
||||
container, extra_spec))
|
||||
|
||||
def test_cpuset_filter_fail_dedicated_2(self):
|
||||
CONF.set_override('enable_cpu_pinning', False, 'compute')
|
||||
self.filt_cls = cpuset_filter.CpuSetFilter()
|
||||
container = objects.Container(self.context)
|
||||
container.cpu_policy = 'dedicated'
|
||||
|
@ -83,6 +82,7 @@ class TestCpuSetFilter(base.TestCase):
|
|||
objects.NUMANode(id=1, cpuset=set([4, 5, 6]), pinned_cpus=set([]),
|
||||
mem_total=32739, mem_available=32739)]
|
||||
)
|
||||
host.enable_cpu_pinning = False
|
||||
extra_spec = {}
|
||||
self.assertFalse(self.filt_cls.host_passes(host,
|
||||
container, extra_spec))
|
||||
|
|
|
@ -86,6 +86,7 @@ class FilterSchedulerTestCase(base.TestCase):
|
|||
node1.pci_device_pools = None
|
||||
node1.disk_quota_supported = True
|
||||
node1.runtimes = ['runc']
|
||||
node1.enable_cpu_pinning = False
|
||||
node2 = objects.ComputeNode(self.context)
|
||||
node2.cpus = 48
|
||||
node2.cpu_used = 0.0
|
||||
|
@ -101,6 +102,7 @@ class FilterSchedulerTestCase(base.TestCase):
|
|||
node2.pci_device_pools = None
|
||||
node2.disk_quota_supported = True
|
||||
node2.runtimes = ['runc']
|
||||
node2.enable_cpu_pinning = False
|
||||
node3 = objects.ComputeNode(self.context)
|
||||
node3.cpus = 48
|
||||
node3.cpu_used = 0.0
|
||||
|
@ -116,6 +118,7 @@ class FilterSchedulerTestCase(base.TestCase):
|
|||
node3.pci_device_pools = None
|
||||
node3.disk_quota_supported = True
|
||||
node3.runtimes = ['runc']
|
||||
node3.enable_cpu_pinning = False
|
||||
node4 = objects.ComputeNode(self.context)
|
||||
node4.cpus = 48
|
||||
node4.cpu_used = 0.0
|
||||
|
@ -131,6 +134,7 @@ class FilterSchedulerTestCase(base.TestCase):
|
|||
node4.pci_device_pools = None
|
||||
node4.disk_quota_supported = True
|
||||
node4.runtimes = ['runc']
|
||||
node4.enable_cpu_pinning = False
|
||||
nodes = [node1, node2, node3, node4]
|
||||
mock_compute_list.return_value = nodes
|
||||
|
||||
|
|
Loading…
Reference in New Issue