Update HostManager and DiskFilter to use ComputeNode disk ratio

Since the ComputeNode object provides a compatible facade for the Scheduler,
we can get it from the HostManager and expose its disk_allocation_ratio field
up to the DiskFilter.

Because the HostManager calls ComputeNodeList.get_all(), the related HostStates
will all have the allocation ratios from the scheduler's nova.conf.

Change-Id: Idf0202d53239862c208ad2d68167c30086b6024f
Partially-Implements: blueprint disk-allocation-ratio-to-rt
Author: Sylvain Bauza
Date:   2016-02-09 22:52:30 +01:00
parent 5d97e62d19
commit 1d92ef40fd
9 changed files with 46 additions and 28 deletions
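
For illustration, here is a minimal sketch of the oversubscription check that
DiskFilter performs once the ratio comes from the HostState rather than the
global CONF. It is not code from this commit; the function and parameter names
are hypothetical, but the HostState attributes (disk_allocation_ratio,
total_usable_disk_gb, free_disk_mb) are the ones exercised by the tests below.

    def disk_filter_passes(host_state, requested_disk_mb):
        # Per-host ratio, populated by HostState.update() from the
        # ComputeNode object instead of CONF.disk_allocation_ratio.
        ratio = host_state.disk_allocation_ratio

        total_mb = host_state.total_usable_disk_gb * 1024
        used_mb = total_mb - host_state.free_disk_mb
        limit_mb = total_mb * ratio  # oversubscribed capacity

        # e.g. 12 GB total, 11 GB free (1 GB used) with ratio 10.0 gives
        # a 120 GB limit, so up to 119 GB more may be granted (see the
        # oversubscribe tests below).
        return limit_mb - used_mb >= requested_disk_mb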

nova/scheduler/filters/disk_filter.py

@@ -29,7 +29,7 @@ class DiskFilter(filters.BaseHostFilter):
     """Disk Filter with over subscription flag."""
 
     def _get_disk_allocation_ratio(self, host_state, spec_obj):
-        return CONF.disk_allocation_ratio
+        return host_state.disk_allocation_ratio
 
     def host_passes(self, host_state, spec_obj):
         """Filter based on disk usage."""
@@ -73,9 +73,10 @@ class AggregateDiskFilter(DiskFilter):
             'disk_allocation_ratio')
         try:
             ratio = utils.validate_num_values(
-                aggregate_vals, CONF.disk_allocation_ratio, cast_to=float)
+                aggregate_vals, host_state.disk_allocation_ratio,
+                cast_to=float)
         except ValueError as e:
             LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
-            ratio = CONF.disk_allocation_ratio
+            ratio = host_state.disk_allocation_ratio
 
         return ratio

nova/scheduler/host_manager.py

@@ -150,6 +150,7 @@ class HostState(object):
         # Allocation ratios for this host
         self.ram_allocation_ratio = None
         self.cpu_allocation_ratio = None
+        self.disk_allocation_ratio = None
 
         self.updated = None
@@ -241,6 +242,7 @@ class HostState(object):
         # update allocation ratios given by the ComputeNode object
         self.cpu_allocation_ratio = compute.cpu_allocation_ratio
         self.ram_allocation_ratio = compute.ram_allocation_ratio
+        self.disk_allocation_ratio = compute.disk_allocation_ratio
 
     def consume_from_request(self, spec_obj):
         """Incrementally update host state from a RequestSpec object."""

nova/scheduler/ironic_host_manager.py

@@ -59,6 +59,7 @@ class IronicNodeState(host_manager.HostState):
         # update allocation ratios given by the ComputeNode object
         self.cpu_allocation_ratio = compute.cpu_allocation_ratio
         self.ram_allocation_ratio = compute.ram_allocation_ratio
+        self.disk_allocation_ratio = compute.disk_allocation_ratio
         self.updated = compute.updated_at

nova/tests/unit/scheduler/fakes.py

@@ -52,7 +52,8 @@ COMPUTE_NODES = [
         hypervisor_version=0, numa_topology=None,
         hypervisor_type='foo', supported_hv_specs=[],
         pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     objects.ComputeNode(
         id=2, local_gb=2048, memory_mb=2048, vcpus=2,
         disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
@@ -61,7 +62,8 @@ COMPUTE_NODES = [
         hypervisor_version=0, numa_topology=None,
         hypervisor_type='foo', supported_hv_specs=[],
         pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     objects.ComputeNode(
         id=3, local_gb=4096, memory_mb=4096, vcpus=4,
         disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
@@ -70,7 +72,8 @@ COMPUTE_NODES = [
         hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(),
         hypervisor_type='foo', supported_hv_specs=[],
         pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     objects.ComputeNode(
         id=4, local_gb=8192, memory_mb=8192, vcpus=8,
         disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
@@ -79,7 +82,8 @@ COMPUTE_NODES = [
         hypervisor_version=0, numa_topology=None,
         hypervisor_type='foo', supported_hv_specs=[],
         pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     # Broken entry
     objects.ComputeNode(
         id=5, local_gb=1024, memory_mb=1024, vcpus=1,

nova/tests/unit/scheduler/filters/test_disk_filter.py

@@ -24,58 +24,58 @@ class TestDiskFilter(test.NoDBTestCase):
         super(TestDiskFilter, self).setUp()
 
     def test_disk_filter_passes(self):
-        self.flags(disk_allocation_ratio=1.0)
         filt_cls = disk_filter.DiskFilter()
         spec_obj = objects.RequestSpec(
             flavor=objects.Flavor(root_gb=1, ephemeral_gb=1, swap=512))
         host = fakes.FakeHostState('host1', 'node1',
-            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
+             'disk_allocation_ratio': 1.0})
         self.assertTrue(filt_cls.host_passes(host, spec_obj))
 
     def test_disk_filter_fails(self):
-        self.flags(disk_allocation_ratio=1.0)
         filt_cls = disk_filter.DiskFilter()
         spec_obj = objects.RequestSpec(
             flavor=objects.Flavor(
                 root_gb=10, ephemeral_gb=1, swap=1024))
         host = fakes.FakeHostState('host1', 'node1',
-            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
+             'disk_allocation_ratio': 1.0})
         self.assertFalse(filt_cls.host_passes(host, spec_obj))
 
     def test_disk_filter_oversubscribe(self):
-        self.flags(disk_allocation_ratio=10.0)
         filt_cls = disk_filter.DiskFilter()
         spec_obj = objects.RequestSpec(
             flavor=objects.Flavor(
                 root_gb=100, ephemeral_gb=18, swap=1024))
         # 1GB used... so 119GB allowed...
         host = fakes.FakeHostState('host1', 'node1',
-            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
+             'disk_allocation_ratio': 10.0})
         self.assertTrue(filt_cls.host_passes(host, spec_obj))
         self.assertEqual(12 * 10.0, host.limits['disk_gb'])
 
     def test_disk_filter_oversubscribe_fail(self):
-        self.flags(disk_allocation_ratio=10.0)
         filt_cls = disk_filter.DiskFilter()
         spec_obj = objects.RequestSpec(
             flavor=objects.Flavor(
                 root_gb=100, ephemeral_gb=19, swap=1024))
         # 1GB used... so 119GB allowed...
         host = fakes.FakeHostState('host1', 'node1',
-            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
+             'disk_allocation_ratio': 10.0})
         self.assertFalse(filt_cls.host_passes(host, spec_obj))
 
     @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
     def test_aggregate_disk_filter_value_error(self, agg_mock):
         filt_cls = disk_filter.AggregateDiskFilter()
-        self.flags(disk_allocation_ratio=1.0)
         spec_obj = objects.RequestSpec(
             context=mock.sentinel.ctx,
             flavor=objects.Flavor(
                 root_gb=1, ephemeral_gb=1, swap=1024))
         host = fakes.FakeHostState('host1', 'node1',
             {'free_disk_mb': 3 * 1024,
-             'total_usable_disk_gb': 1})
+             'total_usable_disk_gb': 1,
+             'disk_allocation_ratio': 1.0})
         agg_mock.return_value = set(['XXX'])
         self.assertTrue(filt_cls.host_passes(host, spec_obj))
         agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
@@ -83,14 +83,14 @@ class TestDiskFilter(test.NoDBTestCase):
     @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
     def test_aggregate_disk_filter_default_value(self, agg_mock):
         filt_cls = disk_filter.AggregateDiskFilter()
-        self.flags(disk_allocation_ratio=1.0)
         spec_obj = objects.RequestSpec(
             context=mock.sentinel.ctx,
             flavor=objects.Flavor(
                 root_gb=2, ephemeral_gb=1, swap=1024))
         host = fakes.FakeHostState('host1', 'node1',
             {'free_disk_mb': 3 * 1024,
-             'total_usable_disk_gb': 1})
+             'total_usable_disk_gb': 1,
+             'disk_allocation_ratio': 1.0})
         # Uses global conf.
         agg_mock.return_value = set([])
         self.assertFalse(filt_cls.host_passes(host, spec_obj))

nova/tests/unit/scheduler/ironic_fakes.py

@@ -33,7 +33,8 @@ COMPUTE_NODES = [
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
         free_disk_gb=10, free_ram_mb=1024,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     objects.ComputeNode(
         id=2, local_gb=20, memory_mb=2048, vcpus=1,
         vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -47,7 +48,8 @@ COMPUTE_NODES = [
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
         free_disk_gb=20, free_ram_mb=2048,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     objects.ComputeNode(
         id=3, local_gb=30, memory_mb=3072, vcpus=1,
         vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -61,7 +63,8 @@ COMPUTE_NODES = [
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
         free_disk_gb=30, free_ram_mb=3072,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     objects.ComputeNode(
         id=4, local_gb=40, memory_mb=4096, vcpus=1,
         vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -75,7 +78,8 @@ COMPUTE_NODES = [
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
         free_disk_gb=40, free_ram_mb=4096,
-        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+        disk_allocation_ratio=1.0),
     # Broken entry
     objects.ComputeNode(
         id=5, local_gb=50, memory_mb=5120, vcpus=1,

nova/tests/unit/scheduler/test_caching_scheduler.py

@@ -145,6 +145,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
         }
         host_state.cpu_allocation_ratio = 16.0
         host_state.ram_allocation_ratio = 1.5
+        host_state.disk_allocation_ratio = 1.0
         host_state.metrics = objects.MonitorMetricList(objects=[])
         return host_state

nova/tests/unit/scheduler/test_host_manager.py

@@ -832,7 +832,8 @@ class HostStateTestCase(test.NoDBTestCase):
             supported_hv_specs=[],
             hypervisor_version=hyper_ver_int, numa_topology=None,
             pci_device_pools=None, metrics=None,
-            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+            disk_allocation_ratio=1.0)
 
         host = host_manager.HostState("fakehost", "fakenode")
         host.update(compute=compute)
@@ -874,7 +875,8 @@ class HostStateTestCase(test.NoDBTestCase):
             supported_hv_specs=[],
             hypervisor_version=hyper_ver_int, numa_topology=None,
             pci_device_pools=None, metrics=None,
-            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+            disk_allocation_ratio=1.0)
 
         host = host_manager.HostState("fakehost", "fakenode")
         host.update(compute=compute)
@@ -906,7 +908,8 @@ class HostStateTestCase(test.NoDBTestCase):
             supported_hv_specs=[],
             hypervisor_version=hyper_ver_int, numa_topology=None,
             pci_device_pools=None, metrics=None,
-            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+            disk_allocation_ratio=1.0)
 
         host = host_manager.HostState("fakehost", "fakenode")
         host.update(compute=compute)
@@ -1066,7 +1069,8 @@ class HostStateTestCase(test.NoDBTestCase):
             hypervisor_version=hyper_ver_int,
             numa_topology=fakes.NUMA_TOPOLOGY._to_json(),
             stats=None, pci_device_pools=None,
-            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+            disk_allocation_ratio=1.0)
 
         host = host_manager.HostState("fakehost", "fakenode")
         host.update(compute=compute)

nova/tests/unit/scheduler/test_ironic_host_manager.py

@@ -116,7 +116,8 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
             hypervisor_type='ironic',
             hypervisor_version=1,
             hypervisor_hostname='fake_host',
-            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+            disk_allocation_ratio=1.0)
 
     @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
     def test_create_ironic_node_state(self, init_mock):