Update HostManager and filters to use ComputeNode ratios

Since the ComputeNode object provides a compatible facade for the
Scheduler, we can get it from the HostManager and surface those fields
to the filters. Because the HostManager calls ComputeNodeList.get_all(),
the related HostStates will all carry the scheduler nova.conf
allocation ratios.

Change-Id: I3bd28cd2069ada2f9b0d1fd9c05d12bb6f8f75d9
Partially-Implements: blueprint allocation-ratio-to-resource-tracker
commit 1c195210eb (parent f4138f69cc)
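To summarize the mechanics before the per-file hunks: filters stop consulting a global CONF option and instead read the ratio carried by each HostState. A minimal sketch of the pattern, with simplified names (HostStateSketch and cpu_filter_passes are illustrative, not Nova code):

    # Minimal sketch of the before/after pattern (simplified, not the
    # literal Nova code).
    #
    # Before: one global value for every host.
    #     ratio = CONF.cpu_allocation_ratio
    #
    # After: each HostState carries the ratios its ComputeNode reported,
    # so different hosts can be filtered with different ratios.
    class HostStateSketch(object):
        def __init__(self, compute_node):
            self.vcpus_total = compute_node.vcpus
            self.vcpus_used = compute_node.vcpus_used
            self.cpu_allocation_ratio = compute_node.cpu_allocation_ratio
            self.ram_allocation_ratio = compute_node.ram_allocation_ratio

    def cpu_filter_passes(host_state, requested_vcpus):
        # The overcommit limit is now computed per host.
        limit = host_state.vcpus_total * host_state.cpu_allocation_ratio
        return host_state.vcpus_used + requested_vcpus <= limit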
nova/scheduler/filters/core_filter.py

@@ -15,7 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo_config import cfg
 from oslo_log import log as logging
 
 from nova.i18n import _LW
@@ -24,12 +23,6 @@ from nova.scheduler.filters import utils
 
 LOG = logging.getLogger(__name__)
 
-CONF = cfg.CONF
-
-# TODO(sbauza): Remove the import once all compute nodes are reporting the
-# allocation ratio to the HostState
-CONF.import_opt('cpu_allocation_ratio', 'nova.compute.resource_tracker')
-
 
 class BaseCoreFilter(filters.BaseHostFilter):
@@ -84,7 +77,7 @@ class CoreFilter(BaseCoreFilter):
     """CoreFilter filters based on CPU core utilization."""
 
     def _get_cpu_allocation_ratio(self, host_state, filter_properties):
-        return CONF.cpu_allocation_ratio
+        return host_state.cpu_allocation_ratio
 
 
 class AggregateCoreFilter(BaseCoreFilter):
@@ -99,9 +92,9 @@ class AggregateCoreFilter(BaseCoreFilter):
                                                  'cpu_allocation_ratio')
         try:
             ratio = utils.validate_num_values(
-                aggregate_vals, CONF.cpu_allocation_ratio, cast_to=float)
+                aggregate_vals, host_state.cpu_allocation_ratio, cast_to=float)
         except ValueError as e:
             LOG.warning(_LW("Could not decode cpu_allocation_ratio: '%s'"), e)
-            ratio = CONF.cpu_allocation_ratio
+            ratio = host_state.cpu_allocation_ratio
 
         return ratio
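For context, both hunks above only change the _get_cpu_allocation_ratio() hook; the enclosing BaseCoreFilter.host_passes() logic (elided from this diff) consumes it roughly as follows, paraphrased rather than quoted:

    # Paraphrased from BaseCoreFilter.host_passes() (not shown in the
    # hunks above); details simplified.
    def host_passes(self, host_state, filter_properties):
        instance_type = filter_properties.get('instance_type')
        if not instance_type:
            return True

        instance_vcpus = instance_type['vcpus']
        # The only change in this commit is what this hook returns:
        # host_state.cpu_allocation_ratio instead of a CONF value.
        ratio = self._get_cpu_allocation_ratio(host_state, filter_properties)
        vcpus_total = host_state.vcpus_total * ratio

        # The computed limit is also recorded so the same overcommit
        # bound can be enforced at claim time on the compute node.
        if vcpus_total:
            host_state.limits['vcpu'] = vcpus_total
        free_vcpus = vcpus_total - host_state.vcpus_used
        return free_vcpus >= instance_vcpus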
nova/scheduler/filters/numa_topology_filter.py

@@ -10,23 +10,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo_config import cfg
-
 from nova import objects
 from nova.scheduler import filters
 from nova.virt import hardware
 
-CONF = cfg.CONF
-CONF.import_opt('cpu_allocation_ratio', 'nova.scheduler.filters.core_filter')
-CONF.import_opt('ram_allocation_ratio', 'nova.scheduler.filters.ram_filter')
-
 
 class NUMATopologyFilter(filters.BaseHostFilter):
     """Filter on requested NUMA topology."""
 
     def host_passes(self, host_state, filter_properties):
-        ram_ratio = CONF.ram_allocation_ratio
-        cpu_ratio = CONF.cpu_allocation_ratio
+        ram_ratio = host_state.ram_allocation_ratio
+        cpu_ratio = host_state.cpu_allocation_ratio
         request_spec = filter_properties.get('request_spec', {})
         instance = request_spec.get('instance_properties', {})
         requested_topology = hardware.instance_topology_from_instance(instance)
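The ratios read above are consumed further down in host_passes() (outside this hunk), where they are packed into the limits object that hardware.numa_fit_instance_to_host() enforces per NUMA cell. A rough paraphrase, assuming the surrounding code matches the tests later in this commit:

    # Rough sketch of how the ratios are consumed further down in
    # host_passes() (outside this hunk); simplified.
    limits = objects.NUMATopologyLimits(
        cpu_allocation_ratio=cpu_ratio,
        ram_allocation_ratio=ram_ratio)
    instance_topology = hardware.numa_fit_instance_to_host(
        host_topology, requested_topology, limits=limits)
    if instance_topology is None:
        return False  # request does not fit under the per-host ratios
    host_state.limits['numa_topology'] = limits
    return True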
nova/scheduler/filters/ram_filter.py

@@ -14,7 +14,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from oslo_config import cfg
 from oslo_log import log as logging
 
 from nova.i18n import _LW
@@ -23,12 +22,6 @@ from nova.scheduler.filters import utils
 
 LOG = logging.getLogger(__name__)
 
-CONF = cfg.CONF
-
-# TODO(sbauza): Remove the import once all compute nodes are reporting the
-# allocation ratio to the HostState
-CONF.import_opt('ram_allocation_ratio', 'nova.compute.resource_tracker')
-
 
 class BaseRamFilter(filters.BaseHostFilter):
@@ -76,7 +69,7 @@ class RamFilter(BaseRamFilter):
     """Ram Filter with over subscription flag."""
 
     def _get_ram_allocation_ratio(self, host_state, filter_properties):
-        return CONF.ram_allocation_ratio
+        return host_state.ram_allocation_ratio
 
 
 class AggregateRamFilter(BaseRamFilter):
@@ -92,9 +85,9 @@ class AggregateRamFilter(BaseRamFilter):
 
         try:
             ratio = utils.validate_num_values(
-                aggregate_vals, CONF.ram_allocation_ratio, cast_to=float)
+                aggregate_vals, host_state.ram_allocation_ratio, cast_to=float)
         except ValueError as e:
             LOG.warning(_LW("Could not decode ram_allocation_ratio: '%s'"), e)
-            ratio = CONF.ram_allocation_ratio
+            ratio = host_state.ram_allocation_ratio
 
         return ratio
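Note on the fallback exercised in both aggregate filters above: utils.validate_num_values() picks the minimum of the aggregate metadata values and otherwise returns the supplied default, which is now the per-host ratio. A rough functional equivalent (the exact signature is an assumption):

    # Rough functional equivalent of utils.validate_num_values() as
    # used above (simplified, signature assumed): take the minimum of
    # the aggregate values after casting, or fall back to the per-host
    # default when the set is empty.
    def validate_num_values(vals, default=None, cast_to=float, based_on=min):
        if not vals:
            return default  # e.g. host_state.ram_allocation_ratio
        # A non-numeric value such as 'XXX' raises ValueError, which
        # the callers above catch and answer with the per-host default.
        return based_on(cast_to(v) for v in vals)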
nova/scheduler/host_manager.py

@@ -184,6 +184,10 @@ class HostState(object):
         # Instances on this host
         self.instances = {}
 
+        # Allocation ratios for this host
+        self.ram_allocation_ratio = None
+        self.cpu_allocation_ratio = None
+
         self.updated = None
         if compute:
             self.update_from_compute_node(compute)
@@ -251,6 +255,10 @@ class HostState(object):
         # update metrics
         self.metrics = objects.MonitorMetricList.from_json(compute.metrics)
 
+        # update allocation ratios given by the ComputeNode object
+        self.cpu_allocation_ratio = compute.cpu_allocation_ratio
+        self.ram_allocation_ratio = compute.ram_allocation_ratio
+
     @set_update_time_on_success
     def consume_from_instance(self, instance):
         """Incrementally update host state from an instance."""
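The commit message's claim that every HostState carries the ratios follows from how the HostManager builds its states; a simplified sketch of that flow (method body paraphrased):

    # Simplified sketch of the flow described in the commit message:
    # HostManager.get_all_host_states() builds one HostState per
    # ComputeNode, so every HostState handed to the filters already
    # carries the per-node allocation ratios.
    def get_all_host_states(self, context):
        for compute in objects.ComputeNodeList.get_all(context):
            state = self.host_state_cls(compute.host,
                                        compute.hypervisor_hostname,
                                        compute=compute)
            # __init__ calls update_from_compute_node(), which now
            # copies cpu_allocation_ratio and ram_allocation_ratio
            # (hunks above).
            yield state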
nova/scheduler/ironic_host_manager.py

@@ -81,6 +81,11 @@ class IronicNodeState(host_manager.HostState):
                                     in compute.supported_hv_specs]
         else:
             self.supported_instances = []
 
+        # update allocation ratios given by the ComputeNode object
+        self.cpu_allocation_ratio = compute.cpu_allocation_ratio
+        self.ram_allocation_ratio = compute.ram_allocation_ratio
+
         self.updated = compute.updated_at
 
     @host_manager.set_update_time_on_success
nova/tests/unit/scheduler/fakes.py

@@ -39,7 +39,8 @@ COMPUTE_NODES = [
         host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         hypervisor_type='foo', supported_hv_specs=[],
-        pci_device_pools=None, cpu_info=None, stats=None, metrics=None),
+        pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     objects.ComputeNode(
         id=2, local_gb=2048, memory_mb=2048, vcpus=2,
         disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
@@ -47,7 +48,8 @@ COMPUTE_NODES = [
         host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         hypervisor_type='foo', supported_hv_specs=[],
-        pci_device_pools=None, cpu_info=None, stats=None, metrics=None),
+        pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     objects.ComputeNode(
         id=3, local_gb=4096, memory_mb=4096, vcpus=4,
         disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
@@ -55,7 +57,8 @@ COMPUTE_NODES = [
         host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(),
         hypervisor_type='foo', supported_hv_specs=[],
-        pci_device_pools=None, cpu_info=None, stats=None, metrics=None),
+        pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     objects.ComputeNode(
         id=4, local_gb=8192, memory_mb=8192, vcpus=8,
         disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
@@ -63,7 +66,8 @@ COMPUTE_NODES = [
         host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1',
         hypervisor_version=0, numa_topology=None,
         hypervisor_type='foo', supported_hv_specs=[],
-        pci_device_pools=None, cpu_info=None, stats=None, metrics=None),
+        pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     # Broken entry
     objects.ComputeNode(
         id=5, local_gb=1024, memory_mb=1024, vcpus=1,
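The filter tests below inject ratios per host through fakes.FakeHostState, which simply maps each dict entry onto an attribute; roughly:

    # Rough shape of fakes.FakeHostState as used by the tests below:
    # every key in attribute_dict becomes an attribute, so the tests
    # can inject 'cpu_allocation_ratio' / 'ram_allocation_ratio' per
    # host instead of setting a global flag.
    class FakeHostState(host_manager.HostState):
        def __init__(self, host, node, attribute_dict):
            super(FakeHostState, self).__init__(host, node)
            for (key, val) in attribute_dict.items():
                setattr(self, key, val)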
nova/tests/unit/scheduler/filters/test_core_filters.py

@@ -22,9 +22,9 @@ class TestCoreFilter(test.NoDBTestCase):
     def test_core_filter_passes(self):
         self.filt_cls = core_filter.CoreFilter()
         filter_properties = {'instance_type': {'vcpus': 1}}
-        self.flags(cpu_allocation_ratio=2)
         host = fakes.FakeHostState('host1', 'node1',
-                {'vcpus_total': 4, 'vcpus_used': 7})
+                {'vcpus_total': 4, 'vcpus_used': 7,
+                 'cpu_allocation_ratio': 2})
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
 
     def test_core_filter_fails_safe(self):
@@ -36,17 +36,17 @@ class TestCoreFilter(test.NoDBTestCase):
     def test_core_filter_fails(self):
         self.filt_cls = core_filter.CoreFilter()
         filter_properties = {'instance_type': {'vcpus': 1}}
-        self.flags(cpu_allocation_ratio=2)
         host = fakes.FakeHostState('host1', 'node1',
-                {'vcpus_total': 4, 'vcpus_used': 8})
+                {'vcpus_total': 4, 'vcpus_used': 8,
+                 'cpu_allocation_ratio': 2})
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
 
     def test_core_filter_single_instance_overcommit_fails(self):
         self.filt_cls = core_filter.CoreFilter()
         filter_properties = {'instance_type': {'vcpus': 2}}
-        self.flags(cpu_allocation_ratio=2)
         host = fakes.FakeHostState('host1', 'node1',
-                {'vcpus_total': 1, 'vcpus_used': 0})
+                {'vcpus_total': 1, 'vcpus_used': 0,
+                 'cpu_allocation_ratio': 2})
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
 
     @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
@@ -54,9 +54,9 @@ class TestCoreFilter(test.NoDBTestCase):
         self.filt_cls = core_filter.AggregateCoreFilter()
         filter_properties = {'context': mock.sentinel.ctx,
                              'instance_type': {'vcpus': 1}}
-        self.flags(cpu_allocation_ratio=2)
         host = fakes.FakeHostState('host1', 'node1',
-                {'vcpus_total': 4, 'vcpus_used': 7})
+                {'vcpus_total': 4, 'vcpus_used': 7,
+                 'cpu_allocation_ratio': 2})
         agg_mock.return_value = set(['XXX'])
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
         agg_mock.assert_called_once_with(host, 'cpu_allocation_ratio')
@@ -67,9 +67,9 @@ class TestCoreFilter(test.NoDBTestCase):
         self.filt_cls = core_filter.AggregateCoreFilter()
         filter_properties = {'context': mock.sentinel.ctx,
                              'instance_type': {'vcpus': 1}}
-        self.flags(cpu_allocation_ratio=2)
         host = fakes.FakeHostState('host1', 'node1',
-                {'vcpus_total': 4, 'vcpus_used': 8})
+                {'vcpus_total': 4, 'vcpus_used': 8,
+                 'cpu_allocation_ratio': 2})
         agg_mock.return_value = set([])
         # False: fallback to default flag w/o aggregates
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
@@ -84,9 +84,9 @@ class TestCoreFilter(test.NoDBTestCase):
         self.filt_cls = core_filter.AggregateCoreFilter()
         filter_properties = {'context': mock.sentinel.ctx,
                              'instance_type': {'vcpus': 1}}
-        self.flags(cpu_allocation_ratio=1)
        host = fakes.FakeHostState('host1', 'node1',
-                {'vcpus_total': 4, 'vcpus_used': 8})
+                {'vcpus_total': 4, 'vcpus_used': 8,
+                 'cpu_allocation_ratio': 1})
         agg_mock.return_value = set(['2', '3'])
         # use the minimum ratio from aggregates
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
nova/tests/unit/scheduler/filters/test_numa_topology_filters.py

@@ -40,7 +40,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
                                 obj_base.obj_to_primitive(instance))}}
         host = fakes.FakeHostState('host1', 'node1',
                                    {'numa_topology': fakes.NUMA_TOPOLOGY,
-                                    'pci_stats': None})
+                                    'pci_stats': None,
+                                    'cpu_allocation_ratio': 16.0,
+                                    'ram_allocation_ratio': 1.5})
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
 
     def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
@@ -83,12 +85,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
                                 obj_base.obj_to_primitive(instance))}}
         host = fakes.FakeHostState('host1', 'node1',
                                    {'numa_topology': fakes.NUMA_TOPOLOGY,
-                                    'pci_stats': None})
+                                    'pci_stats': None,
+                                    'cpu_allocation_ratio': 16.0,
+                                    'ram_allocation_ratio': 1.5})
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
 
     def test_numa_topology_filter_fail_memory(self):
-        self.flags(ram_allocation_ratio=1)
-
         instance_topology = objects.InstanceNUMATopology(
             cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
                                             memory=1024),
@@ -102,12 +104,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
                                 obj_base.obj_to_primitive(instance))}}
         host = fakes.FakeHostState('host1', 'node1',
                                    {'numa_topology': fakes.NUMA_TOPOLOGY,
-                                    'pci_stats': None})
+                                    'pci_stats': None,
+                                    'cpu_allocation_ratio': 16.0,
+                                    'ram_allocation_ratio': 1})
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
 
     def test_numa_topology_filter_fail_cpu(self):
-        self.flags(cpu_allocation_ratio=1)
-
         instance_topology = objects.InstanceNUMATopology(
             cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
                    objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
@@ -120,13 +122,12 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
                                 obj_base.obj_to_primitive(instance))}}
         host = fakes.FakeHostState('host1', 'node1',
                                    {'numa_topology': fakes.NUMA_TOPOLOGY,
-                                    'pci_stats': None})
+                                    'pci_stats': None,
+                                    'cpu_allocation_ratio': 1,
+                                    'ram_allocation_ratio': 1.5})
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
 
     def test_numa_topology_filter_pass_set_limit(self):
-        self.flags(cpu_allocation_ratio=21)
-        self.flags(ram_allocation_ratio=1.3)
-
         instance_topology = objects.InstanceNUMATopology(
             cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
                    objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
@@ -139,7 +140,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
                                 obj_base.obj_to_primitive(instance))}}
         host = fakes.FakeHostState('host1', 'node1',
                                    {'numa_topology': fakes.NUMA_TOPOLOGY,
-                                    'pci_stats': None})
+                                    'pci_stats': None,
+                                    'cpu_allocation_ratio': 21,
+                                    'ram_allocation_ratio': 1.3})
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
         limits = host.limits['numa_topology']
         self.assertEqual(limits.cpu_allocation_ratio, 21)
nova/tests/unit/scheduler/filters/test_ram_filters.py

@@ -24,32 +24,32 @@ class TestRamFilter(test.NoDBTestCase):
         self.filt_cls = ram_filter.RamFilter()
 
     def test_ram_filter_fails_on_memory(self):
-        self.flags(ram_allocation_ratio=1.0)
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         host = fakes.FakeHostState('host1', 'node1',
-                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
+                 'ram_allocation_ratio': 1.0})
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
 
     def test_ram_filter_passes(self):
-        self.flags(ram_allocation_ratio=1.0)
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         host = fakes.FakeHostState('host1', 'node1',
-                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
+                 'ram_allocation_ratio': 1.0})
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
 
     def test_ram_filter_oversubscribe(self):
-        self.flags(ram_allocation_ratio=2.0)
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         host = fakes.FakeHostState('host1', 'node1',
-                {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048})
+                {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
+                 'ram_allocation_ratio': 2.0})
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
         self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
 
     def test_ram_filter_oversubscribe_singe_instance_fails(self):
-        self.flags(ram_allocation_ratio=2.0)
         filter_properties = {'instance_type': {'memory_mb': 1024}}
         host = fakes.FakeHostState('host1', 'node1',
-                {'free_ram_mb': 512, 'total_usable_ram_mb': 512})
+                {'free_ram_mb': 512, 'total_usable_ram_mb': 512,
+                 'ram_allocation_ratio': 2.0})
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
 
 
@@ -61,21 +61,21 @@ class TestAggregateRamFilter(test.NoDBTestCase):
         self.filt_cls = ram_filter.AggregateRamFilter()
 
     def test_aggregate_ram_filter_value_error(self, agg_mock):
-        self.flags(ram_allocation_ratio=1.0)
         filter_properties = {'context': mock.sentinel.ctx,
                              'instance_type': {'memory_mb': 1024}}
         host = fakes.FakeHostState('host1', 'node1',
-                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
+                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
+                 'ram_allocation_ratio': 1.0})
         agg_mock.return_value = set(['XXX'])
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
         self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
 
     def test_aggregate_ram_filter_default_value(self, agg_mock):
-        self.flags(ram_allocation_ratio=1.0)
         filter_properties = {'context': mock.sentinel.ctx,
                              'instance_type': {'memory_mb': 1024}}
         host = fakes.FakeHostState('host1', 'node1',
-                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
+                 'ram_allocation_ratio': 1.0})
         # False: fallback to default flag w/o aggregates
         agg_mock.return_value = set()
         self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
@@ -85,11 +85,11 @@ class TestAggregateRamFilter(test.NoDBTestCase):
         self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
 
     def test_aggregate_ram_filter_conflict_values(self, agg_mock):
-        self.flags(ram_allocation_ratio=1.0)
         filter_properties = {'context': mock.sentinel.ctx,
                              'instance_type': {'memory_mb': 1024}}
         host = fakes.FakeHostState('host1', 'node1',
-                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
+                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
+                 'ram_allocation_ratio': 1.0})
         agg_mock.return_value = set(['1.5', '2.0'])
         # use the minimum ratio from aggregates
         self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
nova/tests/unit/scheduler/ironic_fakes.py

@@ -32,7 +32,8 @@ COMPUTE_NODES = [
             cpu_arch='i386'),
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
-        free_disk_gb=10, free_ram_mb=1024),
+        free_disk_gb=10, free_ram_mb=1024,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     objects.ComputeNode(
         id=2, local_gb=20, memory_mb=2048, vcpus=1,
         vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -45,7 +46,8 @@ COMPUTE_NODES = [
             cpu_arch='i386'),
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
-        free_disk_gb=20, free_ram_mb=2048),
+        free_disk_gb=20, free_ram_mb=2048,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     objects.ComputeNode(
         id=3, local_gb=30, memory_mb=3072, vcpus=1,
         vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -58,7 +60,8 @@ COMPUTE_NODES = [
             cpu_arch='i386'),
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
-        free_disk_gb=30, free_ram_mb=3072),
+        free_disk_gb=30, free_ram_mb=3072,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     objects.ComputeNode(
         id=4, local_gb=40, memory_mb=4096, vcpus=1,
         vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -71,7 +74,8 @@ COMPUTE_NODES = [
            cpu_arch='i386'),
         supported_hv_specs=[objects.HVSpec.from_list(
             ["i386", "baremetal", "baremetal"])],
-        free_disk_gb=40, free_ram_mb=4096),
+        free_disk_gb=40, free_ram_mb=4096,
+        cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
     # Broken entry
     objects.ComputeNode(
         id=5, local_gb=50, memory_mb=5120, vcpus=1,
nova/tests/unit/scheduler/test_caching_scheduler.py

@@ -135,6 +135,8 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
             "updated_at": timeutils.utcnow(),
             "created_at": timeutils.utcnow(),
         }
+        host_state.cpu_allocation_ratio = 16.0
+        host_state.ram_allocation_ratio = 1.5
         return host_state
 
     @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
nova/tests/unit/scheduler/test_host_manager.py

@@ -783,7 +783,8 @@ class HostStateTestCase(test.NoDBTestCase):
             hypervisor_hostname='hostname', cpu_info='cpu_info',
             supported_hv_specs=[],
             hypervisor_version=hyper_ver_int, numa_topology=None,
-            pci_device_pools=None, metrics=None)
+            pci_device_pools=None, metrics=None,
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
 
         host = host_manager.HostState("fakehost", "fakenode")
         host.update_from_compute_node(compute)
@@ -823,7 +824,8 @@ class HostStateTestCase(test.NoDBTestCase):
             hypervisor_hostname='hostname', cpu_info='cpu_info',
             supported_hv_specs=[],
             hypervisor_version=hyper_ver_int, numa_topology=None,
-            pci_device_pools=None, metrics=None)
+            pci_device_pools=None, metrics=None,
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
 
         host = host_manager.HostState("fakehost", "fakenode")
         host.update_from_compute_node(compute)
@@ -854,7 +856,8 @@ class HostStateTestCase(test.NoDBTestCase):
             hypervisor_hostname='hostname', cpu_info='cpu_info',
             supported_hv_specs=[],
             hypervisor_version=hyper_ver_int, numa_topology=None,
-            pci_device_pools=None, metrics=None)
+            pci_device_pools=None, metrics=None,
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
 
         host = host_manager.HostState("fakehost", "fakenode")
         host.update_from_compute_node(compute)
@@ -1017,7 +1020,8 @@ class HostStateTestCase(test.NoDBTestCase):
             supported_hv_specs=[],
             hypervisor_version=hyper_ver_int,
             numa_topology=fakes.NUMA_TOPOLOGY._to_json(),
-            stats=None, pci_device_pools=None)
+            stats=None, pci_device_pools=None,
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
         host = host_manager.HostState("fakehost", "fakenode")
         host.update_from_compute_node(compute)
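With the fixture fields added above, the HostState tests can check that update_from_compute_node() copied the ratios through; a paraphrased example of the kind of assertion these hunks enable:

    # Paraphrased example (not a verbatim hunk from this commit):
    host = host_manager.HostState("fakehost", "fakenode")
    host.update_from_compute_node(compute)
    self.assertEqual(16.0, host.cpu_allocation_ratio)
    self.assertEqual(1.5, host.ram_allocation_ratio)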
nova/tests/unit/scheduler/test_ironic_host_manager.py

@@ -120,7 +120,8 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
             free_disk_gb=10, free_ram_mb=1024,
             hypervisor_type='ironic',
             hypervisor_version=1,
-            hypervisor_hostname='fake_host')
+            hypervisor_hostname='fake_host',
+            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
 
     @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
     def test_create_ironic_node_state(self, init_mock):