Merge "Prevent instance disk overcommit against itself"
@@ -40,6 +40,19 @@ class DiskFilter(filters.BaseHostFilter):
         free_disk_mb = host_state.free_disk_mb
+        total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
+
+        # Do not allow an instance to overcommit against itself, only against
+        # other instances. In other words, if there isn't room for even just
+        # this one instance in total_usable_disk space, consider the host full.
+        if total_usable_disk_mb < requested_disk:
+            LOG.debug("%(host_state)s does not have %(requested_disk)s "
+                      "MB usable disk space before overcommit, it only "
+                      "has %(physical_disk_size)s MB.",
+                      {'host_state': host_state,
+                       'requested_disk': requested_disk,
+                       'physical_disk_size': total_usable_disk_mb})
+            return False
+
         disk_allocation_ratio = self._get_disk_allocation_ratio(
             host_state, spec_obj)
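
The guard above only compares the flavor's raw footprint against the host's physical capacity; the allocation ratio is applied afterwards, as before. A minimal standalone sketch of that check, assuming requested_disk is derived from the flavor as 1024 * (root_gb + ephemeral_gb) + swap (consistent with the "7Gb instance" arithmetic in the tests below); host_has_room is an illustrative helper, not part of DiskFilter:

    # Hedged sketch: standalone version of the new pre-overcommit guard.
    def host_has_room(total_usable_disk_gb, root_gb, ephemeral_gb, swap_mb):
        requested_disk = 1024 * (root_gb + ephemeral_gb) + swap_mb  # MB
        total_usable_disk_mb = total_usable_disk_gb * 1024
        # Overcommit lets instances share space with each other, but a single
        # instance must still fit on the bare disk.
        return total_usable_disk_mb >= requested_disk

    # A 12 GB disk holds a 3 GB root + 3 GB ephemeral + 1024 MB swap flavor (7 GB),
    assert host_has_room(12, root_gb=3, ephemeral_gb=3, swap_mb=1024)
    # but not a 10 + 2 GB + 1024 MB swap flavor (13 GB), whatever the ratio.
    assert not host_has_room(12, root_gb=10, ephemeral_gb=2, swap_mb=1024)
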
@@ -46,14 +46,28 @@ class TestDiskFilter(test.NoDBTestCase):
         filt_cls = disk_filter.DiskFilter()
         spec_obj = objects.RequestSpec(
             flavor=objects.Flavor(
-                root_gb=100, ephemeral_gb=18, swap=1024))
-        # 1GB used... so 119GB allowed...
+                root_gb=3, ephemeral_gb=3, swap=1024))
+        # Only 1Gb left, but with 10x overprovision a 7Gb instance should
+        # still fit. Schedule will succeed.
         host = fakes.FakeHostState('host1', 'node1',
-            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
+            {'free_disk_mb': 1 * 1024, 'total_usable_disk_gb': 12,
              'disk_allocation_ratio': 10.0})
         self.assertTrue(filt_cls.host_passes(host, spec_obj))
         self.assertEqual(12 * 10.0, host.limits['disk_gb'])

+    def test_disk_filter_oversubscribe_single_instance_fails(self):
+        filt_cls = disk_filter.DiskFilter()
+        spec_obj = objects.RequestSpec(
+            flavor=objects.Flavor(
+                root_gb=10, ephemeral_gb=2, swap=1024))
+        # According to the allocation ratio, This host has 119 Gb left,
+        # but it doesn't matter because the requested instance is
+        # bigger than the whole drive. Schedule will fail.
+        host = fakes.FakeHostState('host1', 'node1',
+            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
+             'disk_allocation_ratio': 10.0})
+        self.assertFalse(filt_cls.host_passes(host, spec_obj))
+
     def test_disk_filter_oversubscribe_fail(self):
         filt_cls = disk_filter.DiskFilter()
         spec_obj = objects.RequestSpec(
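
Reading the two tests together: the oversubscription arithmetic itself is unchanged (12 GB total * 10.0 ratio = 120 GB limit, leaving 109 GB and 119 GB of headroom respectively), so what differs is only whether the single flavor fits on the physical disk at all. A hedged sketch of the combined decision, using the limit/used/usable arithmetic the test comments describe; disk_filter_passes is an illustrative stand-in, not the filter's actual code:

    def disk_filter_passes(free_disk_mb, total_usable_disk_gb, ratio,
                           root_gb, ephemeral_gb, swap_mb):
        requested_disk = 1024 * (root_gb + ephemeral_gb) + swap_mb
        total_usable_disk_mb = total_usable_disk_gb * 1024
        # New guard: the instance must fit on the physical disk by itself.
        if total_usable_disk_mb < requested_disk:
            return False
        # Oversubscription math: scale capacity by the allocation ratio,
        # then subtract what is already claimed by other instances.
        disk_mb_limit = total_usable_disk_mb * ratio
        used_disk_mb = total_usable_disk_mb - free_disk_mb
        usable_disk_mb = disk_mb_limit - used_disk_mb
        return usable_disk_mb >= requested_disk

    # test_disk_filter_oversubscribe: 7 GB requested, 12 GB physical,
    # 109 GB of oversubscribed headroom -> passes.
    assert disk_filter_passes(1 * 1024, 12, 10.0, 3, 3, 1024)
    # ..._single_instance_fails: 119 GB of headroom, but 13 GB > 12 GB
    # physical -> fails on the new guard alone.
    assert not disk_filter_passes(11 * 1024, 12, 10.0, 10, 2, 1024)
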
@@ -74,7 +88,7 @@ class TestDiskFilter(test.NoDBTestCase):
                 root_gb=1, ephemeral_gb=1, swap=1024))
         host = fakes.FakeHostState('host1', 'node1',
             {'free_disk_mb': 3 * 1024,
-             'total_usable_disk_gb': 1,
+             'total_usable_disk_gb': 4,
              'disk_allocation_ratio': 1.0})
         agg_mock.return_value = set(['XXX'])
         self.assertTrue(filt_cls.host_passes(host, spec_obj))

@@ -89,7 +103,7 @@ class TestDiskFilter(test.NoDBTestCase):
                 root_gb=2, ephemeral_gb=1, swap=1024))
         host = fakes.FakeHostState('host1', 'node1',
             {'free_disk_mb': 3 * 1024,
-             'total_usable_disk_gb': 1,
+             'total_usable_disk_gb': 4,
              'disk_allocation_ratio': 1.0})
         # Uses global conf.
         agg_mock.return_value = set([])
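
The two hunks above only touch fixtures: with the new guard, a host with total_usable_disk_gb of 1 could no longer accept these flavors at all, so the fixture is bumped to 4 GB to keep the aggregate-ratio tests exercising the ratio path rather than the new rejection. Roughly, assuming the same requested-disk formula as in the sketches above:

    requested_a = 1024 * (1 + 1) + 1024  # 3072 MB for the first test's flavor
    requested_b = 1024 * (2 + 1) + 1024  # 4096 MB for the second test's flavor
    assert max(requested_a, requested_b) > 1 * 1024    # old 1 GB fixture: rejected
    assert max(requested_a, requested_b) <= 4 * 1024   # new 4 GB fixture: allowed
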
@@ -138,6 +138,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
         host_state.free_ram_mb = 50000
         host_state.total_usable_ram_mb = 50000
         host_state.free_disk_mb = 4096
+        host_state.total_usable_disk_gb = 4
         host_state.service = {
             "disabled": False,
             "updated_at": timeutils.utcnow(),