Remove deprecated CPU, RAM, disk claiming in resource tracker

In change Id62136d293da55e4bb639635ea5421a33b6c3ea2, we deprecated the
scheduler filters for CPU, RAM, and Disk since they were no longer
necessary in the new placement-based world. With these filters disabled,
we are no longer passing limits down to the resource tracker, meaning we
are treating everything in the claim process as unlimited. This means
most of the claiming code here, NUMA stuff aside, is now a no-op and can
be removed. Do just that.

Change-Id: I2936ce8cb293dc80e1a426094fdae6e675461470
Co-Authored-By: Stephen Finucane <sfinucan@redhat.com>
Partial-Bug: #1469179
Dan Smith authored 2018-03-08 14:35:15 -08:00; committed by Stephen Finucane
parent 78645e61c6
commit 5406c8bd9b
5 changed files with 16 additions and 305 deletions
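
Why the removed checks were already no-ops: the old Claim._test() helper
treated a missing limit as "unlimited" (see the "If an individual limit is
None" comment in the hunks below), and with the CPU/RAM/Disk filters
disabled the scheduler never sends those limits. A minimal standalone
sketch of that semantic (illustrative only, not the actual Nova code):

    # Simplified model of the removed Claim._test(); the real method also
    # logged totals/usage and translated the failure message.
    def test_resource(total, used, requested, limit=None):
        """Return a rejection reason, or None if the claim fits."""
        if limit is None:
            # No limit supplied: treat the resource as unlimited, so the
            # check can never fail.
            return None
        free = limit - used
        if requested > free:
            return 'Free %d < requested %d' % (free, requested)
        return None

    # With the filters disabled, limit is always None, so every CPU, RAM
    # and disk test degenerates to "return None":
    assert test_resource(total=2048, used=2000, requested=8192) is None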

nova/compute/claims.py

@@ -35,18 +35,6 @@ class NopClaim(object):
         self.migration = kwargs.pop('migration', None)
         self.claimed_numa_topology = None
 
-    @property
-    def disk_gb(self):
-        return 0
-
-    @property
-    def memory_mb(self):
-        return 0
-
-    @property
-    def vcpus(self):
-        return 0
-
     def __enter__(self):
         return self
@@ -57,10 +45,6 @@ class NopClaim(object):
     def abort(self):
         pass
 
-    def __str__(self):
-        return "[Claim: %d MB memory, %d GB disk]" % (self.memory_mb,
-                                                      self.disk_gb)
-
 
 class Claim(NopClaim):
     """A declaration that a compute host operation will require free resources.
@@ -74,7 +58,7 @@ class Claim(NopClaim):
     """
 
     def __init__(self, context, instance, nodename, tracker, resources,
-                 pci_requests, overhead=None, limits=None):
+                 pci_requests, limits=None):
         super(Claim, self).__init__()
         # Stash a copy of the instance at the current point of time
         self.instance = instance.obj_clone()
@@ -82,32 +66,12 @@ class Claim(NopClaim):
         self._numa_topology_loaded = False
         self.tracker = tracker
         self._pci_requests = pci_requests
-
-        if not overhead:
-            overhead = {'memory_mb': 0,
-                        'disk_gb': 0}
-
-        self.overhead = overhead
         self.context = context
 
         # Check claim at constructor to avoid mess code
         # Raise exception ComputeResourcesUnavailable if claim failed
         self._claim_test(resources, limits)
 
-    @property
-    def disk_gb(self):
-        return (self.instance.flavor.root_gb +
-                self.instance.flavor.ephemeral_gb +
-                self.overhead.get('disk_gb', 0))
-
-    @property
-    def memory_mb(self):
-        return self.instance.flavor.memory_mb + self.overhead['memory_mb']
-
-    @property
-    def vcpus(self):
-        return self.instance.flavor.vcpus
-
     @property
     def numa_topology(self):
         if self._numa_topology_loaded:
@@ -145,22 +109,9 @@ class Claim(NopClaim):
         # If an individual limit is None, the resource will be considered
         # unlimited:
-        memory_mb_limit = limits.get('memory_mb')
-        disk_gb_limit = limits.get('disk_gb')
-        vcpus_limit = limits.get('vcpu')
         numa_topology_limit = limits.get('numa_topology')
 
-        LOG.info("Attempting claim on node %(node)s: "
-                 "memory %(memory_mb)d MB, "
-                 "disk %(disk_gb)d GB, vcpus %(vcpus)d CPU",
-                 {'node': self.nodename, 'memory_mb': self.memory_mb,
-                  'disk_gb': self.disk_gb, 'vcpus': self.vcpus},
-                 instance=self.instance)
-
-        reasons = [self._test_memory(resources, memory_mb_limit),
-                   self._test_disk(resources, disk_gb_limit),
-                   self._test_vcpus(resources, vcpus_limit),
-                   self._test_numa_topology(resources, numa_topology_limit),
+        reasons = [self._test_numa_topology(resources, numa_topology_limit),
                    self._test_pci()]
         reasons = [r for r in reasons if r is not None]
         if len(reasons) > 0:
@@ -170,33 +121,6 @@ class Claim(NopClaim):
         LOG.info('Claim successful on node %s', self.nodename,
                  instance=self.instance)
 
-    def _test_memory(self, resources, limit):
-        type_ = _("memory")
-        unit = "MB"
-        total = resources.memory_mb
-        used = resources.memory_mb_used
-        requested = self.memory_mb
-
-        return self._test(type_, unit, total, used, requested, limit)
-
-    def _test_disk(self, resources, limit):
-        type_ = _("disk")
-        unit = "GB"
-        total = resources.local_gb
-        used = resources.local_gb_used
-        requested = self.disk_gb
-
-        return self._test(type_, unit, total, used, requested, limit)
-
-    def _test_vcpus(self, resources, limit):
-        type_ = _("vcpu")
-        unit = "VCPU"
-        total = resources.vcpus
-        used = resources.vcpus_used
-        requested = self.vcpus
-
-        return self._test(type_, unit, total, used, requested, limit)
-
     def _test_pci(self):
         pci_requests = self._pci_requests
         if pci_requests.requests:
@@ -270,7 +194,7 @@ class MoveClaim(Claim):
     Move can be either a migrate/resize, live-migrate or an evacuate operation.
     """
     def __init__(self, context, instance, nodename, instance_type, image_meta,
-                 tracker, resources, pci_requests, overhead=None, limits=None):
+                 tracker, resources, pci_requests, limits=None):
         self.context = context
         self.instance_type = instance_type
         if isinstance(image_meta, dict):
@@ -278,23 +202,9 @@ class MoveClaim(Claim):
         self.image_meta = image_meta
         super(MoveClaim, self).__init__(context, instance, nodename, tracker,
                                         resources, pci_requests,
-                                        overhead=overhead, limits=limits)
+                                        limits=limits)
         self.migration = None
 
-    @property
-    def disk_gb(self):
-        return (self.instance_type.root_gb +
-                self.instance_type.ephemeral_gb +
-                self.overhead.get('disk_gb', 0))
-
-    @property
-    def memory_mb(self):
-        return self.instance_type.memory_mb + self.overhead['memory_mb']
-
-    @property
-    def vcpus(self):
-        return self.instance_type.vcpus
-
     @property
     def numa_topology(self):
         return hardware.numa_get_constraints(self.instance_type,
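
For context, the claim context-manager protocol this file keeps is
unchanged: construction performs the (now NUMA/PCI-only) tests and raises
ComputeResourcesUnavailable on failure, while __exit__ aborts the claim if
the build fails. A self-contained sketch of that pattern (illustrative
only; the exception class here stands in for nova.exception's):

    class ComputeResourcesUnavailable(Exception):
        pass

    class NopClaim(object):
        """For drivers without resource tracking: always succeeds."""
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                self.abort()  # roll back the claim if the build failed

        def abort(self):
            pass

    class Claim(NopClaim):
        """Runs the remaining tests at construction time."""
        def __init__(self, reasons):
            # Mirrors _claim_test(): collect failures, raise if any.
            reasons = [r for r in reasons if r is not None]
            if reasons:
                raise ComputeResourcesUnavailable('; '.join(reasons))

    # Usage: construction claims; the with-block protects the spawn.
    with Claim(reasons=[None, None]):
        pass  # any exception here would trigger abort()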

nova/compute/resource_tracker.py

@@ -193,23 +193,11 @@ class ResourceTracker(object):
                      "until resources have been claimed.",
                      instance=instance)
 
-        # get the overhead required to build this instance:
-        overhead = self.driver.estimate_instance_overhead(instance)
-        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
-                  "MB", {'flavor': instance.flavor.memory_mb,
-                         'overhead': overhead['memory_mb']})
-        LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
-                  "GB", {'flavor': instance.flavor.root_gb,
-                         'overhead': overhead.get('disk_gb', 0)})
-        LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
-                  "vCPU(s)", {'flavor': instance.flavor.vcpus,
-                              'overhead': overhead.get('vcpus', 0)})
-
         cn = self.compute_nodes[nodename]
         pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
             context, instance.uuid)
         claim = claims.Claim(context, instance, nodename, self, cn,
-                             pci_requests, overhead=overhead, limits=limits)
+                             pci_requests, limits=limits)
 
         # self._set_instance_host_and_node() will save instance to the DB
         # so set instance.numa_topology first. We need to make sure
@@ -290,18 +278,6 @@ class ResourceTracker(object):
             # generate the migration record and continue the resize:
             return claims.NopClaim(migration=migration)
 
-        # get memory overhead required to build this instance:
-        overhead = self.driver.estimate_instance_overhead(new_instance_type)
-        LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
-                  "MB", {'flavor': new_instance_type.memory_mb,
-                         'overhead': overhead['memory_mb']})
-        LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
-                  "GB", {'flavor': instance.flavor.root_gb,
-                         'overhead': overhead.get('disk_gb', 0)})
-        LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
-                  "vCPU(s)", {'flavor': instance.flavor.vcpus,
-                              'overhead': overhead.get('vcpus', 0)})
-
         cn = self.compute_nodes[nodename]
 
         # TODO(moshele): we are recreating the pci requests even if
@@ -319,7 +295,7 @@ class ResourceTracker(object):
                 new_pci_requests.requests.append(request)
         claim = claims.MoveClaim(context, instance, nodename,
                                  new_instance_type, image_meta, self, cn,
-                                 new_pci_requests, overhead=overhead,
+                                 new_pci_requests,
                                  limits=limits)
         claim.migration = migration

nova/tests/unit/compute/test_claims.py

@@ -71,7 +71,7 @@ class ClaimTestCase(test.NoDBTestCase):
             requests=[]
         )
 
-    def _claim(self, limits=None, overhead=None, requests=None, **kwargs):
+    def _claim(self, limits=None, requests=None, **kwargs):
         numa_topology = kwargs.pop('numa_topology', None)
         instance = self._fake_instance(**kwargs)
         instance.flavor = self._fake_instance_type(**kwargs)
@@ -85,8 +85,6 @@ class ClaimTestCase(test.NoDBTestCase):
             }
         else:
             db_numa_topology = None
 
-        if overhead is None:
-            overhead = {'memory_mb': 0}
         requests = requests or self.empty_requests
@@ -95,7 +93,7 @@ class ClaimTestCase(test.NoDBTestCase):
         def get_claim(mock_extra_get):
             return claims.Claim(self.context, instance, _NODENAME,
                                 self.tracker, self.resources, requests,
-                                overhead=overhead, limits=limits)
+                                limits=limits)
         return get_claim()
 
     def _fake_instance(self, **kwargs):
@@ -150,90 +148,6 @@ class ClaimTestCase(test.NoDBTestCase):
         resources.update(values)
         return objects.ComputeNode(**resources)
 
-    def test_memory_unlimited(self):
-        self._claim(memory_mb=99999999)
-
-    def test_disk_unlimited_root(self):
-        self._claim(root_gb=999999)
-
-    def test_disk_unlimited_ephemeral(self):
-        self._claim(ephemeral_gb=999999)
-
-    def test_memory_with_overhead(self):
-        overhead = {'memory_mb': 8}
-        limits = {'memory_mb': 2048}
-        self._claim(memory_mb=2040, limits=limits,
-                    overhead=overhead)
-
-    def test_memory_with_overhead_insufficient(self):
-        overhead = {'memory_mb': 9}
-        limits = {'memory_mb': 2048}
-
-        self.assertRaises(exception.ComputeResourcesUnavailable,
-                          self._claim, limits=limits, overhead=overhead,
-                          memory_mb=2040)
-
-    def test_memory_oversubscription(self):
-        self._claim(memory_mb=4096)
-
-    def test_disk_with_overhead(self):
-        overhead = {'memory_mb': 0,
-                    'disk_gb': 1}
-        limits = {'disk_gb': 100}
-        claim_obj = self._claim(root_gb=99, ephemeral_gb=0, limits=limits,
-                                overhead=overhead)
-
-        self.assertEqual(100, claim_obj.disk_gb)
-
-    def test_disk_with_overhead_insufficient(self):
-        overhead = {'memory_mb': 0,
-                    'disk_gb': 2}
-        limits = {'disk_gb': 100}
-
-        self.assertRaises(exception.ComputeResourcesUnavailable,
-                          self._claim, limits=limits, overhead=overhead,
-                          root_gb=99, ephemeral_gb=0)
-
-    def test_disk_with_overhead_insufficient_no_root(self):
-        overhead = {'memory_mb': 0,
-                    'disk_gb': 2}
-        limits = {'disk_gb': 1}
-
-        self.assertRaises(exception.ComputeResourcesUnavailable,
-                          self._claim, limits=limits, overhead=overhead,
-                          root_gb=0, ephemeral_gb=0)
-
-    def test_memory_insufficient(self):
-        limits = {'memory_mb': 8192}
-        self.assertRaises(exception.ComputeResourcesUnavailable,
-                          self._claim, limits=limits, memory_mb=16384)
-
-    def test_disk_oversubscription(self):
-        limits = {'disk_gb': 60}
-        self._claim(root_gb=10, ephemeral_gb=40,
-                    limits=limits)
-
-    def test_disk_oversubscription_scheduler_limits_object(self):
-        """Tests that the Claim code can handle a SchedulerLimits object"""
-        limits = objects.SchedulerLimits.from_dict({'disk_gb': 60})
-        self._claim(root_gb=10, ephemeral_gb=40,
-                    limits=limits)
-
-    def test_disk_insufficient(self):
-        limits = {'disk_gb': 45}
-        self.assertRaisesRegex(
-            exception.ComputeResourcesUnavailable,
-            "disk",
-            self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
-
-    def test_disk_and_memory_insufficient(self):
-        limits = {'disk_gb': 45, 'memory_mb': 8192}
-        self.assertRaisesRegex(
-            exception.ComputeResourcesUnavailable,
-            "memory.*disk",
-            self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
-            memory_mb=16384)
-
     @mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
                 return_value=True)
     def test_pci_pass(self, mock_pci_supports_requests):
@@ -392,7 +306,7 @@ class ClaimTestCase(test.NoDBTestCase):
 
 class MoveClaimTestCase(ClaimTestCase):
 
-    def _claim(self, limits=None, overhead=None, requests=None,
+    def _claim(self, limits=None, requests=None,
                image_meta=None, **kwargs):
         instance_type = self._fake_instance_type(**kwargs)
         numa_topology = kwargs.pop('numa_topology', None)
@@ -409,8 +323,6 @@ class MoveClaimTestCase(ClaimTestCase):
             }
         else:
             self.db_numa_topology = None
 
-        if overhead is None:
-            overhead = {'memory_mb': 0}
         requests = requests or self.empty_requests
@@ -422,7 +334,7 @@ class MoveClaimTestCase(ClaimTestCase):
             return claims.MoveClaim(self.context, self.instance, _NODENAME,
                                     instance_type, image_meta, self.tracker,
                                     self.resources, requests,
-                                    overhead=overhead, limits=limits)
+                                    limits=limits)
         return get_claim()
 
     @mock.patch('nova.objects.Instance.drop_migration_context')

nova/tests/unit/compute/test_compute.py

@@ -1702,8 +1702,12 @@ class ComputeTestCase(BaseTestCase,
         instance = self._create_fake_instance_obj(params)
         self.compute.build_and_run_instance(self.context, instance,
                 {}, {}, {}, block_device_mapping=[], limits=limits)
-        self.assertEqual(3072, cn.memory_mb_used)
-        self.assertEqual(768, cn.local_gb_used)
+        # NOTE(danms): Since we no longer claim memory and disk, this should
+        # complete normally. In reality, this would have been rejected by
+        # placement/scheduler before the instance got here.
+        self.assertEqual(11264, cn.memory_mb_used)
+        self.assertEqual(17152, cn.local_gb_used)
 
     def test_create_multiple_instance_with_neutron_port(self):
         def fake_is_neutron():
@@ -1771,54 +1775,6 @@ class ComputeTestCase(BaseTestCase,
         self.compute.build_and_run_instance(self.context, instance,
                 {}, {}, filter_properties, block_device_mapping=[])
 
-    def test_create_instance_with_oversubscribed_cpu(self):
-        # Test passing of oversubscribed cpu policy from the scheduler.
-
-        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
-        self.rt.update_available_resource(self.context.elevated(), NODENAME)
-        limits = {'vcpu': 3}
-        filter_properties = {'limits': limits}
-
-        # get total memory as reported by virt driver:
-        resources = self.compute.driver.get_available_resource(NODENAME)
-        self.assertEqual(2, resources['vcpus'])
-
-        # build an instance, specifying an amount of memory that exceeds
-        # total_mem_mb, but is less than the oversubscribed limit:
-        params = {"flavor": {"memory_mb": 10, "root_gb": 1,
-                             "ephemeral_gb": 1, "vcpus": 2}}
-        instance = self._create_fake_instance_obj(params)
-        self.compute.build_and_run_instance(self.context, instance, {}, {},
-                filter_properties, block_device_mapping=[])
-
-        cn = self.rt.compute_nodes[NODENAME]
-        self.assertEqual(2, cn.vcpus_used)
-
-        # create one more instance:
-        params = {"flavor": {"memory_mb": 10, "root_gb": 1,
-                             "ephemeral_gb": 1, "vcpus": 1}}
-        instance = self._create_fake_instance_obj(params)
-        self.compute.build_and_run_instance(self.context, instance, {}, {},
-                filter_properties, block_device_mapping=[])
-
-        self.assertEqual(3, cn.vcpus_used)
-
-        # delete the instance:
-        instance['vm_state'] = vm_states.DELETED
-        self.rt.update_usage(self.context, instance, NODENAME)
-
-        self.assertEqual(2, cn.vcpus_used)
-
-        # now oversubscribe vcpus and fail:
-        params = {"flavor": {"memory_mb": 10, "root_gb": 1,
-                             "ephemeral_gb": 1, "vcpus": 2}}
-        instance = self._create_fake_instance_obj(params)
-
-        limits = {'vcpu': 3}
-        self.compute.build_and_run_instance(self.context, instance, {}, {},
-                {}, block_device_mapping=[], limits=limits)
-        self.assertEqual(vm_states.ERROR, instance.vm_state)
-
     def test_create_instance_with_oversubscribed_disk(self):
         # Test passing of oversubscribed disk policy from the scheduler.
@@ -1845,30 +1801,6 @@ class ComputeTestCase(BaseTestCase,
         cn = self.rt.compute_nodes[NODENAME]
         self.assertEqual(instance_gb, cn.local_gb_used)
 
-    def test_create_instance_with_oversubscribed_disk_fail(self):
-        """Test passing of oversubscribed disk policy from the scheduler, but
-        with insufficient disk.
-        """
-        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
-        self.rt.update_available_resource(self.context.elevated(), NODENAME)
-
-        # get total memory as reported by virt driver:
-        resources = self.compute.driver.get_available_resource(NODENAME)
-        total_disk_gb = resources['local_gb']
-
-        oversub_limit_gb = total_disk_gb * 1.5
-        instance_gb = int(total_disk_gb * 1.55)
-
-        # build an instance, specifying an amount of disk that exceeds
-        # total_disk_gb, but is less than the oversubscribed limit:
-        params = {"flavor": {"root_gb": instance_gb, "memory_mb": 10}}
-        instance = self._create_fake_instance_obj(params)
-
-        limits = {'disk_gb': oversub_limit_gb}
-        self.compute.build_and_run_instance(self.context, instance, {}, {},
-                {}, block_device_mapping=[], limits=limits)
-        self.assertEqual(vm_states.ERROR, instance.vm_state)
-
     def test_create_instance_without_node_param(self):
         instance = self._create_fake_instance_obj({'node': None})

nova/tests/unit/compute/test_resource_tracker.py

@@ -2197,25 +2197,6 @@ class TestInstanceClaim(BaseTestCase):
         self.assertEqual(0, cn.memory_mb_used)
         self.assertEqual(0, cn.running_vms)
 
-    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
-    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
-    @mock.patch('nova.objects.ComputeNode.save')
-    def test_claim_limits(self, save_mock, migr_mock, pci_mock):
-        pci_mock.return_value = objects.InstancePCIRequests(requests=[])
-
-        good_limits = {
-            'memory_mb': _COMPUTE_NODE_FIXTURES[0].memory_mb,
-            'disk_gb': _COMPUTE_NODE_FIXTURES[0].local_gb,
-            'vcpu': _COMPUTE_NODE_FIXTURES[0].vcpus,
-        }
-        for key in good_limits.keys():
-            bad_limits = copy.deepcopy(good_limits)
-            bad_limits[key] = 0
-
-            self.assertRaises(exc.ComputeResourcesUnavailable,
-                              self.rt.instance_claim,
-                              self.ctx, self.instance, _NODENAME, bad_limits)
-
     @mock.patch('nova.compute.utils.is_volume_backed_instance')
     @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')