Remove TODOs around claim_resources_on_destination

The TODOs were added back in the Queens/Pike timeframe [1][2], but
at this point there isn't much value in resolving them by adding a
skip_filters kwarg to the scheduler, especially since [3] changed
the method to no longer support nested resource provider
allocations. The minimal duplication with what the scheduler does
in the non-forced evacuate/live migrate cases is good enough to
live with.

[1] Ie63a4798d420c39815e294843e02ab6473cfded2
[2] I6590f0eda4ec4996543ad40d8c2640b83fc3dd9d
[3] I7cbd5d9fb875ebf72995362e0b6693492ce32051

Change-Id: I3e599147f95337477c9573b517feee67e0ae37e4
Author: Matt Riedemann
Date:   2019-11-10 09:27:00 -05:00
Commit: b6133f8183 (parent 17b5a1ab85)

3 changed files with 0 additions and 10 deletions

nova/conductor/manager.py

@@ -1000,11 +1000,6 @@ class ComputeTaskManager(base.Base):
                             'not found.', instance.host, instance.node,
                             instance=instance)
-        # TODO(mriedem): Call select_destinations() with a
-        # skip_filters=True flag so the scheduler does the work of
-        # claiming resources on the destination in Placement but still
-        # bypass the scheduler filters, which honors the 'force' flag
-        # in the API.
         try:
             scheduler_utils.claim_resources_on_destination(
                 context, self.report_client, instance, source_node, dest_node)
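For context, claim_resources_on_destination() copies the instance's
resource allocations from the source compute node's resource provider
to the destination's in Placement. A minimal standalone sketch of the
idea, using a hypothetical placement client whose get_allocations and
put_allocations methods are assumed names, not Nova's real report
client API:

    # Sketch: a "claim on destination" takes whatever the instance has
    # allocated against the source provider and writes the same resource
    # amounts against the destination provider. The client methods here
    # are hypothetical stand-ins for Nova's report client.
    def claim_on_destination(placement, instance_uuid,
                             source_rp_uuid, dest_rp_uuid):
        allocs = placement.get_allocations(instance_uuid)
        # e.g. {'VCPU': 2, 'MEMORY_MB': 2048, 'DISK_GB': 20}
        resources = allocs[source_rp_uuid]['resources']
        # Per [3] in the commit message, nested providers are not
        # supported, so a flat provider-to-provider copy suffices.
        placement.put_allocations(
            instance_uuid, {dest_rp_uuid: {'resources': resources}})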

nova/conductor/tasks/live_migrate.py

@@ -276,9 +276,6 @@ class LiveMigrationTask(base.TaskBase):
             instance_id=self.instance.uuid, host=self.destination)
 
     def _check_destination_has_enough_memory(self):
-        # TODO(mriedem): This method can be removed when the forced host
-        # scenario is calling select_destinations() in the scheduler because
-        # Placement will be used to filter allocation candidates by MEMORY_MB.
         compute = self._get_compute_info(self.destination)
         free_ram_mb = compute.free_ram_mb
         total_ram_mb = compute.memory_mb
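The hunk cuts off mid-method; for context, the check concludes by
comparing the destination's free RAM to the instance's requested
memory and failing the pre-check when it does not fit. A hedged
paraphrase (MigrationPreCheckError is Nova's real exception, the
rest is approximate, not verbatim):

    # Paraphrased continuation of _check_destination_has_enough_memory():
    # fail the migration pre-check when the instance cannot fit in the
    # destination host's currently free RAM.
    mem_inst = self.instance.memory_mb
    if not mem_inst or free_ram_mb <= mem_inst:
        reason = ('Unable to migrate %s to %s: lack of memory '
                  '(free: %s MB <= instance: %s MB)' %
                  (self.instance.uuid, self.destination,
                   free_ram_mb, mem_inst))
        raise exception.MigrationPreCheckError(reason=reason)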

nova/scheduler/utils.py

@@ -646,8 +646,6 @@ def resources_from_request_spec(ctxt, spec_obj, host_manager,
     return res_req
 
 
-# TODO(mriedem): Remove this when select_destinations() in the scheduler takes
-# some sort of skip_filters flag.
 def claim_resources_on_destination(
         context, reportclient, instance, source_node, dest_node,
         source_allocations=None, consumer_generation=None):
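For reference, the conductor call site in the first hunk above passes
only the required arguments, leaving source_allocations and
consumer_generation at their None defaults:

    # From the ComputeTaskManager hunk above: the forced move path
    # claims on the destination directly instead of going through the
    # scheduler's filtering pass.
    scheduler_utils.claim_resources_on_destination(
        context, self.report_client, instance, source_node, dest_node)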