Merge "Cleanups for the scheduler code"
commit 505fe2a549

@@ -308,11 +308,6 @@ class FilterScheduler(driver.Scheduler):
         """
         # The list of hosts selected for each instance
         selected_hosts = []
-        # This the overall list of values to be returned. There will be one
-        # item per instance, and each item will be a list of Selection objects
-        # representing the selected host along with zero or more alternates
-        # from the same cell.
-        selections_to_return = []

         for num in range(num_instances):
             instance_uuid = instance_uuids[num] if instance_uuids else None
@@ -336,6 +331,10 @@ class FilterScheduler(driver.Scheduler):
         # raise a NoValidHost exception.
         self._ensure_sufficient_hosts(context, selected_hosts, num_instances)

+        # This the overall list of values to be returned. There will be one
+        # item per instance, and each item will be a list of Selection objects
+        # representing the selected host along with zero or more alternates
+        # from the same cell.
         selections_to_return = self._get_alternate_hosts(selected_hosts,
             spec_obj, hosts, num, num_alts)
         return selections_to_return
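
The two hunks above move the explanatory comment block down to the point where selections_to_return is actually assigned, and drop the pre-loop empty-list initialization, since the value comes from _get_alternate_hosts() in the unchanged context lines. As a minimal, self-contained sketch of the shape that comment describes, assuming a hypothetical namedtuple stand-in for nova's Selection object (field names chosen for illustration, not taken from the commit):

    # Illustration only: one inner list per requested instance; the first
    # entry is the selected host, the rest are alternates from the same cell.
    from collections import namedtuple

    Selection = namedtuple('Selection', ['service_host', 'nodename', 'cell_uuid'])

    selections_to_return = [
        [Selection('host1', 'node1', 'cell-a'),    # chosen host for instance 0
         Selection('host2', 'node2', 'cell-a')],   # alternate from the same cell
        [Selection('host3', 'node3', 'cell-b')],   # instance 1, no alternates
    ]

    assert len(selections_to_return) == 2  # one entry per requested instance
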
@@ -1019,6 +1019,11 @@ def claim_resources(ctx, client, spec_obj, instance_uuid, alloc_req,
     if 'user_id' in spec_obj and spec_obj.user_id:
         user_id = spec_obj.user_id
     else:
+        # FIXME(mriedem): This would actually break accounting if we relied on
+        # the allocations for something like counting quota usage because in
+        # the case of migrating or evacuating an instance, the user here is
+        # likely the admin, not the owner of the instance, so the allocation
+        # would be tracked against the wrong user.
         user_id = ctx.user_id

     # NOTE(gibi): this could raise AllocationUpdateFailed which means there is
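
The FIXME added here documents why falling back to the request context's user can attribute the allocation to the wrong user. A minimal sketch of that fallback logic, assuming hypothetical stand-ins for the request spec and context rather than the real nova objects:

    class FakeSpec(object):
        # Hypothetical stand-in mimicking a RequestSpec with an optional
        # user_id field; supports the "'user_id' in spec_obj" check above.
        def __init__(self, user_id=None):
            self.user_id = user_id

        def __contains__(self, name):
            return getattr(self, name, None) is not None

    class FakeContext(object):
        def __init__(self, user_id):
            self.user_id = user_id

    def pick_user_id(ctx, spec_obj):
        # Prefer the user recorded on the spec; otherwise fall back to the
        # context's user, which during a migration or evacuation is likely
        # the admin rather than the instance owner (hence the FIXME).
        if 'user_id' in spec_obj and spec_obj.user_id:
            return spec_obj.user_id
        return ctx.user_id

    assert pick_user_id(FakeContext('admin'), FakeSpec('alice')) == 'alice'
    assert pick_user_id(FakeContext('admin'), FakeSpec(None)) == 'admin'
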
@@ -1057,12 +1057,6 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
     def setUp(self, mock_init_agg, mock_init_inst):
         super(HostManagerChangedNodesTestCase, self).setUp()
         self.host_manager = host_manager.HostManager()
-        self.fake_hosts = [
-            host_manager.HostState('host1', 'node1', uuids.cell),
-            host_manager.HostState('host2', 'node2', uuids.cell),
-            host_manager.HostState('host3', 'node3', uuids.cell),
-            host_manager.HostState('host4', 'node4', uuids.cell)
-        ]

     @mock.patch('nova.objects.ServiceList.get_by_binary')
     @mock.patch('nova.objects.ComputeNodeList.get_all')
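
For reference, a self-contained sketch of what the deleted fixture built: four host states in a single cell. HostState below is a hypothetical namedtuple stand-in for host_manager.HostState, and the cell uuid is a made-up constant rather than the uuids.cell sentinel used in the test:

    from collections import namedtuple

    HostState = namedtuple('HostState', ['host', 'nodename', 'cell_uuid'])
    CELL = '00000000-0000-0000-0000-000000000001'  # made-up cell uuid

    fake_hosts = [HostState('host%d' % i, 'node%d' % i, CELL)
                  for i in range(1, 5)]

    assert [h.host for h in fake_hosts] == ['host1', 'host2', 'host3', 'host4']
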