Browse Source

Optimize SchedulerReportClient.delete_resource_provider

This optimizes the SchedulerReportClient
delete_resource_provider method, which is called when deleting a
compute node and its related resource provider. That happens both
in the API when a compute service is deleted and in the compute
service's update_available_resource method for orphan nodes
(which is why the new InstanceList query method needs to be
remotable).

The actual DB query logic added here is covered by the
nova.tests.functional.wsgi.test_services functional tests.

Change-Id: Id033e8f95f853ccfc34bd350be6df11c1bfb1b7d
Related-Bug: #1811726
changes/70/657070/3
Matt Riedemann 3 years ago
parent
commit
4adface8b9
  1. 21
      nova/objects/instance.py
  2. 18
      nova/scheduler/client/report.py
  3. 2
      nova/tests/unit/objects/test_objects.py
  4. 16
      nova/tests/unit/scheduler/client/test_report.py

21
nova/objects/instance.py

@ -1310,7 +1310,8 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 2.2: Pagination for get_active_by_window_joined()
# Version 2.3: Add get_count_by_vm_state()
# Version 2.4: Add get_counts()
VERSION = '2.4'
# Version 2.5: Add get_uuids_by_host_and_node()
VERSION = '2.5'
fields = {
'objects': fields.ListOfObjectsField('Instance'),
@ -1373,6 +1374,24 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@staticmethod
@db_api.pick_context_manager_reader
def _get_uuids_by_host_and_node(context, host, node):
    """Query the DB for non-deleted instance UUIDs on a host/node pair.

    Selects only the uuid column so no full Instance rows (or joined
    extra columns) are loaded.
    """
    uuid_query = context.session.query(models.Instance.uuid)
    # Single filter_by with kwargs is equivalent to chained
    # filter_by calls (conditions are AND-ed together).
    return uuid_query.filter_by(
        host=host, node=node, deleted=0).all()
@base.remotable_classmethod
def get_uuids_by_host_and_node(cls, context, host, node):
    """List the UUIDs of all non-deleted instances on a host/node pair.

    :param context: nova auth request context
    :param host: limit results to instances on this host
    :param node: limit results to instances on this node
    :returns: list of non-deleted instance UUIDs on the given host
        and node
    """
    uuids = cls._get_uuids_by_host_and_node(context, host, node)
    return uuids
@base.remotable_classmethod
def get_by_host_and_not_type(cls, context, host, type_id=None,
expected_attrs=None):

18
nova/scheduler/client/report.py

@ -2169,14 +2169,16 @@ class SchedulerReportClient(object):
# Delete any allocations for this resource provider.
# Since allocations are by consumer, we get the consumers on this
# host, which are its instances.
# TODO(mriedem): Optimize this up by adding an
# InstanceList.get_uuids_by_host_and_node method.
# Pass expected_attrs=[] to avoid joining on extra columns we
# don't use.
instances = objects.InstanceList.get_by_host_and_node(
context, host, nodename, expected_attrs=[])
for instance in instances:
self.delete_allocation_for_instance(context, instance.uuid)
# NOTE(mriedem): This assumes the only allocations on this node
# are instances, but there could be migration consumers if the
# node is deleted during a migration or allocations from an
# evacuated host (bug 1829479). Obviously an admin shouldn't
# do that but...you know. I guess the provider deletion should fail
# in that case which is what we'd want to happen.
instance_uuids = objects.InstanceList.get_uuids_by_host_and_node(
context, host, nodename)
for instance_uuid in instance_uuids:
self.delete_allocation_for_instance(context, instance_uuid)
try:
self._delete_provider(rp_uuid, global_request_id=context.global_id)
except (exception.ResourceProviderInUse,

2
nova/tests/unit/objects/test_objects.py

@ -1084,7 +1084,7 @@ object_data = {
'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a',
'InstanceGroupList': '1.8-90f8f1a445552bb3bbc9fa1ae7da27d4',
'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
'InstanceList': '2.4-d2c5723da8c1d08e07cb00160edfd292',
'InstanceList': '2.5-43b5e7d8c4ebde52f2c2da22a1739e95',
'InstanceMapping': '1.2-3bd375e65c8eb9c45498d2f87b882e03',
'InstanceMappingList': '1.2-ee638619aa3d8a82a59c0c83bfa64d78',
'InstanceNUMACell': '1.4-7c1eb9a198dee076b4de0840e45f4f55',

16
nova/tests/unit/scheduler/client/test_report.py

@ -3188,21 +3188,18 @@ class TestAllocations(SchedulerReportClientTestCase):
"delete")
@mock.patch("nova.scheduler.client.report.SchedulerReportClient."
"delete_allocation_for_instance")
@mock.patch("nova.objects.InstanceList.get_by_host_and_node")
@mock.patch("nova.objects.InstanceList.get_uuids_by_host_and_node")
def test_delete_resource_provider_cascade(self, mock_by_host,
mock_del_alloc, mock_delete):
self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
hypervisor_hostname="fake_hostname", )
inst1 = objects.Instance(uuid=uuids.inst1)
inst2 = objects.Instance(uuid=uuids.inst2)
mock_by_host.return_value = objects.InstanceList(
objects=[inst1, inst2])
mock_by_host.return_value = [uuids.inst1, uuids.inst2]
resp_mock = mock.Mock(status_code=204)
mock_delete.return_value = resp_mock
self.client.delete_resource_provider(self.context, cn, cascade=True)
mock_by_host.assert_called_once_with(
self.context, cn.host, cn.hypervisor_hostname, expected_attrs=[])
self.context, cn.host, cn.hypervisor_hostname)
self.assertEqual(2, mock_del_alloc.call_count)
exp_url = "/resource_providers/%s" % uuids.cn
mock_delete.assert_called_once_with(
@ -3213,17 +3210,14 @@ class TestAllocations(SchedulerReportClientTestCase):
"delete")
@mock.patch("nova.scheduler.client.report.SchedulerReportClient."
"delete_allocation_for_instance")
@mock.patch("nova.objects.InstanceList.get_by_host_and_node")
@mock.patch("nova.objects.InstanceList.get_uuids_by_host_and_node")
def test_delete_resource_provider_no_cascade(self, mock_by_host,
mock_del_alloc, mock_delete):
self.client._provider_tree.new_root(uuids.cn, uuids.cn, generation=1)
self.client._association_refresh_time[uuids.cn] = mock.Mock()
cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
hypervisor_hostname="fake_hostname", )
inst1 = objects.Instance(uuid=uuids.inst1)
inst2 = objects.Instance(uuid=uuids.inst2)
mock_by_host.return_value = objects.InstanceList(
objects=[inst1, inst2])
mock_by_host.return_value = [uuids.inst1, uuids.inst2]
resp_mock = mock.Mock(status_code=204)
mock_delete.return_value = resp_mock
self.client.delete_resource_provider(self.context, cn)

Loading…
Cancel
Save