Fix GET /servers/detail host_status performance regression

Change I82b11b8866ac82b05eae04351605d52fa8b91453 moved the
host_status extended server attribute processing from an
extension to the main servers view builder. This, however,
caused a regression in the detailed listing of servers because
it did not incorporate the caching mechanism previously used by
the extension: now, when microversion 2.16 or greater is used
(and the request passes the policy check), the host status is
fetched once per server rather than once per host, even when
multiple servers reside on the same host.

This moves the host_status processing out of the show() method
when listing servers with details and processes them in aggregate
similar to security groups and attached volumes.

One catch is the show() method handles instances from down cells
for us so we have to handle that separately in the new host_status
processing, but it's trivial (just don't get host_status for
instances without a host field set).

This reverts commit 0cecd2ac32.

Change-Id: I8278d4ea993ed1600919e34c9759600c8c7dbb41
Closes-Bug: #1830260
This commit is contained in:
Matt Riedemann 2019-06-18 11:13:32 -04:00
parent 1f85a1f3ad
commit ab7d923ae7
4 changed files with 106 additions and 8 deletions

View File

@ -373,10 +373,6 @@ class ViewBuilder(common.ViewBuilder):
show_extra_specs = False show_extra_specs = False
show_extended_attr = context.can( show_extended_attr = context.can(
esa_policies.BASE_POLICY_NAME, fatal=False) esa_policies.BASE_POLICY_NAME, fatal=False)
show_host_status = False
if (api_version_request.is_supported(request, min_version='2.16')):
show_host_status = context.can(
servers_policies.SERVERS % 'show:host_status', fatal=False)
instance_uuids = [inst['uuid'] for inst in instances] instance_uuids = [inst['uuid'] for inst in instances]
bdms = self._get_instance_bdms_in_multiple_cells(context, bdms = self._get_instance_bdms_in_multiple_cells(context,
@ -389,11 +385,17 @@ class ViewBuilder(common.ViewBuilder):
servers_dict = self._list_view(self.show, request, instances, servers_dict = self._list_view(self.show, request, instances,
coll_name, show_extra_specs, coll_name, show_extra_specs,
show_extended_attr=show_extended_attr, show_extended_attr=show_extended_attr,
show_host_status=show_host_status, # We process host_status in aggregate.
show_host_status=False,
show_sec_grp=False, show_sec_grp=False,
bdms=bdms, bdms=bdms,
cell_down_support=cell_down_support) cell_down_support=cell_down_support)
if (api_version_request.is_supported(request, min_version='2.16') and
context.can(servers_policies.SERVERS % 'show:host_status',
fatal=False)):
self._add_host_status(list(servers_dict["servers"]), instances)
self._add_security_grps(request, list(servers_dict["servers"]), self._add_security_grps(request, list(servers_dict["servers"]),
instances) instances)
return servers_dict return servers_dict
@ -562,6 +564,27 @@ class ViewBuilder(common.ViewBuilder):
return fault_dict return fault_dict
def _add_host_status(self, servers, instances):
"""Adds the ``host_status`` field to the list of servers
This method takes care to filter instances from down cells since they
do not have a host set and as such we cannot determine the host status.
:param servers: list of detailed server dicts for the API response
body; this list is modified by reference by updating the server
dicts within the list
:param instances: list of Instance objects
"""
# Filter out instances from down cells which do not have a host field.
instances = [instance for instance in instances if 'host' in instance]
# Get the dict, keyed by instance.uuid, of host status values.
host_statuses = self.compute_api.get_instances_host_statuses(instances)
for server in servers:
# Filter out anything that is not in the resulting dict because
# we had to filter the list of instances above for down cells.
if server['id'] in host_statuses:
server['host_status'] = host_statuses[server['id']]
def _add_security_grps(self, req, servers, instances): def _add_security_grps(self, req, servers, instances):
if not len(servers): if not len(servers):
return return

View File

@ -4824,6 +4824,21 @@ class API(base.Base):
host_status = fields_obj.HostStatus.NONE host_status = fields_obj.HostStatus.NONE
return host_status return host_status
def get_instances_host_statuses(self, instance_list):
    """Returns a dict, keyed by instance uuid, of host status values.

    The host status is looked up at most once per distinct host;
    instances sharing a host reuse the cached value.

    :param instance_list: list of Instance objects
    :returns: dict mapping instance uuid to host status
    """
    # Cache of statuses already resolved, keyed by host name.
    statuses_by_host = {}
    statuses_by_uuid = {}
    for instance in instance_list:
        host = instance.host
        if not host:
            # Without a host there is no status to look up.
            statuses_by_uuid[instance.uuid] = fields_obj.HostStatus.NONE
            continue
        if host not in statuses_by_host:
            statuses_by_host[host] = self.get_instance_host_status(instance)
        statuses_by_uuid[instance.uuid] = statuses_by_host[host]
    return statuses_by_uuid
def target_host_cell(fn): def target_host_cell(fn):
"""Target a host-based function to a cell. """Target a host-based function to a cell.

View File

@ -2049,6 +2049,7 @@ class ServersControllerTestV216(ServersControllerTest):
super(ServersControllerTestV216, self).setUp() super(ServersControllerTestV216, self).setUp()
self.mock_get.side_effect = fakes.fake_compute_get( self.mock_get.side_effect = fakes.fake_compute_get(
id=2, uuid=FAKE_UUID, id=2, uuid=FAKE_UUID,
host="node-fake",
node="node-fake", node="node-fake",
reservation_id="r-1", launch_index=0, reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2, kernel_id=UUID1, ramdisk_id=UUID2,
@ -2069,9 +2070,10 @@ class ServersControllerTestV216(ServersControllerTest):
task_state=None, task_state=None,
vm_state=vm_states.ACTIVE, vm_state=vm_states.ACTIVE,
power_state=1) power_state=1)
self.useFixture(fixtures.MockPatchObject( self.mock_get_instance_host_status = self.useFixture(
compute_api.API, 'get_instance_host_status', fixtures.MockPatchObject(
return_value='UP')).mock compute_api.API, 'get_instance_host_status',
return_value='UP')).mock
def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark, def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
status="ACTIVE", progress=100): status="ACTIVE", progress=100):
@ -2084,6 +2086,9 @@ class ServersControllerTestV216(ServersControllerTest):
server_dict['server']['locked'] = False server_dict['server']['locked'] = False
server_dict['server']["host_status"] = "UP" server_dict['server']["host_status"] = "UP"
server_dict['server']["OS-EXT-SRV-ATTR:hostname"] = "server2" server_dict['server']["OS-EXT-SRV-ATTR:hostname"] = "server2"
server_dict['server']['hostId'] = nova_utils.generate_hostid(
'node-fake', server_dict['server']['tenant_id'])
server_dict['server']["OS-EXT-SRV-ATTR:host"] = "node-fake"
server_dict['server'][ server_dict['server'][
"OS-EXT-SRV-ATTR:hypervisor_hostname"] = "node-fake" "OS-EXT-SRV-ATTR:hypervisor_hostname"] = "node-fake"
server_dict['server']["OS-EXT-SRV-ATTR:kernel_id"] = UUID1 server_dict['server']["OS-EXT-SRV-ATTR:kernel_id"] = UUID1
@ -2120,6 +2125,7 @@ class ServersControllerTestV216(ServersControllerTest):
for i in range(2): for i in range(2):
server = fakes.stub_instance_obj(context, server = fakes.stub_instance_obj(context,
id=2, uuid=FAKE_UUID, id=2, uuid=FAKE_UUID,
host="node-fake",
node="node-fake", node="node-fake",
reservation_id="r-1", launch_index=0, reservation_id="r-1", launch_index=0,
kernel_id=UUID1, ramdisk_id=UUID2, kernel_id=UUID1, ramdisk_id=UUID2,
@ -2152,6 +2158,7 @@ class ServersControllerTestV216(ServersControllerTest):
req = self.req('/fake/servers/detail') req = self.req('/fake/servers/detail')
servers_list = self.controller.detail(req) servers_list = self.controller.detail(req)
self.assertEqual(2, len(servers_list['servers']))
image_bookmark = "http://localhost/fake/images/10" image_bookmark = "http://localhost/fake/images/10"
flavor_bookmark = "http://localhost/fake/flavors/2" flavor_bookmark = "http://localhost/fake/flavors/2"
expected_server = self._get_server_data_dict(FAKE_UUID, expected_server = self._get_server_data_dict(FAKE_UUID,
@ -2160,6 +2167,9 @@ class ServersControllerTestV216(ServersControllerTest):
progress=0) progress=0)
self.assertIn(expected_server['server'], servers_list['servers']) self.assertIn(expected_server['server'], servers_list['servers'])
# We should have only gotten the host status once per host (and the
# 2 servers in the response are using the same host).
self.mock_get_instance_host_status.assert_called_once()
class ServersControllerTestV219(ServersControllerTest): class ServersControllerTestV219(ServersControllerTest):

View File

@ -5028,6 +5028,56 @@ class _ComputeAPIUnitTestMixIn(object):
self.compute_api._populate_instance_names(instance, 2, 1) self.compute_api._populate_instance_names(instance, 2, 1)
self.assertEqual('Server-%s' % instance.uuid, instance.hostname) self.assertEqual('Server-%s' % instance.uuid, instance.hostname)
def test_host_statuses(self):
# Exercises get_instances_host_statuses() across one instance per
# status scenario. NOTE(review): the diff rendering lost the original
# indentation; code tokens below are reproduced verbatim.
five_min_ago = timeutils.utcnow() - datetime.timedelta(minutes=5)
# instance_1: service forced_down -> DOWN
# instance_2: service disabled (not forced down) -> MAINTENANCE
# instance_3: stale service heartbeat (5 min ago) -> UNKNOWN
# instance_4: fresh service heartbeat -> UP
# instance_5: empty service list -> NONE
# instance_6: no host set -> NONE
# instance_7: shares host2 with instance_2 -> cached MAINTENANCE
instances = [
objects.Instance(uuid=uuids.instance_1, host='host1', services=
self._obj_to_list_obj(objects.ServiceList(
self.context), objects.Service(id=0, host='host1',
disabled=True, forced_down=True,
binary='nova-compute'))),
objects.Instance(uuid=uuids.instance_2, host='host2', services=
self._obj_to_list_obj(objects.ServiceList(
self.context), objects.Service(id=0, host='host2',
disabled=True, forced_down=False,
binary='nova-compute'))),
objects.Instance(uuid=uuids.instance_3, host='host3', services=
self._obj_to_list_obj(objects.ServiceList(
self.context), objects.Service(id=0, host='host3',
disabled=False, last_seen_up=five_min_ago,
forced_down=False, binary='nova-compute'))),
objects.Instance(uuid=uuids.instance_4, host='host4', services=
self._obj_to_list_obj(objects.ServiceList(
self.context), objects.Service(id=0, host='host4',
disabled=False, last_seen_up=timeutils.utcnow(),
forced_down=False, binary='nova-compute'))),
objects.Instance(uuid=uuids.instance_5, host='host5', services=
objects.ServiceList()),
objects.Instance(uuid=uuids.instance_6, host=None, services=
self._obj_to_list_obj(objects.ServiceList(
self.context), objects.Service(id=0, host='host6',
disabled=True, forced_down=False,
binary='nova-compute'))),
objects.Instance(uuid=uuids.instance_7, host='host2', services=
self._obj_to_list_obj(objects.ServiceList(
self.context), objects.Service(id=0, host='host2',
disabled=True, forced_down=False,
binary='nova-compute')))
]
host_statuses = self.compute_api.get_instances_host_statuses(
instances)
expect_statuses = {uuids.instance_1: fields_obj.HostStatus.DOWN,
uuids.instance_2: fields_obj.HostStatus.MAINTENANCE,
uuids.instance_3: fields_obj.HostStatus.UNKNOWN,
uuids.instance_4: fields_obj.HostStatus.UP,
uuids.instance_5: fields_obj.HostStatus.NONE,
uuids.instance_6: fields_obj.HostStatus.NONE,
uuids.instance_7: fields_obj.HostStatus.MAINTENANCE}
# Every instance must appear in the result with its expected status.
for instance in instances:
self.assertEqual(expect_statuses[instance.uuid],
host_statuses[instance.uuid])
@mock.patch.object(objects.Migration, 'get_by_id_and_instance') @mock.patch.object(objects.Migration, 'get_by_id_and_instance')
@mock.patch.object(objects.InstanceAction, 'action_start') @mock.patch.object(objects.InstanceAction, 'action_start')
def test_live_migrate_force_complete_succeeded( def test_live_migrate_force_complete_succeeded(