Skip querying resource request in revert_resize if no qos port

During revert_resize we can skip collecting port resource requests
from neutron if no port in the instance's network info cache has an
allocation key in its binding profile.

Note that we cannot do the same optimization in the MigrationTask as we
would like to use migration to heal missing port allocations. See more
in bug #1819923.
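
As a rough illustration (the port IDs, resource provider UUID and dict
shapes below are assumed, not taken from this change): a port that got
a bandwidth allocation during the resize carries an 'allocation' key in
its cached binding profile, so inspecting the cache is enough to decide
whether neutron needs to be queried at all.

    # Hypothetical cached VIF entries; only the 'profile' content matters.
    cached_vifs = [
        {'id': 'port-a', 'profile': {}},                            # normal port
        {'id': 'port-b', 'profile': {'allocation': 'rp-uuid-1'}},   # QoS port
    ]

    # The fast path: only ask neutron when at least one cached port
    # shows an allocation in its binding profile.
    needs_neutron_query = any(
        vif['profile'].get('allocation') for vif in cached_vifs)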

Change-Id: I72d0e28f3b9319e521243d7d0dc5dfa4feaf56f5
blueprint: support-move-ops-with-qos-ports
Author: Balazs Gibizer
Date:   2019-09-11 14:33:06 +02:00
Parent: 47bfc46fb5
Commit: 7da94440db

4 changed files with 51 additions and 10 deletions


@@ -3459,16 +3459,24 @@ class API(base.Base):
         reqspec.flavor = instance.old_flavor
         reqspec.save()
-        # TODO(gibi): do not directly overwrite the
-        # RequestSpec.requested_resources as others like cyborg might have
-        # added things there already
-        # NOTE(gibi): We need to collect the requested resource again as it is
-        # intentionally not persisted in nova. Note that this needs to be
-        # done here as the nova API code directly calls revert on the
-        # dest compute service skipping the conductor.
-        port_res_req = self.network_api.get_requested_resource_for_instance(
-            context, instance.uuid)
-        reqspec.requested_resources = port_res_req
+        # NOTE(gibi): This is a performance optimization. If the network info
+        # cache does not have ports with allocations in the binding profile
+        # then we can skip reading port resource requests from neutron below.
+        # If a port has a resource request then the finish_resize call has
+        # already put an allocation in its binding profile during the resize.
+        if instance.get_network_info().has_port_with_allocation():
+            # TODO(gibi): do not directly overwrite the
+            # RequestSpec.requested_resources as others like cyborg might
+            # have added things there already
+            # NOTE(gibi): We need to collect the requested resources again
+            # as they are intentionally not persisted in nova. Note that this
+            # needs to be done here as the nova API code directly calls
+            # revert on the dest compute service, skipping the conductor.
+            port_res_req = (
+                self.network_api.get_requested_resource_for_instance(
+                    context, instance.uuid))
+            reqspec.requested_resources = port_res_req
 
         instance.task_state = task_states.RESIZE_REVERTING
         instance.save(expected_task_state=[None])
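
A minimal sketch of how the new guard behaves for an instance without
QoS ports, assuming a development environment where nova.network.model
is importable (the port id and profile below are made up and this is
separate from the unit tests further down):

    from unittest import mock

    from nova.network import model

    # Cached network info with one port whose binding profile has no
    # 'allocation' key, i.e. not a QoS port.
    network_info = model.NetworkInfo([
        model.VIF(id='port-1', profile={'foo': 'bar'})])

    network_api = mock.Mock()
    if network_info.has_port_with_allocation():
        network_api.get_requested_resource_for_instance(
            mock.sentinel.context, mock.sentinel.instance_uuid)

    # With no QoS port in the cache the neutron round trip is skipped.
    network_api.get_requested_resource_for_instance.assert_not_called()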


@@ -487,6 +487,9 @@ class VIF(Model):
         vif['network'] = Network.hydrate(vif['network'])
         return vif
 
+    def has_allocation(self):
+        return self['profile'] and bool(self['profile'].get('allocation'))
+
 
 def get_netmask(ip, subnet):
     """Returns the netmask appropriate for injection into a guest."""
@@ -540,6 +543,9 @@ class NetworkInfo(list):
         return [('network-vif-plugged', vif['id'])
                 for vif in self if not vif.has_bind_time_event(migration)]
 
+    def has_port_with_allocation(self):
+        return any(vif.has_allocation() for vif in self)
+
 
 class NetworkInfoAsyncWrapper(NetworkInfo):
     """Wrapper around NetworkInfo that allows retrieving NetworkInfo

@@ -42,6 +42,7 @@ from nova import context
 from nova.db import api as db
 from nova import exception
 from nova.image import api as image_api
+from nova.network import model
 from nova.network.neutronv2 import api as neutron_api
 from nova.network.neutronv2 import constants
 from nova import objects
@@ -1663,6 +1664,8 @@ class _ComputeAPIUnitTestMixIn(object):
             mock_check, mock_get_host_az, mock_get_requested_resources):
         params = dict(vm_state=vm_states.RESIZED)
         fake_inst = self._create_instance_obj(params=params)
+        fake_inst.info_cache.network_info = model.NetworkInfo([
+            model.VIF(id=uuids.port1, profile={'allocation': uuids.rp})])
         fake_inst.old_flavor = fake_inst.flavor
         fake_mig = objects.Migration._from_db_object(
             self.context, objects.Migration(),
@@ -1722,6 +1725,7 @@ class _ComputeAPIUnitTestMixIn(object):
             mock_get_host_az, mock_get_requested_resources):
         params = dict(vm_state=vm_states.RESIZED)
         fake_inst = self._create_instance_obj(params=params)
+        fake_inst.info_cache.network_info = model.NetworkInfo([])
         fake_inst.old_flavor = fake_inst.flavor
         fake_mig = objects.Migration._from_db_object(
             self.context, objects.Migration(),
@@ -1746,6 +1750,7 @@ class _ComputeAPIUnitTestMixIn(object):
         mock_get_migration.assert_called_once_with(
             self.context, fake_inst['uuid'], 'finished')
         mock_inst_save.assert_called_once_with(expected_task_state=[None])
+        mock_get_requested_resources.assert_not_called()
 
     @mock.patch('nova.compute.utils.is_volume_backed_instance',
                 return_value=False)


@@ -887,6 +887,28 @@ iface eth1 inet static
             [('network-vif-plugged', uuids.normal_vif)],
             network_info.get_plug_time_events(diff_host))
 
+    def test_has_port_with_allocation(self):
+        network_info = model.NetworkInfo([])
+        self.assertFalse(network_info.has_port_with_allocation())
+
+        network_info.append(
+            model.VIF(id=uuids.port_without_profile))
+        self.assertFalse(network_info.has_port_with_allocation())
+
+        network_info.append(
+            model.VIF(id=uuids.port_no_allocation, profile={'foo': 'bar'}))
+        self.assertFalse(network_info.has_port_with_allocation())
+
+        network_info.append(
+            model.VIF(
+                id=uuids.port_empty_alloc, profile={'allocation': None}))
+        self.assertFalse(network_info.has_port_with_allocation())
+
+        network_info.append(
+            model.VIF(
+                id=uuids.port_with_alloc, profile={'allocation': uuids.rp}))
+        self.assertTrue(network_info.has_port_with_allocation())
+
 
 class TestNetworkMetadata(test.NoDBTestCase):
     def setUp(self):