Generate request_id for Flavor based InstancePCIRequest
The InstancePCIRequest.request_id is used to correlate allocated PciDevice objects with the InstancePCIRequest object that triggered the PCI allocation. For neutron port based PCI requests the InstancePCIRequest.request_id was already set to a generated UUID by nova, but for flavor based requests the request_id was left as None. The placement PCI scheduling code depends on the request_id being a unique identifier of the request, so this patch starts filling in the request_id for flavor based requests as well. This change showed that in some places nova still used the request_id == None condition to distinguish between flavor based and neutron based requests. That logic is now adapted to use the newer and better InstancePCIRequest.source based approach. We also took the opportunity to move the logic of querying PCI devices allocated to an instance to the Instance ovo. This change fills the request_id for newly created flavor based InstancePCIRequest ovos. The change in logic to use the InstancePCIRequest.source property instead of the request_id == None condition works even if the request_id is None for already existing InstancePCIRequest objects, so this patch does not include a data migration to fill the request_id of existing objects. blueprint: pci-device-tracking-in-placement Change-Id: I53e03ff7a0221db682b043fb6d5adba3f5c9fdbe
This commit is contained in:
parent
06389f8d84
commit
ccab6fed46
|
@ -43,7 +43,6 @@ from nova.network import constants
|
|||
from nova.network import model as network_model
|
||||
from nova import objects
|
||||
from nova.objects import fields as obj_fields
|
||||
from nova.pci import manager as pci_manager
|
||||
from nova.pci import request as pci_request
|
||||
from nova.pci import utils as pci_utils
|
||||
from nova.pci import whitelist as pci_whitelist
|
||||
|
@ -1631,8 +1630,7 @@ class API:
|
|||
pci_request_id cannot be found on the instance.
|
||||
"""
|
||||
if pci_request_id:
|
||||
pci_devices = pci_manager.get_instance_pci_devs(
|
||||
instance, pci_request_id)
|
||||
pci_devices = instance.get_pci_devices(request_id=pci_request_id)
|
||||
if not pci_devices:
|
||||
# The pci_request_id likely won't mean much except for tracing
|
||||
# through the logs since it is generated per request.
|
||||
|
@ -1662,8 +1660,7 @@ class API:
|
|||
Currently this is done only for PF passthrough.
|
||||
"""
|
||||
if pci_request_id is not None:
|
||||
pci_devs = pci_manager.get_instance_pci_devs(
|
||||
instance, pci_request_id)
|
||||
pci_devs = instance.get_pci_devices(request_id=pci_request_id)
|
||||
if len(pci_devs) != 1:
|
||||
# NOTE(ndipanov): We shouldn't ever get here since
|
||||
# InstancePCIRequest instances built from network requests
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import typing as ty
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import exception as db_exc
|
||||
|
@ -1226,6 +1227,46 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
|
|||
pci_req for pci_req in self.pci_requests.requests
|
||||
if pci_req.request_id != pci_device.request_id]
|
||||
|
||||
def get_pci_devices(
    self,
    source: ty.Optional[int] = None,
    request_id: ty.Optional[str] = None,
) -> ty.List["objects.PciDevice"]:
    """Return the PCI devices allocated to the instance.

    :param source: Filter by source. It can be
        InstancePCIRequest.FLAVOR_ALIAS or InstancePCIRequest.NEUTRON_PORT
        or None. None means devices from both types of requests are
        returned.
    :param request_id: Filter by PciDevice.request_id. None means do not
        filter by request_id.
    :return: a list of matching PciDevice objects
    """
    # Bail out before touching self.pci_requests so that we do not
    # trigger an extra lazy load when there is nothing to filter anyway.
    if not self.pci_devices:
        return []

    matched = self.pci_devices.objects

    if request_id is not None:
        matched = [d for d in matched if d.request_id == request_id]

    if source is not None:
        # NOTE(gibi): this happens to work for the old requests too: an
        # old request has request_id None and so does the device that
        # was allocated for it, hence they are correlated via the None
        # key of this mapping.
        requests_by_id = {
            r.request_id: r for r in self.pci_requests.requests
        }
        matched = [
            d for d in matched
            if requests_by_id[d.request_id].source == source
        ]

    return matched
|
||||
|
||||
|
||||
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
|
||||
get_fault = expected_attrs and 'fault' in expected_attrs
|
||||
|
|
|
@ -480,24 +480,3 @@ class PciDevTracker(object):
|
|||
devs = self.allocations.pop(uuid, [])
|
||||
for dev in devs:
|
||||
self._free_device(dev)
|
||||
|
||||
|
||||
def get_instance_pci_devs(
    inst: 'objects.Instance', request_id: ty.Optional[str] = None,
) -> ty.List['objects.PciDevice']:
    """Get the devices allocated to one or all requests for an instance.

    :param inst: the Instance whose allocated PCI devices are queried
    :param request_id: selects which request's devices to return:

        - For a generic (flavor based) PCI request, the request id is None.
        - For sr-iov networking, the request id is a valid uuid.
        - The sentinel value 'all' returns every PCI device allocated to
          the instance regardless of request. Refer to the libvirt driver
          that handles soft_reboot and hard_boot of 'xen' instances.
    :return: a list of PciDevice objects; empty if none are allocated
    """
    pci_devices = inst.pci_devices
    if pci_devices is None:
        return []

    if request_id == 'all':
        # 'all' is a sentinel meaning "no filtering", so skip the
        # per-device comparison entirely.
        return list(pci_devices)

    return [
        device for device in pci_devices
        if device.request_id == request_id
    ]
|
||||
|
|
|
@ -43,6 +43,7 @@ import typing as ty
|
|||
import jsonschema
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
import nova.conf
|
||||
from nova import context as ctx
|
||||
|
@ -183,7 +184,9 @@ def _translate_alias_to_requests(
|
|||
count=int(count),
|
||||
spec=spec,
|
||||
alias_name=name,
|
||||
numa_policy=policy))
|
||||
numa_policy=policy,
|
||||
request_id=uuidutils.generate_uuid(),
|
||||
))
|
||||
return pci_requests
|
||||
|
||||
|
||||
|
|
|
@ -42,7 +42,6 @@ from nova import objects
|
|||
from nova.objects import fields as obj_fields
|
||||
from nova.objects import network_request as net_req_obj
|
||||
from nova.objects import virtual_interface as obj_vif
|
||||
from nova.pci import manager as pci_manager
|
||||
from nova.pci import request as pci_request
|
||||
from nova.pci import utils as pci_utils
|
||||
from nova.pci import whitelist as pci_whitelist
|
||||
|
@ -7738,11 +7737,11 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
'vf_num': 1,
|
||||
}))
|
||||
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
def test_populate_neutron_extension_values_binding_sriov(
|
||||
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
|
||||
host_id = 'my_host_id'
|
||||
instance = {'host': host_id}
|
||||
instance = objects.Instance(host=host_id)
|
||||
port_req_body = {'port': {}}
|
||||
pci_req_id = 'my_req_id'
|
||||
pci_dev = {'vendor_id': '1377',
|
||||
|
@ -7783,11 +7782,11 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
})
|
||||
)
|
||||
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
def test_populate_neutron_extension_values_binding_sriov_card_serial(
|
||||
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
|
||||
host_id = 'my_host_id'
|
||||
instance = {'host': host_id}
|
||||
instance = objects.Instance(host=host_id)
|
||||
port_req_body = {'port': {}}
|
||||
pci_req_id = 'my_req_id'
|
||||
pci_dev = {'vendor_id': 'a2d6',
|
||||
|
@ -7867,11 +7866,11 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
})
|
||||
)
|
||||
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
def test_populate_neutron_extension_values_binding_sriov_with_cap(
|
||||
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
|
||||
host_id = 'my_host_id'
|
||||
instance = {'host': host_id}
|
||||
instance = objects.Instance(host=host_id)
|
||||
port_req_body = {'port': {
|
||||
constants.BINDING_PROFILE: {
|
||||
'capabilities': ['switchdev']}}}
|
||||
|
@ -7907,12 +7906,12 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
constants.BINDING_PROFILE])
|
||||
|
||||
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
def test_populate_neutron_extension_values_binding_sriov_pf(
|
||||
self, mock_get_instance_pci_devs, mock_get_devspec
|
||||
):
|
||||
host_id = 'my_host_id'
|
||||
instance = {'host': host_id}
|
||||
instance = objects.Instance(host=host_id)
|
||||
port_req_body = {'port': {}}
|
||||
|
||||
pci_dev = objects.PciDevice(
|
||||
|
@ -8041,11 +8040,11 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
)
|
||||
|
||||
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
def test_populate_neutron_extension_values_binding_sriov_fail(
|
||||
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
|
||||
host_id = 'my_host_id'
|
||||
instance = {'host': host_id}
|
||||
instance = objects.Instance(host=host_id)
|
||||
port_req_body = {'port': {}}
|
||||
pci_req_id = 'my_req_id'
|
||||
pci_objs = [objects.PciDevice(vendor_id='1377',
|
||||
|
@ -8062,7 +8061,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
self.api._populate_neutron_binding_profile,
|
||||
instance, pci_req_id, port_req_body, None)
|
||||
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value=[])
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices', return_value=[])
|
||||
def test_populate_neutron_binding_profile_pci_dev_not_found(
|
||||
self, mock_get_instance_pci_devs):
|
||||
api = neutronapi.API()
|
||||
|
@ -8073,7 +8072,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
api._populate_neutron_binding_profile,
|
||||
instance, pci_req_id, port_req_body, None)
|
||||
mock_get_instance_pci_devs.assert_called_once_with(
|
||||
instance, pci_req_id)
|
||||
request_id=pci_req_id)
|
||||
|
||||
@mock.patch.object(
|
||||
pci_utils, 'is_physical_function',
|
||||
|
@ -8089,7 +8088,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
new=mock.MagicMock(side_effect=(lambda vf_a: {
|
||||
'0000:0a:00.0': '52:54:00:1e:59:c6'}.get(vf_a)))
|
||||
)
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
def test_pci_parse_whitelist_called_once(
|
||||
self, mock_get_instance_pci_devs
|
||||
):
|
||||
|
@ -8108,7 +8107,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
# after the 'device_spec' is set in this test case.
|
||||
api = neutronapi.API()
|
||||
host_id = 'my_host_id'
|
||||
instance = {'host': host_id}
|
||||
instance = objects.Instance(host=host_id)
|
||||
pci_req_id = 'my_req_id'
|
||||
port_req_body = {'port': {}}
|
||||
pci_dev = {'vendor_id': '1377',
|
||||
|
@ -8144,7 +8143,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
vf.update_device(pci_dev)
|
||||
return instance, pf, vf
|
||||
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
|
||||
def test_populate_pci_mac_address_pf(self, mock_get_mac_by_pci_address,
|
||||
mock_get_instance_pci_devs):
|
||||
|
@ -8158,7 +8157,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
self.api._populate_pci_mac_address(instance, 0, req)
|
||||
self.assertEqual(expected_port_req_body, req)
|
||||
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
|
||||
def test_populate_pci_mac_address_vf(self, mock_get_mac_by_pci_address,
|
||||
mock_get_instance_pci_devs):
|
||||
|
@ -8170,7 +8169,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
self.api._populate_pci_mac_address(instance, 42, port_req_body)
|
||||
self.assertEqual(port_req_body, req)
|
||||
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
|
||||
def test_populate_pci_mac_address_vf_fail(self,
|
||||
mock_get_mac_by_pci_address,
|
||||
|
@ -8185,7 +8184,7 @@ class TestAPIPortbinding(TestAPIBase):
|
|||
self.api._populate_pci_mac_address(instance, 42, port_req_body)
|
||||
self.assertEqual(port_req_body, req)
|
||||
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch('nova.objects.Instance.get_pci_devices')
|
||||
@mock.patch('nova.network.neutron.LOG.error')
|
||||
def test_populate_pci_mac_address_no_device(self, mock_log_error,
|
||||
mock_get_instance_pci_devs):
|
||||
|
|
|
@ -25,6 +25,7 @@ from oslo_versionedobjects import base as ovo_base
|
|||
|
||||
from nova.compute import task_states
|
||||
from nova.compute import vm_states
|
||||
from nova import context
|
||||
from nova.db.main import api as db
|
||||
from nova.db.main import models as sql_models
|
||||
from nova import exception
|
||||
|
@ -2073,3 +2074,164 @@ class TestInstanceObjectMisc(test.NoDBTestCase):
|
|||
self.assertEqual(['metadata', 'system_metadata', 'info_cache',
|
||||
'security_groups', 'pci_devices', 'tags', 'extra',
|
||||
'extra.flavor'], result_list)
|
||||
|
||||
|
||||
class TestInstanceObjectGetPciDevices(test.NoDBTestCase):
    """Tests for Instance.get_pci_devices filtering semantics.

    Fix: the three filtering tests were misleadingly named
    test_no_filter_by_* although each of them *does* apply a filter;
    they are renamed to test_filter_by_* to match what they exercise.
    """

    def test_lazy_loading_pci_devices(self):
        user_id = "fake-user"
        project_id = "fake-project"
        ctxt = context.RequestContext(user_id, project_id)

        # pci_devices is not set, so calling get_pci_devices must lazy
        # load it from the database exactly once.
        inst = instance.Instance(ctxt, uuid=uuids.instance)
        with mock.patch(
            "nova.objects.PciDeviceList.get_by_instance_uuid",
            return_value=objects.PciDeviceList(),
        ) as mock_get_pci:
            self.assertEqual([], inst.get_pci_devices())

        mock_get_pci.assert_called_once_with(ctxt, uuids.instance)

    def test_lazy_loading_pci_requests(self):
        user_id = "fake-user"
        project_id = "fake-project"
        ctxt = context.RequestContext(user_id, project_id)

        # pci_devices is populated but pci_requests is not; filtering by
        # source forces a lazy load of the requests.
        devs = [objects.PciDevice(request_id=uuids.req1)]
        inst = instance.Instance(
            ctxt,
            uuid=uuids.instance,
            pci_devices=objects.PciDeviceList(
                objects=devs
            ),
        )

        with mock.patch(
            "nova.objects.InstancePCIRequests.get_by_instance_uuid",
            return_value=objects.InstancePCIRequests(
                requests=[
                    objects.InstancePCIRequest(
                        request_id=uuids.req1,
                        alias_name="pci-alias-1",
                    ),
                ]
            ),
        ) as mock_get_pci_req:
            self.assertEqual(
                devs,
                inst.get_pci_devices(
                    source=objects.InstancePCIRequest.FLAVOR_ALIAS
                ),
            )

        mock_get_pci_req.assert_called_once_with(ctxt, uuids.instance)

    def test_no_filter(self):
        # With no filter arguments all allocated devices are returned.
        devs = [objects.PciDevice()]

        inst = instance.Instance(
            pci_devices=objects.PciDeviceList(objects=devs)
        )

        self.assertEqual(devs, inst.get_pci_devices())

    def test_filter_by_request_id(self):
        # Only devices allocated for the given request_id are returned.
        expected_devs = [objects.PciDevice(request_id=uuids.req1)]
        all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]

        inst = instance.Instance(
            pci_devices=objects.PciDeviceList(objects=all_devs)
        )

        self.assertEqual(
            expected_devs, inst.get_pci_devices(request_id=uuids.req1)
        )

    def test_filter_by_source(self):
        # Only devices whose originating request matches the requested
        # source (here: flavor alias) are returned.
        expected_devs = [
            objects.PciDevice(request_id=uuids.req1),
            objects.PciDevice(request_id=uuids.req1),
        ]
        all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]

        inst = instance.Instance(
            pci_devices=objects.PciDeviceList(objects=all_devs),
            pci_requests=objects.InstancePCIRequests(
                requests=[
                    objects.InstancePCIRequest(
                        request_id=uuids.req1,
                        alias_name="pci-alias-1",
                    ),
                    objects.InstancePCIRequest(
                        request_id=uuids.req2,
                    ),
                ]
            ),
        )

        self.assertEqual(
            expected_devs,
            inst.get_pci_devices(
                source=objects.InstancePCIRequest.FLAVOR_ALIAS
            ),
        )

    def test_filter_by_request_id_and_source(self):
        # Both filters are applied together; req1 is a flavor based
        # request, so filtering req1 by NEUTRON_PORT matches nothing.
        expected_devs = []
        all_devs = expected_devs + [
            objects.PciDevice(request_id=uuids.req1),
            objects.PciDevice(request_id=uuids.req2),
            objects.PciDevice(request_id=uuids.req1),
        ]

        inst = instance.Instance(
            pci_devices=objects.PciDeviceList(objects=all_devs),
            pci_requests=objects.InstancePCIRequests(
                requests=[
                    objects.InstancePCIRequest(
                        request_id=uuids.req1,
                        alias_name="pci-alias-1",
                    ),
                    objects.InstancePCIRequest(
                        request_id=uuids.req2,
                    ),
                ]
            ),
        )

        self.assertEqual(
            expected_devs,
            inst.get_pci_devices(
                request_id=uuids.req1,
                source=objects.InstancePCIRequest.NEUTRON_PORT,
            ),
        )

    def test_old_pci_dev_and_req(self):
        """This tests the case when the system has old InstancePCIRequest
        objects without the request_id being filled. And therefore have
        PciDevice object where the request_id is None too. These requests
        and devices are always flavor based.
        """
        devs = [
            objects.PciDevice(request_id=None),
            objects.PciDevice(request_id=None),
        ]

        inst = instance.Instance(
            pci_devices=objects.PciDeviceList(objects=devs),
            pci_requests=objects.InstancePCIRequests(
                requests=[
                    objects.InstancePCIRequest(
                        request_id=None,
                        alias_name="pci-alias-1",
                    ),
                ]
            ),
        )

        self.assertEqual(
            devs,
            inst.get_pci_devices(
                source=objects.InstancePCIRequest.FLAVOR_ALIAS,
            ),
        )
|
||||
|
|
|
@ -810,7 +810,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
|
|||
free_pci_device_ids = (
|
||||
[dev.id for dev in self.tracker.pci_stats.get_free_devs()])
|
||||
self.assertEqual(2, len(free_pci_device_ids))
|
||||
allocated_devs = manager.get_instance_pci_devs(self.inst)
|
||||
allocated_devs = self.inst.get_pci_devices()
|
||||
pci_device = allocated_devs[0]
|
||||
self.assertNotIn(pci_device.id, free_pci_device_ids)
|
||||
instance_uuid = self.inst['uuid']
|
||||
|
@ -873,24 +873,3 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
|
|||
self.assertIsNone(self.tracker.allocations.get(instance_uuid))
|
||||
free_devs = self.tracker.pci_stats.get_free_devs()
|
||||
self.assertEqual(len(fake_db_devs), len(free_devs))
|
||||
|
||||
|
||||
class PciGetInstanceDevs(test.NoDBTestCase):
    """Tests for pci.manager.get_instance_pci_devs."""

    def test_get_devs_object(self):
        # Verify that accessing inst.pci_devices inside the helper
        # triggers the Instance lazy-load machinery (obj_load_attr).
        def _fake_obj_load_attr(foo, attrname):
            if attrname == 'pci_devices':
                self.load_attr_called = True
                foo.pci_devices = objects.PciDeviceList()

        self.stub_out(
            'nova.objects.Instance.obj_load_attr',
            _fake_obj_load_attr)

        self.load_attr_called = False
        manager.get_instance_pci_devs(objects.Instance())
        self.assertTrue(self.load_attr_called)

    def test_get_devs_no_pci_devices(self):
        # A pci_devices value of None must yield an empty list, not an
        # error.
        inst = objects.Instance(pci_devices=None)
        self.assertEqual([], manager.get_instance_pci_devs(inst))
|
||||
|
|
|
@ -76,7 +76,6 @@ from nova.objects import block_device as block_device_obj
|
|||
from nova.objects import fields
|
||||
from nova.objects import migrate_data as migrate_data_obj
|
||||
from nova.objects import virtual_interface as obj_vif
|
||||
from nova.pci import manager as pci_manager
|
||||
from nova.pci import utils as pci_utils
|
||||
import nova.privsep.fs
|
||||
import nova.privsep.libvirt
|
||||
|
@ -3424,10 +3423,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
status=fields.PciDeviceStatus.AVAILABLE,
|
||||
address='0000:00:00.1',
|
||||
instance_uuid=None,
|
||||
request_id=None,
|
||||
request_id=uuids.pci_req1,
|
||||
extra_info={},
|
||||
numa_node=None)
|
||||
pci_device = objects.PciDevice(**pci_device_info)
|
||||
instance_ref.pci_devices = objects.PciDeviceList(objects=[pci_device])
|
||||
pci_req = objects.InstancePCIRequest(
|
||||
request_id=uuids.pci_req1, alias_name='pci-alias-1')
|
||||
instance_ref.pci_requests = objects.InstancePCIRequests(
|
||||
requests=[pci_req])
|
||||
|
||||
with test.nested(
|
||||
mock.patch.object(host.Host, 'has_min_version',
|
||||
|
@ -3435,9 +3439,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock.patch.object(host.Host, "get_capabilities",
|
||||
return_value=caps),
|
||||
mock.patch.object(host.Host, 'get_online_cpus',
|
||||
return_value=set([3])),
|
||||
mock.patch.object(pci_manager, "get_instance_pci_devs",
|
||||
return_value=[pci_device])):
|
||||
return_value=set([3]))
|
||||
):
|
||||
cfg = conn._get_guest_config(instance_ref, [],
|
||||
image_meta, disk_info)
|
||||
self.assertEqual(set([3]), cfg.cpuset)
|
||||
|
@ -3476,23 +3479,31 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
status=fields.PciDeviceStatus.AVAILABLE,
|
||||
address='0000:00:00.1',
|
||||
instance_uuid=None,
|
||||
request_id=None,
|
||||
request_id=uuids.pci_req1,
|
||||
extra_info={},
|
||||
numa_node=1)
|
||||
pci_device = objects.PciDevice(**pci_device_info)
|
||||
pci_device_info.update(numa_node=0, address='0000:00:00.2')
|
||||
pci_device2 = objects.PciDevice(**pci_device_info)
|
||||
instance_ref.pci_devices = objects.PciDeviceList(
|
||||
objects=[pci_device, pci_device2]
|
||||
)
|
||||
instance_ref.pci_requests = objects.InstancePCIRequests(
|
||||
requests=[
|
||||
objects.InstancePCIRequest(
|
||||
request_id=uuids.pci_req1, alias_name="pci-alias-1"
|
||||
)
|
||||
]
|
||||
)
|
||||
with test.nested(
|
||||
mock.patch.object(
|
||||
host.Host, "get_capabilities", return_value=caps),
|
||||
mock.patch.object(host.Host, 'get_online_cpus',
|
||||
return_value=set([3])),
|
||||
mock.patch.object(random, 'choice'),
|
||||
mock.patch.object(pci_manager, "get_instance_pci_devs",
|
||||
return_value=[pci_device, pci_device2]),
|
||||
mock.patch.object(conn, '_has_numa_support',
|
||||
return_value=False)
|
||||
) as (_, _, choice_mock, pci_mock, _):
|
||||
) as (_, _, choice_mock, _):
|
||||
cfg = conn._get_guest_config(instance_ref, [],
|
||||
image_meta, disk_info)
|
||||
self.assertFalse(choice_mock.called)
|
||||
|
@ -7477,12 +7488,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
address='0000:00:00.1',
|
||||
compute_id=compute_ref.id,
|
||||
instance_uuid=instance.uuid,
|
||||
request_id=None,
|
||||
request_id=uuids.pci_req1,
|
||||
extra_info={})
|
||||
pci_device = objects.PciDevice(**pci_device_info)
|
||||
pci_list = objects.PciDeviceList()
|
||||
pci_list.objects.append(pci_device)
|
||||
instance.pci_devices = pci_list
|
||||
instance.pci_requests = objects.InstancePCIRequests(
|
||||
requests=[
|
||||
objects.InstancePCIRequest(
|
||||
request_id=uuids.pci_req1, alias_name="pci-alias"
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
|
||||
|
@ -16310,7 +16328,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_get.return_value = fake_guest
|
||||
self.stub_out('oslo_service.loopingcall.FixedIntervalLoopingCall',
|
||||
lambda *a, **k: FakeLoopingCall())
|
||||
self.stub_out('nova.pci.manager.get_instance_pci_devs', lambda *a: [])
|
||||
|
||||
drvr.reboot(None, instance, [], 'SOFT')
|
||||
|
||||
|
@ -16322,14 +16339,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_get.assert_has_calls([mock.call(instance)] * 2, any_order=True)
|
||||
self.assertEqual(2, mock_get.call_count)
|
||||
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
|
||||
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
|
||||
@mock.patch.object(greenthread, 'sleep')
|
||||
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
|
||||
@mock.patch.object(host.Host, '_get_domain')
|
||||
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
|
||||
mock_sleep, mock_loopingcall,
|
||||
mock_get_instance_pci_devs):
|
||||
mock_sleep, mock_loopingcall):
|
||||
class FakeLoopingCall(object):
|
||||
def start(self, *a, **k):
|
||||
return self
|
||||
|
@ -16357,7 +16372,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_get_domain.return_value = mock_domain
|
||||
mock_hard_reboot.side_effect = fake_hard_reboot
|
||||
mock_loopingcall.return_value = FakeLoopingCall()
|
||||
mock_get_instance_pci_devs.return_value = []
|
||||
drvr.reboot(None, instance, [], 'SOFT')
|
||||
self.assertTrue(self.reboot_hard_reboot_called)
|
||||
|
||||
|
@ -16555,7 +16569,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
|
||||
@mock.patch('oslo_utils.fileutils.ensure_tree')
|
||||
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
|
||||
@mock.patch('nova.pci.manager.get_instance_pci_devs')
|
||||
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_guest_with_network')
|
||||
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
|
||||
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
|
||||
|
@ -16572,7 +16585,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock_get_guest_config, mock_get_instance_path,
|
||||
mock_get_instance_disk_info, mock_create_images_and_backing,
|
||||
mock_create_domand_and_network,
|
||||
mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
|
||||
mock_looping_call, mock_ensure_tree):
|
||||
"""For a hard reboot, we shouldn't need an additional call to glance
|
||||
to get the image metadata.
|
||||
|
||||
|
@ -16618,10 +16631,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
@mock.patch.object(conn, '_detach_mediated_devices')
|
||||
@mock.patch.object(conn, '_detach_direct_passthrough_ports')
|
||||
@mock.patch.object(conn, '_detach_pci_devices')
|
||||
@mock.patch.object(pci_manager, 'get_instance_pci_devs',
|
||||
return_value='pci devs')
|
||||
@mock.patch.object(conn._host, 'get_guest', return_value=guest)
|
||||
def suspend(mock_get_guest, mock_get_instance_pci_devs,
|
||||
def suspend(mock_get_guest,
|
||||
mock_detach_pci_devices,
|
||||
mock_detach_direct_passthrough_ports,
|
||||
mock_detach_mediated_devices,
|
||||
|
@ -16787,7 +16798,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
instance.info_cache = objects.InstanceInfoCache(
|
||||
network_info=network_info)
|
||||
# fill the pci_devices of the instance so that
|
||||
# pci_manager.get_instance_pci_devs will not return an empty list
|
||||
# instance.get_instance_pci_devs will not return an empty list
|
||||
# which will eventually fail the assertion for detachDeviceFlags
|
||||
expected_pci_device_obj = (
|
||||
objects.PciDevice(
|
||||
|
@ -16858,7 +16869,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
instance.info_cache = objects.InstanceInfoCache(
|
||||
network_info=network_info)
|
||||
# fill the pci_devices of the instance so that
|
||||
# pci_manager.get_instance_pci_devs will not return an empty list
|
||||
# instance.get_instance_pci_devs will not return an empty list
|
||||
# which will eventually fail the assertion for detachDeviceFlags
|
||||
instance.pci_devices = objects.PciDeviceList()
|
||||
instance.pci_devices.objects = [
|
||||
|
@ -16913,8 +16924,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
mock.patch.object(drvr, '_create_guest_with_network',
|
||||
return_value=guest),
|
||||
mock.patch.object(drvr, '_attach_pci_devices'),
|
||||
mock.patch.object(pci_manager, 'get_instance_pci_devs',
|
||||
return_value='fake_pci_devs'),
|
||||
mock.patch('nova.objects.Instance.get_pci_devices',
|
||||
return_value='fake_pci_devs'),
|
||||
mock.patch.object(utils, 'get_image_from_system_metadata'),
|
||||
mock.patch.object(guest, 'sync_guest_time'),
|
||||
mock.patch.object(drvr, '_wait_for_running',
|
||||
|
@ -20586,7 +20597,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
|
|||
guest = mock.Mock()
|
||||
|
||||
with test.nested(
|
||||
mock.patch.object(pci_manager, 'get_instance_pci_devs'),
|
||||
mock.patch.object(drvr, '_attach_pci_devices'),
|
||||
mock.patch.object(drvr, '_attach_direct_passthrough_ports'),
|
||||
):
|
||||
|
|
|
@ -96,7 +96,6 @@ from nova import objects
|
|||
from nova.objects import diagnostics as diagnostics_obj
|
||||
from nova.objects import fields
|
||||
from nova.objects import migrate_data as migrate_data_obj
|
||||
from nova.pci import manager as pci_manager
|
||||
from nova.pci import utils as pci_utils
|
||||
import nova.privsep.libvirt
|
||||
import nova.privsep.path
|
||||
|
@ -3165,7 +3164,11 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
guest.launch(pause=current_power_state == power_state.PAUSED)
|
||||
|
||||
self._attach_pci_devices(
|
||||
guest, pci_manager.get_instance_pci_devs(instance))
|
||||
guest,
|
||||
instance.get_pci_devices(
|
||||
source=objects.InstancePCIRequest.FLAVOR_ALIAS
|
||||
),
|
||||
)
|
||||
self._attach_direct_passthrough_ports(context, instance, guest)
|
||||
|
||||
def _can_set_admin_password(self, image_meta):
|
||||
|
@ -4101,8 +4104,12 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
"""Suspend the specified instance."""
|
||||
guest = self._host.get_guest(instance)
|
||||
|
||||
self._detach_pci_devices(guest,
|
||||
pci_manager.get_instance_pci_devs(instance))
|
||||
self._detach_pci_devices(
|
||||
guest,
|
||||
instance.get_pci_devices(
|
||||
source=objects.InstancePCIRequest.FLAVOR_ALIAS
|
||||
),
|
||||
)
|
||||
self._detach_direct_passthrough_ports(context, instance, guest)
|
||||
self._detach_mediated_devices(guest)
|
||||
guest.save_memory_state()
|
||||
|
@ -4120,8 +4127,12 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
guest = self._create_guest_with_network(
|
||||
context, xml, instance, network_info, block_device_info,
|
||||
vifs_already_plugged=True)
|
||||
self._attach_pci_devices(guest,
|
||||
pci_manager.get_instance_pci_devs(instance))
|
||||
self._attach_pci_devices(
|
||||
guest,
|
||||
instance.get_pci_devices(
|
||||
source=objects.InstancePCIRequest.FLAVOR_ALIAS
|
||||
),
|
||||
)
|
||||
self._attach_direct_passthrough_ports(
|
||||
context, instance, guest, network_info)
|
||||
self._attach_mediated_devices(guest, mdevs)
|
||||
|
@ -5072,7 +5083,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
else:
|
||||
attached_via_interface_element.append(vif)
|
||||
|
||||
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
|
||||
pci_devs = instance.get_pci_devices()
|
||||
hostdev_pci_addresses = {
|
||||
vif['profile']['pci_slot']
|
||||
for vif in attached_via_hostdev_element
|
||||
|
@ -7200,11 +7211,13 @@ class LibvirtDriver(driver.ComputeDriver):
|
|||
def _guest_add_pci_devices(self, guest, instance):
|
||||
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
|
||||
# Get all generic PCI devices (non-SR-IOV).
|
||||
for pci_dev in pci_manager.get_instance_pci_devs(instance):
|
||||
for pci_dev in instance.get_pci_devices(
|
||||
source=objects.InstancePCIRequest.FLAVOR_ALIAS
|
||||
):
|
||||
guest.add_device(self._get_guest_pci_device(pci_dev))
|
||||
else:
|
||||
# PCI devices is only supported for QEMU/KVM hypervisor
|
||||
if pci_manager.get_instance_pci_devs(instance, 'all'):
|
||||
if instance.get_pci_devices():
|
||||
raise exception.PciDeviceUnsupportedHypervisor(
|
||||
type=CONF.libvirt.virt_type
|
||||
)
|
||||
|
|
Loading…
Reference in New Issue