tests: Adding functional tests to cover VM creation with sriov
Adding a HostPciSRIOVDevicesInfo class to the fakelibvirt module in order to dynamically generate the host's SR-IOV device info. Adding functional tests to cover the creation of instances with attached SR-IOV devices (virtual and physical). Verifying that VFs cannot be allocated when their parent PF is allocated and, vice versa, that when a child VF is allocated the parent PF is no longer available. Change-Id: Ib9f341b0f26d48939d3305f6575f2689682c2685
This commit is contained in:
parent
f3d535b862
commit
b0a451d428
|
@ -0,0 +1,258 @@
|
|||
# Copyright (C) 2016 Red Hat, Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import fixtures
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from nova.objects import fields
|
||||
from nova import test
|
||||
from nova.tests.functional.test_servers import ServersTestBase
|
||||
from nova.tests.unit import fake_network
|
||||
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
|
||||
from nova.tests.unit.virt.libvirt import fakelibvirt
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NumaHostInfo(fakelibvirt.HostInfo):
    """Fake host info that lazily builds and caches a NUMA topology."""

    def __init__(self, **kwargs):
        super(NumaHostInfo, self).__init__(**kwargs)
        self.numa_mempages_list = []

    def get_numa_topology(self):
        """Return the host NUMA topology, generating it on first use."""
        if not self.numa_topology:
            generated = self._gen_numa_topology(self.cpu_nodes,
                                                self.cpu_sockets,
                                                self.cpu_cores,
                                                self.cpu_threads,
                                                self.kB_mem)
            self.numa_topology = generated

            # update number of active cpus
            total_cpus = len(generated.cells) * len(generated.cells[0].cpus)
            self.cpus = total_cpus - len(self.disabled_cpus_list)
        return self.numa_topology

    def set_custom_numa_toplogy(self, topology):
        """Install a caller-provided topology (method name kept for
        compatibility with existing callers, typo included).
        """
        self.numa_topology = topology
||||
class SRIOVServersTest(ServersTestBase):
    """Functional tests for booting servers with SR-IOV PCI devices.

    The libvirt driver is backed by fakelibvirt, whose
    HostPciSRIOVDevicesInfo dynamically generates the host's SR-IOV
    physical (PF) and virtual (VF) functions.
    """

    # pci alias names used in the flavor extra specs below
    vfs_alias_name = 'vfs'
    pfs_alias_name = 'pfs'

    def setUp(self):
        # Whitelist both the PF (0x1528) and VF (0x1515) product ids so the
        # PCI tracker exposes them as assignable devices.
        white_list = ['{"vendor_id":"8086","product_id":"1528"}',
                      '{"vendor_id":"8086","product_id":"1515"}']
        self.flags(passthrough_whitelist=white_list, group='pci')

        # PFs will be removed from pools, unless they have been specifically
        # requested. This is especially needed in cases where PFs and VFs
        # have the same vendor/product id.
        pci_alias = ['{"vendor_id":"8086", "product_id":"1528", "name":"%s",'
                     ' "device_type":"%s"}' % (self.pfs_alias_name,
                                               fields.PciDeviceType.SRIOV_PF),
                     '{"vendor_id":"8086", "product_id":"1515", "name":"%s"}' %
                     self.vfs_alias_name]
        self.flags(alias=pci_alias, group='pci')
        super(SRIOVServersTest, self).setUp()

        # Replace libvirt with fakelibvirt
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.host.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.guest.libvirt',
            fakelibvirt))
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

    def _setup_compute_service(self):
        # The compute service is started per test by _run_build_test, after
        # the fake libvirt connection has been patched in.
        pass

    def _setup_scheduler_service(self):
        """Start a scheduler with the NUMA and PCI passthrough filters."""
        self.flags(compute_driver='libvirt.LibvirtDriver')

        self.flags(driver='filter_scheduler', group='scheduler')
        self.flags(enabled_filters=CONF.filter_scheduler.enabled_filters
                   + ['NUMATopologyFilter', 'PciPassthroughFilter'],
                   group='filter_scheduler')
        return self.start_service('scheduler')

    def _get_connection(self, host_info, pci_info):
        """Build a fakelibvirt Connection exposing the given host devices."""
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002007,
                                                 hv_version=2001000,
                                                 host_info=host_info,
                                                 pci_info=pci_info)
        return fake_connection

    def _run_build_test(self, flavor_id, filter_mock, end_status='ACTIVE'):
        """Boot a server with the given flavor and wait for *end_status*.

        :param flavor_id: id of a flavor carrying a pci alias extra spec
        :param filter_mock: spy wrapping PciPassthroughFilter.host_passes
        :param end_status: expected final server status
        :returns: the created server dict
        """
        self.compute = self.start_service('compute', host='test_compute0')
        fake_network.set_stub_network_methods(self)

        # Create server
        good_server = self._build_server(flavor_id)

        post = {'server': good_server}

        created_server = self.api.post_server(post)
        # Use lazy %-style args so formatting only happens if DEBUG is on.
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Validate that the server has been created
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [s['id'] for s in servers]
        self.assertIn(created_server_id, server_ids)

        # Validate that PciPassthroughFilter has been called
        self.assertTrue(filter_mock.called)

        found_server = self._wait_for_state_change(found_server, 'BUILD')

        self.assertEqual(end_status, found_server['status'])
        return created_server

    def _get_pci_passthrough_filter_spy(self):
        """Return a mock that wraps (spies on) the real filter's
        host_passes, so calls can be asserted without changing behavior.
        """
        host_manager = self.scheduler.manager.driver.host_manager
        pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
        host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
        return host_pass_mock

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_VF(self, img_mock):
        """A server requesting a VF alias boots successfully."""
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        pci_info = fakelibvirt.HostPciSRIOVDevicesInfo()
        pci_info.create_pci_devices()
        fake_connection = self._get_connection(host_info, pci_info)

        # Create a flavor
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.vfs_alias_name}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        host_pass_mock = self._get_pci_passthrough_filter_spy()
        with test.nested(
            mock.patch('nova.virt.libvirt.host.Host.get_connection',
                       return_value=fake_connection),
            mock.patch('nova.scheduler.filters'
                       '.pci_passthrough_filter.PciPassthroughFilter'
                       '.host_passes',
                       side_effect=host_pass_mock)) as (conn_mock,
                                                        filter_mock):
            server = self._run_build_test(flavor_id, filter_mock)
            self._delete_server(server['id'])

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_PF(self, img_mock):
        """A server requesting a PF alias boots successfully."""
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        pci_info = fakelibvirt.HostPciSRIOVDevicesInfo()
        pci_info.create_pci_devices()
        fake_connection = self._get_connection(host_info, pci_info)

        # Create a flavor
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.pfs_alias_name}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        host_pass_mock = self._get_pci_passthrough_filter_spy()
        with test.nested(
            mock.patch('nova.virt.libvirt.host.Host.get_connection',
                       return_value=fake_connection),
            mock.patch('nova.scheduler.filters'
                       '.pci_passthrough_filter.PciPassthroughFilter'
                       '.host_passes',
                       side_effect=host_pass_mock)) as (conn_mock,
                                                        filter_mock):
            server = self._run_build_test(flavor_id, filter_mock)
            self._delete_server(server['id'])

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_PF_no_VF(self, img_mock):
        """Allocating a PF makes its child VFs unavailable."""
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        pci_info = fakelibvirt.HostPciSRIOVDevicesInfo()
        pci_info.create_pci_devices(num_pfs=1, num_vfs=4)
        fake_connection = self._get_connection(host_info, pci_info)

        # Create a flavor
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.pfs_alias_name}
        extra_spec_vfs = {"pci_passthrough:alias": "%s:1" %
                          self.vfs_alias_name}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)
        host_pass_mock = self._get_pci_passthrough_filter_spy()
        with test.nested(
            mock.patch('nova.virt.libvirt.host.Host.get_connection',
                       return_value=fake_connection),
            mock.patch('nova.scheduler.filters'
                       '.pci_passthrough_filter.PciPassthroughFilter'
                       '.host_passes',
                       side_effect=host_pass_mock)) as (conn_mock,
                                                        filter_mock):
            # Boot the PF server first; the VF server must then fail.
            pf_server = self._run_build_test(flavor_id, filter_mock)
            vf_server = self._run_build_test(flavor_id_vfs, filter_mock,
                                             end_status='ERROR')

        self._delete_server(pf_server['id'])
        self._delete_server(vf_server['id'])

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_VF_no_PF(self, img_mock):
        """Allocating a child VF makes the parent PF unavailable."""
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        pci_info = fakelibvirt.HostPciSRIOVDevicesInfo()
        pci_info.create_pci_devices(num_pfs=1, num_vfs=4)
        fake_connection = self._get_connection(host_info, pci_info)

        # Create a flavor
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.pfs_alias_name}
        extra_spec_vfs = {"pci_passthrough:alias": "%s:1" %
                          self.vfs_alias_name}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)
        host_pass_mock = self._get_pci_passthrough_filter_spy()
        with test.nested(
            mock.patch('nova.virt.libvirt.host.Host.get_connection',
                       return_value=fake_connection),
            mock.patch('nova.scheduler.filters'
                       '.pci_passthrough_filter.PciPassthroughFilter'
                       '.host_passes',
                       side_effect=host_pass_mock)) as (conn_mock,
                                                        filter_mock):
            # Boot the VF server first; the PF server must then fail.
            vf_server = self._run_build_test(flavor_id_vfs, filter_mock)
            pf_server = self._run_build_test(flavor_id, filter_mock,
                                             end_status='ERROR')

        self._delete_server(pf_server['id'])
        self._delete_server(vf_server['id'])
|
@ -158,6 +158,124 @@ FAKE_LIBVIRT_VERSION = 1002001
|
|||
# Libvirt version to match MIN_QEMU_VERSION in driver.py
|
||||
FAKE_QEMU_VERSION = 1005003
|
||||
|
||||
# libvirt nodedev capability types: a PF advertises its child VFs under
# 'virt_functions'; a VF points back at its parent under 'phys_function'.
PF_CAP_TYPE = 'virt_functions'
VF_CAP_TYPE = 'phys_function'
PF_PROD_NAME = 'Ethernet Controller 10-Gigabit X540-AT2'
VF_PROD_NAME = 'X540 Ethernet Controller Virtual Function'
PF_DRIVER_NAME = 'ixgbe'
VF_DRIVER_NAME = 'ixgbevf'
# PCI slot (hex string) used for PF and VF device addresses.
VF_SLOT = '10'
PF_SLOT = '00'


class FakePciDevice(object):
    """A fake libvirt node device representing one SR-IOV PF or VF.

    XMLDesc() returns a nodedev XML document mimicking what libvirt
    reports for an Intel X540 PF/VF pair.
    """

    pci_dev_template = """<device>
<name>pci_0000_81_%(slot)s_%(dev)d</name>
<path>/sys/devices/pci0000:80/0000:80:01.0/0000:81:%(slot)s.%(dev)d</path>
<parent>pci_0000_80_01_0</parent>
<driver>
<name>%(driver)s</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>129</bus>
<slot>0</slot>
<function>%(dev)d</function>
<product id='0x%(prod)d'>%(prod_name)s</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='%(cap_type)s'>
%(functions)s
</capability>
<iommuGroup number='%(group_id)d'>
<address domain='0x0000' bus='0x81' slot='0x%(slot)s' function='0x%(dev)d'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='5' width='8'/>
<link validity='sta' speed='5' width='8'/>
</pci-express>
</capability>
</device>"""

    def __init__(self, dev_type, vf_ratio, group, dev, product_id):
        """Populate pci devices

        :param dev_type: (string) Indicates the type of the device (PF, VF)
        :param vf_ratio: (int) Ratio of Virtual Functions on Physical
        :param group: (int) iommu group id
        :param dev: (int) function number of the device
        :param product_id: (int) Device product ID
        """
        addr_templ = (" <address domain='0x0000' bus='0x81' slot='0x%(slot)s'"
                      " function='0x%(dev)d'/>")
        self.pci_dev = None

        # Callers may compute the ratio with true division, which yields a
        # float under Python 3; range() below needs an integer.
        vf_ratio = int(vf_ratio)

        if dev_type == 'PF':
            # A PF lists the addresses of all of its child VFs.
            pf_caps = [addr_templ % {'dev': x, 'slot': VF_SLOT}
                       for x in range(dev * vf_ratio,
                                      (dev + 1) * vf_ratio)]
            self.pci_dev = self.pci_dev_template % {'dev': dev,
                'prod': product_id, 'group_id': group,
                'functions': '\n'.join(pf_caps), 'slot': 0,
                'cap_type': PF_CAP_TYPE, 'prod_name': PF_PROD_NAME,
                'driver': PF_DRIVER_NAME}
        elif dev_type == 'VF':
            # A VF lists the address of its single parent PF.
            vf_caps = [addr_templ % {'dev': dev // vf_ratio,
                                     'slot': PF_SLOT}]
            self.pci_dev = self.pci_dev_template % {'dev': dev,
                'prod': product_id, 'group_id': group,
                'functions': '\n'.join(vf_caps), 'slot': VF_SLOT,
                'cap_type': VF_CAP_TYPE, 'prod_name': VF_PROD_NAME,
                'driver': VF_DRIVER_NAME}

    def XMLDesc(self, flags):
        """Return the nodedev XML for this device (flags are ignored)."""
        return self.pci_dev
||||
class HostPciSRIOVDevicesInfo(object):
    """A pool of fake host SR-IOV PCI devices, keyed by nodedev name."""

    def __init__(self):
        # Map of libvirt nodedev name -> FakePciDevice
        self.sriov_devices = {}

    def create_pci_devices(self, vf_product_id=1515, pf_product_id=1528,
                           num_pfs=2, num_vfs=8, group=47):
        """Populate pci devices

        :param vf_product_id: (int) Product ID of the Virtual Functions
        :param pf_product_id: (int) Product ID of the Physical Functions
        :param num_pfs: (int) The number of the Physical Functions
        :param num_vfs: (int) The number of the Virtual Functions
        :param group: (int) Initial iommu group id
        """
        # Floor division: plain '/' would produce a float under Python 3
        # and break the range() calls in FakePciDevice.
        vf_ratio = num_vfs // num_pfs

        # Generate PFs
        for dev in range(num_pfs):
            dev_group = group + dev + 1
            pci_dev_name = 'pci_0000_81_%(slot)s_%(dev)d' % {'slot': PF_SLOT,
                                                             'dev': dev}
            self.sriov_devices[pci_dev_name] = FakePciDevice('PF', vf_ratio,
                                                             dev_group, dev,
                                                             pf_product_id)

        # Generate VFs
        for dev in range(num_vfs):
            dev_group = group + dev + 1
            pci_dev_name = 'pci_0000_81_%(slot)s_%(dev)d' % {'slot': VF_SLOT,
                                                             'dev': dev}
            self.sriov_devices[pci_dev_name] = FakePciDevice('VF', vf_ratio,
                                                             dev_group, dev,
                                                             vf_product_id)

    def get_all_devices(self):
        """Return the nodedev names of all fake PCI devices on the host."""
        return self.sriov_devices.keys()

    def get_device_by_name(self, device_name):
        """Return the FakePciDevice with the given name, or None."""
        pci_dev = self.sriov_devices.get(device_name)
        return pci_dev
||||
class HostInfo(object):
|
||||
def __init__(self, arch=arch.X86_64, kB_mem=4096,
|
||||
|
@ -807,7 +925,7 @@ class DomainSnapshot(object):
|
|||
|
||||
class Connection(object):
|
||||
def __init__(self, uri=None, readonly=False, version=FAKE_LIBVIRT_VERSION,
|
||||
hv_version=FAKE_QEMU_VERSION, host_info=None):
|
||||
hv_version=FAKE_QEMU_VERSION, host_info=None, pci_info=None):
|
||||
if not uri or uri == '':
|
||||
if allow_default_uri_connection:
|
||||
uri = 'qemu:///session'
|
||||
|
@ -841,6 +959,7 @@ class Connection(object):
|
|||
self.fakeLibVersion = version
|
||||
self.fakeVersion = hv_version
|
||||
self.host_info = host_info or HostInfo()
|
||||
self.pci_info = pci_info or HostPciSRIOVDevicesInfo()
|
||||
|
||||
def _add_filter(self, nwfilter):
|
||||
self._nwfilters[nwfilter._name] = nwfilter
|
||||
|
@ -1259,7 +1378,13 @@ class Connection(object):
|
|||
nwfilter = NWFilter(self, xml)
|
||||
self._add_filter(nwfilter)
|
||||
|
||||
def device_lookup_by_name(self, dev_name):
|
||||
return self.pci_info.get_device_by_name(dev_name)
|
||||
|
||||
def nodeDeviceLookupByName(self, name):
|
||||
pci_dev = self.pci_info.get_device_by_name(name)
|
||||
if pci_dev:
|
||||
return pci_dev
|
||||
try:
|
||||
return self._nodedevs[name]
|
||||
except KeyError:
|
||||
|
@ -1273,7 +1398,7 @@ class Connection(object):
|
|||
return []
|
||||
|
||||
def listDevices(self, cap, flags):
|
||||
return []
|
||||
return self.pci_info.get_all_devices()
|
||||
|
||||
def baselineCPU(self, cpu, flag):
|
||||
"""Add new libvirt API."""
|
||||
|
|
Loading…
Reference in New Issue