Include all network devices in nova diagnostics

get_instance_diagnostics expected all interfaces
to have a <target> element with a "dev" attribute in
the instance XML. This is not the case for VFIO
interfaces (<interface type="hostdev">).
This caused an IndexError when looping over
the interfaces.

This patch fixes this issue by retrieving interfaces
data directly from the guest XML and adding nics
appropriately to the diagnostics object.

Change-Id: I8ef852d449e9e637d45e4ac92ffc5d1abd8d31c5
Closes-Bug: #1821798
This commit is contained in:
Francois Palin 2019-03-26 15:22:40 -04:00
parent d42a007425
commit ab7c968b6f
7 changed files with 324 additions and 181 deletions

View File

@ -526,3 +526,6 @@ class TestOpenStackClient(object):
'forced_down': forced_down
}
return self.api_put('os-services/%s' % service_id, req).body['service']
def get_server_diagnostics(self, server_id):
    """Fetch diagnostics for a server via GET /servers/{id}/diagnostics.

    :param server_id: UUID of the server to query.
    :returns: the deserialized response body of the diagnostics API
        (shape depends on the negotiated microversion).
    """
    return self.api_get('/servers/%s/diagnostics' % server_id).body

View File

@ -13,9 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import mock
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import test_servers as base
from nova.tests.unit.virt.libvirt import fake_imagebackend
@ -70,3 +72,182 @@ class ServersTestBase(base.ServersTestBase):
pci_info=pci_info,
mdev_info=mdev_info)
return fake_connection
class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
    """A custom variant of the stock neutron fixture with more networks.

    There are four networks available: three l2 networks (one flat and
    two VLAN) and one l3 network (VXLAN). network_4's port is bound as an
    SR-IOV direct (VF) port, so servers attached to it get a hostdev
    interface with no <target dev=...> in the domain XML.
    """
    # Flat l2 provider network on physical network 'foo'.
    network_1 = {
        'id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
        'status': 'ACTIVE',
        'subnets': [],
        'name': 'physical-network-foo',
        'admin_state_up': True,
        'tenant_id': nova_fixtures.NeutronFixture.tenant_id,
        'provider:physical_network': 'foo',
        'provider:network_type': 'flat',
        'provider:segmentation_id': None,
    }

    # VLAN l2 provider network on physical network 'bar'.
    network_2 = network_1.copy()
    network_2.update({
        'id': 'a252b8cd-2d99-4e82-9a97-ec1217c496f5',
        'name': 'physical-network-bar',
        'provider:physical_network': 'bar',
        'provider:network_type': 'vlan',
        'provider:segmentation_id': 123,
    })

    # Tunneled (VXLAN) l3 network; no physical network binding.
    network_3 = network_1.copy()
    network_3.update({
        'id': '877a79cc-295b-4b80-9606-092bf132931e',
        'name': 'tunneled-network',
        'provider:physical_network': None,
        'provider:network_type': 'vxlan',
        'provider:segmentation_id': 69,
    })

    # Non-shared VLAN network on physnet4; used for SR-IOV direct ports.
    network_4 = network_1.copy()
    network_4.update({
        'id': '1b70879f-fd00-411e-8ea9-143e7820e61d',
        'name': 'private-network',
        'shared': False,
        'provider:physical_network': 'physnet4',
        "provider:network_type": "vlan",
        'provider:segmentation_id': 42,
    })

    subnet_1 = nova_fixtures.NeutronFixture.subnet_1.copy()
    subnet_1.update({
        'name': 'physical-subnet-foo',
    })
    subnet_2 = nova_fixtures.NeutronFixture.subnet_1.copy()
    subnet_2.update({
        'id': 'b4c13749-c002-47ed-bf42-8b1d44fa9ff2',
        'name': 'physical-subnet-bar',
        'network_id': network_2['id'],
    })
    subnet_3 = nova_fixtures.NeutronFixture.subnet_1.copy()
    subnet_3.update({
        'id': '4dacb20b-917f-4275-aa75-825894553442',
        'name': 'tunneled-subnet',
        'network_id': network_3['id'],
    })
    subnet_4 = nova_fixtures.NeutronFixture.subnet_1.copy()
    subnet_4.update({
        'id': '7cb343ec-6637-494c-89a1-8890eab7788e',
        # NOTE(review): this name duplicates subnet_2's
        # 'physical-subnet-bar'; presumably meant to be unique — confirm
        # nothing looks subnets up by name before renaming.
        'name': 'physical-subnet-bar',
        'network_id': network_4['id'],
    })

    network_1['subnets'] = [subnet_1]
    network_2['subnets'] = [subnet_2]
    network_3['subnets'] = [subnet_3]
    network_4['subnets'] = [subnet_4]

    # Two canned ports on network_1 so tests can attach to it twice
    # (see the pop(0) in create_port below).
    network_1_port_2 = {
        'id': 'f32582b5-8694-4be8-9a52-c5732f601c9d',
        'network_id': network_1['id'],
        'status': 'ACTIVE',
        'mac_address': '71:ce:c7:8b:cd:dc',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.10',
                'subnet_id': subnet_1['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }
    network_1_port_3 = {
        'id': '9c7580a0-8b01-41f3-ba07-a114709a4b74',
        'network_id': network_1['id'],
        'status': 'ACTIVE',
        'mac_address': '71:ce:c7:2b:cd:dc',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.11',
                'subnet_id': subnet_1['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }
    network_2_port_1 = {
        'id': '67d36444-6353-40f5-9e92-59346cf0dfda',
        'network_id': network_2['id'],
        'status': 'ACTIVE',
        'mac_address': 'd2:0b:fd:d7:89:9b',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.6',
                'subnet_id': subnet_2['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }
    network_3_port_1 = {
        'id': '4bfa1dc4-4354-4840-b0b4-f06196fa1344',
        'network_id': network_3['id'],
        'status': 'ACTIVE',
        'mac_address': 'd2:0b:fd:99:89:9b',
        'fixed_ips': [
            {
                'ip_address': '192.168.2.6',
                'subnet_id': subnet_3['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }
    # SR-IOV direct (VF) port: hw_veb vif type plus the PCI binding
    # profile that ties it to a specific VF on physnet4.
    network_4_port_1 = {
        'id': 'b4cd0b93-2ac8-40a7-9fa4-2cd680ccdf3e',
        'network_id': network_4['id'],
        'status': 'ACTIVE',
        'mac_address': 'b5:bc:2e:e7:51:ee',
        'fixed_ips': [
            {
                'ip_address': '192.168.4.6',
                'subnet_id': subnet_4['id']
            }
        ],
        'binding:vif_type': 'hw_veb',
        'binding:vnic_type': 'direct',
        'binding:vif_details': {'vlan': 42},
        'binding:profile': {'pci_vendor_info': '1377:0047',
                            'pci_slot': '0000:81:00.1',
                            'physical_network': 'physnet4'},
    }

    def __init__(self, test):
        super(LibvirtNeutronFixture, self).__init__(test)
        # Register all four networks with the stock fixture's lookup map.
        self._networks = {
            self.network_1['id']: self.network_1,
            self.network_2['id']: self.network_2,
            self.network_3['id']: self.network_3,
            self.network_4['id']: self.network_4,
        }
        # Per-instance pool for network_1 so each attach gets a new port.
        self._net1_ports = [self.network_1_port_2, self.network_1_port_3]

    def create_port(self, body=None):
        """Stubbed port-create: hand out the canned port for the network.

        Pops from the network_1 pool (so that network can be used twice)
        and merges in any caller-supplied port attributes.

        :param body: request body; ``body['port']['network_id']`` selects
            the network and must be one registered in ``_networks``.
        :returns: dict of the form ``{'port': <port dict>}``.
        """
        network_id = body['port']['network_id']
        assert network_id in self._networks, ('Network %s not in fixture' %
                                              network_id)

        if network_id == self.network_1['id']:
            port = self._net1_ports.pop(0)
        elif network_id == self.network_2['id']:
            port = self.network_2_port_1
        elif network_id == self.network_3['id']:
            port = self.network_3_port_1
        elif network_id == self.network_4['id']:
            port = self.network_4_port_1

        # this copy is here to avoid modifying class variables like
        # network_2_port_1 below at the update call
        port = copy.deepcopy(port)
        port.update(body['port'])
        self._ports[port['id']] = port
        # this copy is here as nova sometimes modifies the returned port
        # locally and we want to avoid that nova modifies the fixture internals
        return {'port': copy.deepcopy(port)}

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
import six
import mock
@ -23,7 +22,6 @@ from oslo_log import log as logging
from nova.conf import neutron as neutron_conf
from nova import context as nova_context
from nova import objects
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional.libvirt import base
from nova.tests.unit.virt.libvirt import fakelibvirt
@ -142,148 +140,6 @@ class NUMAServersTest(NUMAServersTestBase):
self._run_build_test(flavor_id, end_status='ERROR')
class NUMAAffinityNeutronFixture(nova_fixtures.NeutronFixture):
    """A custom variant of the stock neutron fixture with more networks.

    There are three networks available: two l2 networks (one flat and one VLAN)
    and one l3 network (VXLAN).
    """
    # Flat l2 provider network on physical network 'foo'.
    network_1 = {
        'id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
        'status': 'ACTIVE',
        'subnets': [],
        'name': 'physical-network-foo',
        'admin_state_up': True,
        'tenant_id': nova_fixtures.NeutronFixture.tenant_id,
        'provider:physical_network': 'foo',
        'provider:network_type': 'flat',
        'provider:segmentation_id': None,
    }

    # VLAN l2 provider network on physical network 'bar'.
    network_2 = network_1.copy()
    network_2.update({
        'id': 'a252b8cd-2d99-4e82-9a97-ec1217c496f5',
        'name': 'physical-network-bar',
        'provider:physical_network': 'bar',
        'provider:network_type': 'vlan',
        'provider:segmentation_id': 123,
    })

    # Tunneled (VXLAN) l3 network; no physical network binding.
    network_3 = network_1.copy()
    network_3.update({
        'id': '877a79cc-295b-4b80-9606-092bf132931e',
        'name': 'tunneled-network',
        'provider:physical_network': None,
        'provider:network_type': 'vxlan',
        'provider:segmentation_id': 69,
    })

    subnet_1 = nova_fixtures.NeutronFixture.subnet_1.copy()
    subnet_1.update({
        'name': 'physical-subnet-foo',
    })
    subnet_2 = nova_fixtures.NeutronFixture.subnet_1.copy()
    subnet_2.update({
        'id': 'b4c13749-c002-47ed-bf42-8b1d44fa9ff2',
        'name': 'physical-subnet-bar',
        'network_id': network_2['id'],
    })
    subnet_3 = nova_fixtures.NeutronFixture.subnet_1.copy()
    subnet_3.update({
        'id': '4dacb20b-917f-4275-aa75-825894553442',
        'name': 'tunneled-subnet',
        'network_id': network_3['id'],
    })

    network_1['subnets'] = [subnet_1]
    network_2['subnets'] = [subnet_2]
    network_3['subnets'] = [subnet_3]

    # Two canned ports on network_1 so tests can attach to it twice
    # (see the pop(0) in create_port below).
    network_1_port_2 = {
        'id': 'f32582b5-8694-4be8-9a52-c5732f601c9d',
        'network_id': network_1['id'],
        'status': 'ACTIVE',
        'mac_address': '71:ce:c7:8b:cd:dc',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.10',
                'subnet_id': subnet_1['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }
    network_1_port_3 = {
        'id': '9c7580a0-8b01-41f3-ba07-a114709a4b74',
        'network_id': network_1['id'],
        'status': 'ACTIVE',
        'mac_address': '71:ce:c7:2b:cd:dc',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.11',
                'subnet_id': subnet_1['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }
    network_2_port_1 = {
        'id': '67d36444-6353-40f5-9e92-59346cf0dfda',
        'network_id': network_2['id'],
        'status': 'ACTIVE',
        'mac_address': 'd2:0b:fd:d7:89:9b',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.6',
                'subnet_id': subnet_2['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }
    network_3_port_1 = {
        'id': '4bfa1dc4-4354-4840-b0b4-f06196fa1344',
        'network_id': network_3['id'],
        'status': 'ACTIVE',
        'mac_address': 'd2:0b:fd:99:89:9b',
        'fixed_ips': [
            {
                'ip_address': '192.168.2.6',
                'subnet_id': subnet_3['id']
            }
        ],
        'binding:vif_type': 'ovs',
        'binding:vnic_type': 'normal',
    }

    def __init__(self, test):
        super(NUMAAffinityNeutronFixture, self).__init__(test)
        # Register the three networks with the stock fixture's lookup map.
        self._networks = {
            self.network_1['id']: self.network_1,
            self.network_2['id']: self.network_2,
            self.network_3['id']: self.network_3,
        }
        # Per-instance pool for network_1 so each attach gets a new port.
        self._net1_ports = [self.network_1_port_2, self.network_1_port_3]

    def create_port(self, body=None):
        """Stubbed port-create: hand out the canned port for the network.

        Pops from the network_1 pool (so that network can be used twice)
        and merges in any caller-supplied port attributes.

        :param body: request body; ``body['port']['network_id']`` selects
            the network and must be one registered in ``_networks``.
        :returns: dict of the form ``{'port': <port dict>}``.
        """
        network_id = body['port']['network_id']
        assert network_id in self._networks, ('Network %s not in fixture' %
                                              network_id)

        if network_id == self.network_1['id']:
            port = self._net1_ports.pop(0)
        elif network_id == self.network_2['id']:
            port = self.network_2_port_1
        elif network_id == self.network_3['id']:
            port = self.network_3_port_1

        # this copy is here to avoid modifying class variables like
        # network_2_port_1 below at the update call
        port = copy.deepcopy(port)
        port.update(body['port'])
        self._ports[port['id']] = port
        # this copy is here as nova sometimes modifies the returned port
        # locally and we want to avoid that nova modifies the fixture internals
        return {'port': copy.deepcopy(port)}
class NUMAServersWithNetworksTest(NUMAServersTestBase):
def setUp(self):
@ -300,7 +156,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
# we need a bit more intelligent neutron for these tests. Applying the
# new fixture here means that we re-stub what the previous neutron
# fixture already stubbed.
self.neutron = self.useFixture(NUMAAffinityNeutronFixture(self))
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
def _test_create_server_with_networks(self, flavor_id, networks):
host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
@ -327,7 +183,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
extra_spec = {'hw:numa_nodes': '1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
networks = [
{'uuid': NUMAAffinityNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
]
status = self._test_create_server_with_networks(
@ -346,8 +202,8 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
extra_spec = {'hw:numa_nodes': '2'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
networks = [
{'uuid': NUMAAffinityNeutronFixture.network_1['id']},
{'uuid': NUMAAffinityNeutronFixture.network_2['id']},
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_2['id']},
]
status = self._test_create_server_with_networks(
@ -365,8 +221,8 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
extra_spec = {'hw:numa_nodes': '1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
networks = [
{'uuid': NUMAAffinityNeutronFixture.network_1['id']},
{'uuid': NUMAAffinityNeutronFixture.network_2['id']},
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_2['id']},
]
status = self._test_create_server_with_networks(
@ -384,8 +240,8 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
extra_spec = {'hw:numa_nodes': '1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
networks = [
{'uuid': NUMAAffinityNeutronFixture.network_1['id']},
{'uuid': NUMAAffinityNeutronFixture.network_3['id']},
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_3['id']},
]
status = self._test_create_server_with_networks(
@ -398,7 +254,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
extra_spec = {'hw:numa_nodes': '1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
networks = [
{'uuid': NUMAAffinityNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
]
server = self._test_create_server_with_networks(flavor_id, networks)
@ -408,7 +264,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
# attach an interface from the **same** network
post = {
'interfaceAttachment': {
'net_id': NUMAAffinityNeutronFixture.network_1['id'],
'net_id': base.LibvirtNeutronFixture.network_1['id'],
}
}
self.api.attach_interface(server['id'], post)
@ -426,7 +282,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
# attach an interface from a **different** network
post = {
'interfaceAttachment': {
'net_id': NUMAAffinityNeutronFixture.network_2['id'],
'net_id': base.LibvirtNeutronFixture.network_2['id'],
}
}
self.api.attach_interface(server['id'], post)
@ -469,7 +325,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
extra_spec = {'hw:numa_nodes': '1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
networks = [
{'uuid': NUMAAffinityNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
]
good_server = self._build_server(flavor_id)
@ -542,7 +398,7 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
extra_spec = {'hw:numa_nodes': '1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
networks = [
{'uuid': NUMAAffinityNeutronFixture.network_1['id']},
{'uuid': base.LibvirtNeutronFixture.network_1['id']},
]
good_server = self._build_server(flavor_id)

View File

@ -201,6 +201,82 @@ class SRIOVServersTest(_PCIServersTestBase):
self._run_build_test(flavor_id_pfs, end_status='ERROR')
class GetServerDiagnosticsServerWithVfTestV21(_PCIServersTestBase):
    """Server diagnostics (microversion 2.48) for a server with a VF port.

    Regression coverage for bug #1821798: VFIO/hostdev interfaces carry no
    <target dev=...> element in the domain XML, which previously caused an
    IndexError when building the diagnostics object. The VF nic must still
    appear in nic_details, just without interface statistics.
    """
    api_major_version = 'v2.1'
    # 2.48 is the microversion that standardized the diagnostics format
    # (per-nic 'nic_details' entries).
    microversion = '2.48'
    image_ref_parameter = 'imageRef'

    VFS_ALIAS_NAME = 'vfs'

    # Whitelist the fake host's VFs for PCI passthrough and expose them
    # under the alias used by the flavor below.
    PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.VF_PROD_ID,
        },
    )]
    PCI_ALIAS = [jsonutils.dumps(x) for x in (
        {
            'vendor_id': fakelibvirt.PCI_VEND_ID,
            'product_id': fakelibvirt.VF_PROD_ID,
            'name': VFS_ALIAS_NAME,
        },
    )]

    def setUp(self):
        super(GetServerDiagnosticsServerWithVfTestV21, self).setUp()
        self.api.microversion = self.microversion

        # The ultimate base class _IntegratedTestBase uses NeutronFixture but
        # we need a bit more intelligent neutron for these tests. Applying the
        # new fixture here means that we re-stub what the previous neutron
        # fixture already stubbed.
        self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))

    def test_get_server_diagnostics_server_with_VF(self):
        """Boot with one normal and one VF port, then fetch diagnostics."""
        host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
                                             cpu_cores=2, cpu_threads=2,
                                             kB_mem=15740000)
        pci_info = fakelibvirt.HostPCIDevicesInfo()
        fake_connection = self._get_connection(host_info, pci_info)
        self.mock_conn.return_value = fake_connection

        # Create a flavor requesting one VF via the PCI alias.
        extra_spec = {"pci_passthrough:alias": "%s:1" % self.VFS_ALIAS_NAME}
        flavor_id = self._create_flavor(extra_spec=extra_spec)

        if not self.compute_started:
            self.compute = self.start_service('compute', host='test_compute0')
            self.compute_started = True

        # Create server: network_1 yields a normal OVS port, network_4 an
        # SR-IOV direct (VF) port.
        good_server = self._build_server(flavor_id,
                                         '155d900f-4e14-4e4c-a73d-069cbf4541e6')
        good_server['networks'] = [
            {'uuid': base.LibvirtNeutronFixture.network_1['id']},
            {'uuid': base.LibvirtNeutronFixture.network_4['id']},
        ]

        post = {'server': good_server}
        created_server = self.api.post_server(post)
        # NOTE(review): presumably waits for the server to transition out
        # of BUILD — confirm the helper's from-state semantics.
        self._wait_for_state_change(created_server, 'BUILD')

        diagnostics = self.api.get_server_diagnostics(created_server['id'])

        # Both nics are reported, in XML order: the normal port first,
        # then the VF port.
        self.assertEqual(base.LibvirtNeutronFixture.
                         network_1_port_2['mac_address'],
                         diagnostics['nic_details'][0]['mac_address'])

        self.assertEqual(base.LibvirtNeutronFixture.
                         network_4_port_1['mac_address'],
                         diagnostics['nic_details'][1]['mac_address'])

        # The normal port has interface stats; the VF (hostdev) port has
        # no <target dev=...>, so its stats are None rather than erroring.
        self.assertIsNotNone(diagnostics['nic_details'][0]['tx_packets'])

        self.assertIsNone(diagnostics['nic_details'][1]['tx_packets'])
class PCIServersTest(_PCIServersTestBase):
ALIAS_NAME = 'a1'

View File

@ -1020,12 +1020,23 @@ class Domain(object):
nics = ''
for nic in self._def['devices']['nics']:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>''' % nic
if 'source' in nic:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<target dev='tap274487d1-60'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>''' % nic
# this covers for direct nic type
else:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source>
<address type='pci' domain='0x0000' bus='0x81' slot='0x00'
function='0x01'/>
</source>
</interface>''' % nic
hostdevs = ''
for hostdev in self._def['devices']['hostdevs']:

View File

@ -16010,6 +16010,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
<model type="virtio"/>
<target dev="br0"/>
</interface>
<interface type='hostdev' managed='yes'>
<mac address="54:56:00:a6:40:40"/>
<driver name='vfio'/>
</interface>
</devices>
</domain>
"""
@ -16102,6 +16106,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
tx_errors=0,
tx_octets=0,
tx_packets=0)
expected.add_nic(mac_address='54:56:00:a6:40:40')
self.assertDiagnosticsEqual(expected, actual)
@mock.patch.object(host.Host, "list_instance_domains")

View File

@ -9178,28 +9178,37 @@ class LibvirtDriver(driver.ComputeDriver):
errors_count=stats[4])
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
for interface in xml_doc.findall('./devices/interface'):
mac_address = interface.find('mac').get('address')
target = interface.find('./target')
# add nic that has no target (therefore no stats)
if target is None:
diags.add_nic(mac_address=mac_address)
continue
# add nic with stats
dev = target.get('dev')
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
diags.add_nic(rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
if dev:
# interfaceStats might launch an exception if the
# method is not supported by the underlying hypervisor
# being used by libvirt
stats = domain.interfaceStats(dev)
diags.add_nic(mac_address=mac_address,
rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
except libvirt.libvirtError:
pass
# Update mac addresses of interface if stats have been reported
if diags.nic_details:
nodes = xml_doc.findall('./devices/interface/mac')
for index, node in enumerate(nodes):
diags.nic_details[index].mac_address = node.get('address')
return diags
@staticmethod