Merge "Add 'resource_request' to neutronv2/constants"

This commit is contained in:
Zuul 2019-07-22 21:59:56 +00:00 committed by Gerrit Code Review
commit ffa85a9263
10 changed files with 43 additions and 31 deletions

View File

@@ -30,6 +30,7 @@ from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network.neutronv2 import constants
from nova import objects
from nova import quota
from nova import utils
@@ -578,9 +579,9 @@ def instance_has_port_with_resource_request(
# instance has no port with resource request. If the instance is shelve
# offloaded then we still have to hit neutron.
search_opts = {'device_id': instance_uuid,
'fields': ['resource_request']}
'fields': [constants.RESOURCE_REQUEST]}
ports = network_api.list_ports(context, **search_opts).get('ports', [])
for port in ports:
if port.get('resource_request'):
if port.get(constants.RESOURCE_REQUEST):
return True
return False

View File

@@ -1677,7 +1677,8 @@ class PlacementCommands(object):
try:
return neutron.list_ports(
ctxt, device_id=instance.uuid,
fields=['id', 'resource_request', constants.BINDING_PROFILE]
fields=['id', constants.RESOURCE_REQUEST,
constants.BINDING_PROFILE]
)['ports']
except neutron_client_exc.NeutronClientException as e:
raise exception.UnableToQueryPorts(
@@ -1685,7 +1686,7 @@ class PlacementCommands(object):
@staticmethod
def _has_request_but_no_allocation(port):
request = port.get('resource_request')
request = port.get(constants.RESOURCE_REQUEST)
binding_profile = port.get(constants.BINDING_PROFILE, {}) or {}
allocation = binding_profile.get(constants.ALLOCATION)
# We are defensive here about 'resources' and 'required' in the
@@ -1775,7 +1776,8 @@ class PlacementCommands(object):
instance allocation dict.
"""
matching_rp_uuids = self._get_rps_in_tree_with_required_traits(
ctxt, node_uuid, port['resource_request']['required'], placement)
ctxt, node_uuid, port[constants.RESOURCE_REQUEST]['required'],
placement)
if len(matching_rp_uuids) > 1:
# If there is more than one such RP then it is an ambiguous
@@ -1797,7 +1799,7 @@ class PlacementCommands(object):
raise exception.NoResourceProviderToHealFrom(
port_id=port['id'],
instance_uuid=instance_uuid,
traits=port['resource_request']['required'],
traits=port[constants.RESOURCE_REQUEST]['required'],
node_uuid=node_uuid)
# We found one RP that matches the traits. Assume that we can allocate
@@ -1807,7 +1809,7 @@ class PlacementCommands(object):
port_allocation = {
rp_uuid: {
'resources': port['resource_request']['resources']
'resources': port[constants.RESOURCE_REQUEST]['resources']
}
}
return port_allocation
@@ -1881,7 +1883,7 @@ class PlacementCommands(object):
"traits for port %(port_uuid)s with resource request "
"%(request)s attached to instance %(instance_uuid)s") %
{"rp_uuid": rp_uuid, "port_uuid": port["id"],
"request": port.get("resource_request"),
"request": port.get(constants.RESOURCE_REQUEST),
"instance_uuid": instance.uuid})
return allocations, ports_to_heal

View File

@@ -63,6 +63,7 @@ from nova.i18n import _
from nova import image
from nova import network
from nova.network import model as network_model
from nova.network.neutronv2 import constants
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import objects
@@ -4352,7 +4353,7 @@ class API(base.Base):
# need a new scheduling if resource on this host is not available.
if port_id:
port = self.network_api.show_port(context, port_id)
if port['port'].get('resource_request'):
if port['port'].get(constants.RESOURCE_REQUEST):
raise exception.AttachInterfaceWithQoSPolicyNotSupported(
instance_uuid=instance.uuid)

View File

@@ -462,7 +462,7 @@ class API(base_api.NetworkAPI):
# such ports are currently not supported as they would at least
# need resource allocation manipulation in placement but might also
# need a new scheduling if resource on this host is not available.
if port.get('resource_request', None):
if port.get(constants.RESOURCE_REQUEST, None):
msg = _(
"The auto-created port %(port_id)s is being deleted due "
"to its network having QoS policy.")
@@ -1003,7 +1003,7 @@ class API(base_api.NetworkAPI):
for port in requested_ports_dict.values():
# only communicate the allocations if the port has resource
# requests
if port.get('resource_request'):
if port.get(constants.RESOURCE_REQUEST):
profile = port.get(constants.BINDING_PROFILE, {})
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
@@ -1668,7 +1668,7 @@ class API(base_api.NetworkAPI):
if port:
# if there is resource associated to this port then that needs to
# be deallocated so lets return info about such allocation
resource_request = port.get('resource_request')
resource_request = port.get(constants.RESOURCE_REQUEST)
allocated_rp = port.get(
constants.BINDING_PROFILE, {}).get(constants.ALLOCATION)
if resource_request and allocated_rp:
@@ -1955,7 +1955,7 @@ class API(base_api.NetworkAPI):
port = self._show_port(
context, port_id, neutron_client=neutron,
fields=['binding:vnic_type', constants.BINDING_PROFILE,
'network_id', 'resource_request'])
'network_id', constants.RESOURCE_REQUEST])
network_id = port.get('network_id')
trusted = None
vnic_type = port.get('binding:vnic_type',
@@ -1967,7 +1967,7 @@ class API(base_api.NetworkAPI):
# set depending on neutron configuration, e.g. if QoS rules are
# applied to the port/network and the port-resource-request API
# extension is enabled.
resource_request = port.get('resource_request', None)
resource_request = port.get(constants.RESOURCE_REQUEST, None)
return vnic_type, trusted, network_id, resource_request
def create_resource_requests(self, context, requested_networks,

View File

@@ -27,3 +27,4 @@ BINDING_HOST_ID = 'binding:host_id'
MIGRATING_ATTR = 'migrating_to'
L3_NETWORK_TYPES = ['vxlan', 'gre', 'geneve']
ALLOCATION = 'allocation'
RESOURCE_REQUEST = 'resource_request'

View File

@@ -1284,7 +1284,7 @@ class NeutronFixture(fixtures.Fixture):
}
],
'tenant_id': tenant_id,
'resource_request': {
neutron_constants.RESOURCE_REQUEST: {
"resources": {
orc.NET_BW_IGR_KILOBIT_PER_SEC: 1000,
orc.NET_BW_EGR_KILOBIT_PER_SEC: 1000},
@@ -1337,7 +1337,7 @@ class NeutronFixture(fixtures.Fixture):
}
],
'tenant_id': tenant_id,
'resource_request': {},
neutron_constants.RESOURCE_REQUEST: {},
'binding:vnic_type': 'direct',
}
@@ -1355,7 +1355,7 @@ class NeutronFixture(fixtures.Fixture):
}
],
'tenant_id': tenant_id,
'resource_request': {
neutron_constants.RESOURCE_REQUEST: {
"resources": {
orc.NET_BW_IGR_KILOBIT_PER_SEC: 10000,
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10000},
@@ -1378,7 +1378,7 @@ class NeutronFixture(fixtures.Fixture):
}
],
'tenant_id': tenant_id,
'resource_request': {
neutron_constants.RESOURCE_REQUEST: {
"resources": {
orc.NET_BW_IGR_KILOBIT_PER_SEC: 10000,
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10000},

View File

@@ -24,6 +24,7 @@ from nova.cmd import manage
from nova import config
from nova import context
from nova import exception
from nova.network.neutronv2 import constants
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -770,7 +771,7 @@ class TestNovaManagePlacementHealPortAllocations(
# _ports list is safe as it is re-created for each Neutron fixture
# instance therefore for each individual test using that fixture.
bound_port = self.neutron._ports[port_id]
bound_port['resource_request'] = resource_request
bound_port[constants.RESOURCE_REQUEST] = resource_request
def _create_server_with_missing_port_alloc(
self, ports, resource_request=None):
@@ -822,7 +823,7 @@ class TestNovaManagePlacementHealPortAllocations(
# bridge RP
total_request = collections.defaultdict(int)
for port in ports:
port_request = port['resource_request']['resources']
port_request = port[constants.RESOURCE_REQUEST]['resources']
for rc, amount in port_request.items():
total_request[rc] += amount
self.assertEqual(total_request, network_allocations)

View File

@@ -36,6 +36,7 @@ from nova.compute import instance_actions
from nova.compute import manager as compute_manager
from nova import context
from nova import exception
from nova.network.neutronv2 import constants
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.scheduler import utils
@@ -5655,7 +5656,7 @@ class PortResourceRequestBasedSchedulingTestBase(
self._create_sriov_networking_rp_tree(compute_rp_uuid)
def assertPortMatchesAllocation(self, port, allocations):
port_request = port['resource_request']['resources']
port_request = port[constants.RESOURCE_REQUEST]['resources']
for rc, amount in allocations.items():
self.assertEqual(port_request[rc], amount,
'port %s requested %d %s '
@@ -5674,8 +5675,9 @@ class UnsupportedPortResourceRequestBasedSchedulingTest(
# _ports list is safe as it is re-created for each Neutron fixture
# instance therefore for each individual test using that fixture.
bound_port = self.neutron._ports[port_id]
bound_port['resource_request'] = (
self.neutron.port_with_resource_request['resource_request'])
bound_port[constants.RESOURCE_REQUEST] = (
self.neutron.port_with_resource_request[
constants.RESOURCE_REQUEST])
def test_interface_attach_with_port_resource_request(self):
# create a server

View File

@@ -43,6 +43,7 @@ from nova.db import api as db
from nova import exception
from nova.image import api as image_api
from nova.network.neutronv2 import api as neutron_api
from nova.network.neutronv2 import constants
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
@@ -6219,7 +6220,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
with mock.patch.object(
self.compute_api.network_api, 'show_port',
return_value={'port': {
'resource_request': {
constants.RESOURCE_REQUEST: {
'resources': {'CUSTOM_RESOURCE_CLASS': 42}
}}}) as mock_show_port:
self.assertRaises(

View File

@@ -2135,7 +2135,8 @@ class TestNeutronv2(TestNeutronv2Base):
if binding_vnic_type:
test_port['port']['binding:vnic_type'] = binding_vnic_type
if port_resource_request:
test_port['port']['resource_request'] = port_resource_request
test_port['port'][
constants.RESOURCE_REQUEST] = port_resource_request
mock_get_client.reset_mock()
mock_client = mock_get_client()
@@ -2147,7 +2148,7 @@ class TestNeutronv2(TestNeutronv2Base):
mock_client.show_port.assert_called_once_with(test_port['port']['id'],
fields=['binding:vnic_type', 'binding:profile', 'network_id',
'resource_request'])
constants.RESOURCE_REQUEST])
self.assertEqual(expected_vnic_type, vnic_type)
self.assertEqual('net-id', network_id)
self.assertIsNone(trusted)
@@ -3523,7 +3524,7 @@ class TestNeutronv2WithMock(TestNeutronv2Base):
mock_client.show_port.assert_called_once_with(test_port['port']['id'],
fields=['binding:vnic_type', 'binding:profile', 'network_id',
'resource_request'])
constants.RESOURCE_REQUEST])
self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
self.assertEqual('net-id', network_id)
self.assertTrue(trusted)
@@ -3968,7 +3969,8 @@ class TestNeutronv2WithMock(TestNeutronv2Base):
mock_client = mock.MagicMock()
mock_client.create_port.return_value = {'port': {
'id': uuids.port_id,
'resource_request': {'resources': {'CUSTOM_RESOURCE_CLASS': 42}}
constants.RESOURCE_REQUEST: {
'resources': {'CUSTOM_RESOURCE_CLASS': 42}}
}}
exc = self.assertRaises(exception.NetworksWithQoSPolicyNotSupported,
@@ -3989,7 +3991,8 @@ class TestNeutronv2WithMock(TestNeutronv2Base):
mock_client = mock.MagicMock()
mock_client.create_port.return_value = {'port': {
'id': uuids.port_id,
'resource_request': {'resources': {'CUSTOM_RESOURCE_CLASS': 42}}
constants.RESOURCE_REQUEST: {
'resources': {'CUSTOM_RESOURCE_CLASS': 42}}
}}
mock_client.delete_port.side_effect = \
exceptions.NeutronClientException()
@@ -5038,7 +5041,7 @@ class TestNeutronv2WithMock(TestNeutronv2Base):
mock_client = mock.Mock()
mock_client.show_port.return_value = {
'port': {
'resource_request': {
constants.RESOURCE_REQUEST: {
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1000
}
@@ -5178,7 +5181,7 @@ class TestNeutronv2WithMock(TestNeutronv2Base):
'tenant_id': uuids.project_id,
'network_id': uuids.networkid_1,
'mac_address': 'fake-mac',
'resource_request': 'fake-request'
constants.RESOURCE_REQUEST: 'fake-request'
}
mock_show_port.return_value = port
mock_get_client.return_value.list_networks.return_value = {