Add port-resource-request-groups extension
The port-resource-request-groups extension adds support for the new format
of the port's resource_request attribute. The new format allows requesting
multiple groups of resources and traits from the same resource provider
subtree.

Closes-Bug: #1943724
Partial-Bug: #1922237
Depends-On: https://review.opendev.org/c/openstack/tempest/+/809168/
See-Also: https://review.opendev.org/785236
Change-Id: I99a49b107b1872ddf83d1d8497a26a8d728feb07
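For illustration, a sketch of the old and new formats (trait and resource
class names follow the tests in this change; the values and UUIDs below are
made up):

    # Old format: one flat group per port
    port['resource_request'] = {
        'required': ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
        'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000},
    }

    # New format: a list of request groups that Placement must satisfy from
    # the same resource provider subtree
    port['resource_request'] = {
        'request_groups': [
            {'id': 'min-bw-group-uuid',
             'required': ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
             'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000}},
            {'id': 'min-pps-group-uuid',
             'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
             'resources': {'NET_PACKET_RATE_KILOPACKET_PER_SEC': 1000}},
        ],
        'same_subtree': ['min-bw-group-uuid', 'min-pps-group-uuid'],
    }

    # binding:profile.allocation correspondingly changes from a single RP
    # UUID string to a {request_group_uuid: resource_provider_uuid} mapping.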
parent 6d8e830859
commit 8db15cb2f3

neutron/exceptions/__init__.py (new file, 0 additions)

neutron/exceptions/qos.py (new file, 24 additions)
@@ -0,0 +1,24 @@
# Copyright (c) 2021 Ericsson Software Technology
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import exceptions as e

from neutron._i18n import _


# TODO(przszc): Move to n-lib
class QosPlacementAllocationUpdateConflict(e.Conflict):
    message = _("Updating placement allocation with %(alloc_diff)s for "
                "consumer %(consumer)s failed. The requested resources would "
                "exceed the capacity available.")
neutron/extensions/port_resource_request_groups.py (new file, 23 additions)
@@ -0,0 +1,23 @@
# Copyright (c) 2021 Ericsson Software Technology
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api.definitions import port_resource_request_groups as apidef
from neutron_lib.api import extensions as api_extensions
from oslo_log import log as logging

LOG = logging.getLogger(__name__)


class Port_resource_request_groups(api_extensions.APIExtensionDescriptor):
    api_definition = apidef
@ -211,40 +211,53 @@ class AgentMechanismDriverBase(api.MechanismDriver, metaclass=abc.ABCMeta):
|
||||
if 'allocation' not in context.current['binding:profile']:
|
||||
return False
|
||||
|
||||
rp = uuid.UUID(context.current['binding:profile']['allocation'])
|
||||
allocation = context.current['binding:profile']['allocation']
|
||||
host_agents = self._possible_agents_for_port(context)
|
||||
|
||||
reported = {}
|
||||
for agent in host_agents:
|
||||
if 'resource_provider_bandwidths' in agent['configurations']:
|
||||
if const.RP_BANDWIDTHS in agent['configurations']:
|
||||
for device in agent['configurations'][
|
||||
'resource_provider_bandwidths'].keys():
|
||||
const.RP_BANDWIDTHS].keys():
|
||||
device_rp_uuid = place_utils.device_resource_provider_uuid(
|
||||
namespace=uuid_ns,
|
||||
host=agent['host'],
|
||||
device=device)
|
||||
if device_rp_uuid == rp:
|
||||
reported[agent['id']] = agent
|
||||
for group, rp in allocation.items():
|
||||
if device_rp_uuid == uuid.UUID(rp):
|
||||
reported[group] = reported.get(group, []) + [agent]
|
||||
if (const.RP_PP_WITHOUT_DIRECTION in agent['configurations'] or
|
||||
const.RP_PP_WITH_DIRECTION in agent['configurations']):
|
||||
for group, rp in allocation.items():
|
||||
agent_rp_uuid = place_utils.agent_resource_provider_uuid(
|
||||
namespace=uuid_ns,
|
||||
host=agent['host'])
|
||||
if agent_rp_uuid == uuid.UUID(rp):
|
||||
reported[group] = reported.get(group, []) + [agent]
|
||||
|
||||
if len(reported) == 1:
|
||||
agent = list(reported.values())[0]
|
||||
LOG.debug("Agent %(agent)s of type %(agent_type)s reports to be "
|
||||
"responsible for resource provider %(rsc_provider)s",
|
||||
{'agent': agent['id'],
|
||||
'agent_type': agent['agent_type'],
|
||||
'rsc_provider': rp})
|
||||
return True
|
||||
elif len(reported) > 1:
|
||||
LOG.error("Agent misconfiguration, multiple agents on the same "
|
||||
"host %(host)s reports being responsible for resource "
|
||||
"provider %(rsc_provider)s: %(agents)s",
|
||||
{'host': context.current['binding:host_id'],
|
||||
'rsc_provider': rp,
|
||||
'agents': reported.keys()})
|
||||
return False
|
||||
else:
|
||||
# not responsible, must be somebody else
|
||||
return False
|
||||
for group, agents in reported.items():
|
||||
if len(agents) == 1:
|
||||
agent = agents[0]
|
||||
LOG.debug(
|
||||
"Agent %(agent)s of type %(agent_type)s reports to be "
|
||||
"responsible for resource provider %(rsc_provider)s",
|
||||
{'agent': agent['id'],
|
||||
'agent_type': agent['agent_type'],
|
||||
'rsc_provider': allocation[group]})
|
||||
elif len(agents) > 1:
|
||||
LOG.error(
|
||||
"Agent misconfiguration, multiple agents on the same "
|
||||
"host %(host)s reports being responsible for resource "
|
||||
"provider %(rsc_provider)s: %(agents)s",
|
||||
{'host': context.current['binding:host_id'],
|
||||
'rsc_provider': allocation[group],
|
||||
'agents': [agent['id'] for agent in agents]})
|
||||
return False
|
||||
else:
|
||||
# not responsible, must be somebody else
|
||||
return False
|
||||
|
||||
return (len(reported) >= 1 and (len(reported) == len(allocation)))
|
||||
|
||||
|
||||
class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase,
|
||||
|
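A simplified sketch (not the driver code itself) of the responsibility rule
implemented in the hunk above: with the new {group_uuid: rp_uuid} allocation
format, the driver claims the port only when every request group is backed by
exactly one of its agents.

    def _is_responsible(allocation, reported):
        """allocation: {group_uuid: rp_uuid}; reported: {group_uuid: [agents]}."""
        for agents in reported.values():
            if len(agents) != 1:   # ambiguous match -> agent misconfiguration
                return False
        return len(reported) >= 1 and len(reported) == len(allocation)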
@ -912,15 +912,14 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
|
||||
# traits of the allocated resource provider on
|
||||
# the top binding_level (==0).
|
||||
if (context.current['binding:profile'] is not None and
|
||||
'allocation' in context.current[
|
||||
'binding:profile'] and
|
||||
context.current[
|
||||
'binding:profile'].get('allocation') and
|
||||
not redoing_bottom):
|
||||
LOG.debug(
|
||||
"Undo bottom bound level and redo it "
|
||||
"according to binding_profile.allocation, "
|
||||
"resource provider uuid: %s",
|
||||
context.current[
|
||||
'binding:profile']['allocation'])
|
||||
"according to binding_profile.allocation: %s",
|
||||
context.current['binding:profile'][
|
||||
'allocation'])
|
||||
context._pop_binding_level()
|
||||
context._unset_binding()
|
||||
return self._bind_port_level(
|
||||
@ -966,21 +965,22 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
|
||||
if driver.obj.responsible_for_ports_allocation(context):
|
||||
drivers.append(driver)
|
||||
|
||||
allocation = context.current['binding:profile']['allocation']
|
||||
|
||||
if len(drivers) == 0:
|
||||
LOG.error("Failed to bind port %(port)s on host "
|
||||
"%(host)s allocated on resource provider "
|
||||
"%(rsc_provider)s, because no mechanism driver "
|
||||
"%(host)s allocated on resource providers: "
|
||||
"%(rsc_providers)s, because no mechanism driver "
|
||||
"reports being responsible",
|
||||
{'port': context.current['id'],
|
||||
'host': context.host,
|
||||
'rsc_provider': context.current[
|
||||
'binding:profile']['allocation']})
|
||||
'rsc_providers': ','.join(allocation.values())})
|
||||
raise place_exc.UnknownResourceProvider(
|
||||
rsc_provider=context.current['binding:profile']['allocation'])
|
||||
rsc_provider=','.join(allocation.values()))
|
||||
|
||||
if len(drivers) >= 2:
|
||||
raise place_exc.AmbiguousResponsibilityForResourceProvider(
|
||||
rsc_provider=context.current['binding:profile']['allocation'],
|
||||
rsc_provider=','.join(allocation.values()),
|
||||
drivers=','.join([driver.name for driver in drivers]))
|
||||
|
||||
# NOTE(bence romsics): The error conditions for raising either
|
||||
|
@ -13,9 +13,13 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import types
|
||||
import uuid
|
||||
|
||||
from keystoneauth1 import exceptions as ks_exc
|
||||
from neutron_lib.api.definitions import port as port_def
|
||||
from neutron_lib.api.definitions import port_resource_request
|
||||
from neutron_lib.api.definitions import port_resource_request_groups
|
||||
from neutron_lib.api.definitions import portbindings
|
||||
from neutron_lib.api.definitions import qos as qos_apidef
|
||||
from neutron_lib.api.definitions import qos_bw_limit_direction
|
||||
@ -34,6 +38,7 @@ from neutron_lib import context
|
||||
from neutron_lib.db import api as db_api
|
||||
from neutron_lib.db import resource_extend
|
||||
from neutron_lib import exceptions as lib_exc
|
||||
from neutron_lib.exceptions import placement as place_exc
|
||||
from neutron_lib.exceptions import qos as qos_exc
|
||||
from neutron_lib.placement import client as pl_client
|
||||
from neutron_lib.placement import utils as pl_utils
|
||||
@ -44,6 +49,7 @@ from oslo_log import log as logging
|
||||
|
||||
from neutron._i18n import _
|
||||
from neutron.db import db_base_plugin_common
|
||||
from neutron.exceptions import qos as neutron_qos_exc
|
||||
from neutron.extensions import qos
|
||||
from neutron.objects import base as base_obj
|
||||
from neutron.objects import network as network_object
|
||||
@ -59,6 +65,53 @@ from neutron.services.qos.drivers import manager
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO(przszc): Move this function to n-lib
|
||||
def update_qos_allocation(self, consumer_uuid, alloc_diff):
|
||||
"""Update allocation for QoS minimum bandwidth consumer
|
||||
|
||||
:param consumer_uuid: The uuid of the consumer, in case of bound port
|
||||
owned by a VM, the VM uuid.
|
||||
:param alloc_diff: A dict which contains RP UUIDs as keys and
|
||||
corresponding fields to update for the allocation
|
||||
under the given resource provider.
|
||||
"""
|
||||
for i in range(pl_client.GENERATION_CONFLICT_RETRIES):
|
||||
body = self.list_allocations(consumer_uuid)
|
||||
if not body['allocations']:
|
||||
raise place_exc.PlacementAllocationRemoved(consumer=consumer_uuid)
|
||||
# Count new values based on the diff in alloc_diff
|
||||
for rp_uuid, diff in alloc_diff.items():
|
||||
if rp_uuid not in body['allocations']:
|
||||
raise place_exc.PlacementAllocationRpNotExists(
|
||||
resource_provider=rp_uuid, consumer=consumer_uuid)
|
||||
for drctn, value in diff.items():
|
||||
orig_value = (body['allocations'][rp_uuid][
|
||||
'resources'].get(drctn, 0))
|
||||
new_value = orig_value + value
|
||||
if new_value > 0:
|
||||
body['allocations'][rp_uuid]['resources'][
|
||||
drctn] = new_value
|
||||
else:
|
||||
# Remove the resource class if the new value is 0
|
||||
resources = body['allocations'][rp_uuid]['resources']
|
||||
resources.pop(drctn, None)
|
||||
|
||||
# Remove RPs without any resources
|
||||
body['allocations'] = {
|
||||
rp: alloc for rp, alloc in body['allocations'].items()
|
||||
if alloc.get('resources')}
|
||||
try:
|
||||
# Update allocations has no return body, but leave the loop
|
||||
return self.update_allocation(consumer_uuid, body)
|
||||
except ks_exc.Conflict as e:
|
||||
resp = e.response.json()
|
||||
if resp['errors'][0]['code'] == 'placement.concurrent_update':
|
||||
continue
|
||||
raise
|
||||
raise place_exc.PlacementAllocationGenerationConflict(
|
||||
consumer=consumer_uuid)
|
||||
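A hypothetical usage of the helper above (the RP UUIDs, amounts, and the
"plugin" variable are invented); QoSPlugin binds the helper onto its placement
client via types.MethodType further down:

    alloc_diff = {
        'min-bw-rp-uuid': {'NET_BW_IGR_KILOBIT_PER_SEC': 1000},           # claim more
        'min-pps-rp-uuid': {'NET_PACKET_RATE_KILOPACKET_PER_SEC': -500},  # release
    }
    plugin._placement_client.update_qos_allocation(
        consumer_uuid=port['device_id'], alloc_diff=alloc_diff)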
|
||||
|
||||
@resource_extend.has_resource_extenders
|
||||
class QoSPlugin(qos.QoSPluginBase):
|
||||
"""Implementation of the Neutron QoS Service Plugin.
|
||||
@ -73,6 +126,7 @@ class QoSPlugin(qos.QoSPluginBase):
|
||||
qos_default.ALIAS,
|
||||
qos_rule_type_details.ALIAS,
|
||||
port_resource_request.ALIAS,
|
||||
port_resource_request_groups.ALIAS,
|
||||
qos_bw_minimum_ingress.ALIAS,
|
||||
qos_rules_alias.ALIAS,
|
||||
qos_port_network_policy.ALIAS,
|
||||
@ -88,6 +142,8 @@ class QoSPlugin(qos.QoSPluginBase):
|
||||
super(QoSPlugin, self).__init__()
|
||||
self.driver_manager = manager.QosServiceDriverManager()
|
||||
self._placement_client = pl_client.PlacementAPIClient(cfg.CONF)
|
||||
self._placement_client.update_qos_allocation = types.MethodType(
|
||||
update_qos_allocation, self._placement_client)
|
||||
|
||||
callbacks_registry.subscribe(
|
||||
self._validate_create_port_callback,
|
||||
@ -131,29 +187,42 @@ class QoSPlugin(qos.QoSPluginBase):
|
||||
port_res['resource_request'] = {
|
||||
'qos_id': qos_id,
|
||||
'network_id': port_db.network_id,
|
||||
'vnic_type': port_res[portbindings.VNIC_TYPE]}
|
||||
'vnic_type': port_res[portbindings.VNIC_TYPE],
|
||||
'port_id': port_db.id,
|
||||
}
|
||||
return port_res
|
||||
|
||||
min_bw_rules = rule_object.QosMinimumBandwidthRule.get_objects(
|
||||
context.get_admin_context(), qos_policy_id=qos_id)
|
||||
resources = QoSPlugin._get_resources(min_bw_rules)
|
||||
if not resources:
|
||||
return port_res
|
||||
min_bw_request_group = QoSPlugin._get_min_bw_request_group(
|
||||
qos_id, port_db.id, port_res[portbindings.VNIC_TYPE],
|
||||
port_db.network_id)
|
||||
min_pps_request_group = QoSPlugin._get_min_pps_request_group(
|
||||
qos_id, port_db.id, port_res[portbindings.VNIC_TYPE])
|
||||
|
||||
segments = network_object.NetworkSegment.get_objects(
|
||||
context.get_admin_context(), network_id=port_db.network_id)
|
||||
traits = QoSPlugin._get_traits(port_res[portbindings.VNIC_TYPE],
|
||||
segments)
|
||||
if not traits:
|
||||
return port_res
|
||||
|
||||
port_res['resource_request'] = {
|
||||
'required': traits,
|
||||
'resources': resources}
|
||||
port_res['resource_request'] = (
|
||||
QoSPlugin._get_resource_request(min_bw_request_group,
|
||||
min_pps_request_group))
|
||||
return port_res
|
||||
|
||||
@staticmethod
|
||||
def _get_resources(min_bw_rules):
|
||||
def _get_resource_request(min_bw_request_group, min_pps_request_group):
|
||||
resource_request = None
|
||||
request_groups = []
|
||||
|
||||
if min_bw_request_group:
|
||||
request_groups += [min_bw_request_group]
|
||||
|
||||
if min_pps_request_group:
|
||||
request_groups += [min_pps_request_group]
|
||||
|
||||
if request_groups:
|
||||
resource_request = {
|
||||
'request_groups': request_groups,
|
||||
'same_subtree': [rg['id'] for rg in request_groups],
|
||||
}
|
||||
return resource_request
|
||||
|
||||
@staticmethod
|
||||
def _get_min_bw_resources(min_bw_rules):
|
||||
resources = {}
|
||||
# NOTE(ralonsoh): we should move this translation dict to n-lib.
|
||||
rule_direction_class = {
|
||||
@ -167,7 +236,63 @@ class QoSPlugin(qos.QoSPluginBase):
|
||||
return resources
|
||||
|
||||
@staticmethod
|
||||
def _get_traits(vnic_type, segments):
|
||||
def _get_min_bw_request_group(qos_policy_id, port_id, vnic_type,
|
||||
network_id, min_bw_rules=None,
|
||||
segments=None):
|
||||
request_group = {}
|
||||
if not min_bw_rules:
|
||||
min_bw_rules = rule_object.QosMinimumBandwidthRule.get_objects(
|
||||
context.get_admin_context(), qos_policy_id=qos_policy_id)
|
||||
min_bw_resources = QoSPlugin._get_min_bw_resources(min_bw_rules)
|
||||
if not segments:
|
||||
segments = network_object.NetworkSegment.get_objects(
|
||||
context.get_admin_context(), network_id=network_id)
|
||||
min_bw_traits = QoSPlugin._get_min_bw_traits(vnic_type, segments)
|
||||
if min_bw_resources and min_bw_traits:
|
||||
request_group.update({
|
||||
'id': str(pl_utils.resource_request_group_uuid(
|
||||
uuid.UUID(port_id), min_bw_rules)),
|
||||
'required': min_bw_traits,
|
||||
'resources': min_bw_resources,
|
||||
})
|
||||
return request_group
|
||||
|
||||
@staticmethod
|
||||
def _get_min_pps_request_group(qos_policy_id, port_id, vnic_type,
|
||||
min_pps_rules=None):
|
||||
request_group = {}
|
||||
if not min_pps_rules:
|
||||
min_pps_rules = rule_object.QosMinimumPacketRateRule.get_objects(
|
||||
context.get_admin_context(),
|
||||
qos_policy_id=qos_policy_id)
|
||||
min_pps_resources = QoSPlugin._get_min_pps_resources(min_pps_rules)
|
||||
min_pps_traits = [pl_utils.vnic_type_trait(vnic_type)]
|
||||
if min_pps_resources and min_pps_traits:
|
||||
request_group.update({
|
||||
'id': str(pl_utils.resource_request_group_uuid(
|
||||
uuid.UUID(port_id), min_pps_rules)),
|
||||
'required': min_pps_traits,
|
||||
'resources': min_pps_resources,
|
||||
})
|
||||
return request_group
|
||||
|
||||
@staticmethod
|
||||
def _get_min_pps_resources(min_pps_rules):
|
||||
resources = {}
|
||||
rule_direction_class = {
|
||||
nl_constants.INGRESS_DIRECTION:
|
||||
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC,
|
||||
nl_constants.EGRESS_DIRECTION:
|
||||
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC,
|
||||
nl_constants.ANY_DIRECTION:
|
||||
orc.NET_PACKET_RATE_KILOPACKET_PER_SEC,
|
||||
}
|
||||
for rule in min_pps_rules:
|
||||
resources[rule_direction_class[rule.direction]] = rule.min_kpps
|
||||
return resources
|
||||
|
||||
@staticmethod
|
||||
def _get_min_bw_traits(vnic_type, segments):
|
||||
# TODO(lajoskatona): Change to handle all segments when any traits
|
||||
# support will be available. See Placement spec:
|
||||
# https://review.opendev.org/565730
|
||||
@ -189,6 +314,7 @@ class QoSPlugin(qos.QoSPluginBase):
|
||||
def _extend_port_resource_request_bulk(ports_res, noop):
|
||||
"""Add resource request to a list of ports."""
|
||||
min_bw_rules = dict()
|
||||
min_pps_rules = dict()
|
||||
net_segments = dict()
|
||||
|
||||
for port_res in ports_res:
|
||||
@ -201,29 +327,33 @@ class QoSPlugin(qos.QoSPluginBase):
|
||||
|
||||
net_id = port_res['resource_request'].pop('network_id')
|
||||
vnic_type = port_res['resource_request'].pop('vnic_type')
|
||||
port_id = port_res['resource_request'].pop('port_id')
|
||||
|
||||
if qos_id not in min_bw_rules:
|
||||
rules = rule_object.QosMinimumBandwidthRule.get_objects(
|
||||
context.get_admin_context(), qos_policy_id=qos_id)
|
||||
min_bw_rules[qos_id] = rules
|
||||
|
||||
resources = QoSPlugin._get_resources(min_bw_rules[qos_id])
|
||||
if not resources:
|
||||
continue
|
||||
|
||||
if net_id not in net_segments:
|
||||
segments = network_object.NetworkSegment.get_objects(
|
||||
context.get_admin_context(),
|
||||
network_id=net_id)
|
||||
net_segments[net_id] = segments
|
||||
|
||||
traits = QoSPlugin._get_traits(vnic_type, net_segments[net_id])
|
||||
if not traits:
|
||||
continue
|
||||
min_bw_request_group = QoSPlugin._get_min_bw_request_group(
|
||||
qos_id, port_id, vnic_type, net_id,
|
||||
min_bw_rules[qos_id], net_segments[net_id])
|
||||
|
||||
port_res['resource_request'] = {
|
||||
'required': traits,
|
||||
'resources': resources}
|
||||
if qos_id not in min_pps_rules:
|
||||
rules = rule_object.QosMinimumPacketRateRule.get_objects(
|
||||
context.get_admin_context(), qos_policy_id=qos_id)
|
||||
min_pps_rules[qos_id] = rules
|
||||
min_pps_request_group = QoSPlugin._get_min_pps_request_group(
|
||||
qos_id, port_id, vnic_type, min_pps_rules[qos_id])
|
||||
|
||||
port_res['resource_request'] = (
|
||||
QoSPlugin._get_resource_request(min_bw_request_group,
|
||||
min_pps_request_group))
|
||||
|
||||
return ports_res
|
||||
|
||||
@ -278,62 +408,213 @@ class QoSPlugin(qos.QoSPluginBase):
|
||||
policy = policy_object.QosPolicy.get_object(
|
||||
context.elevated(), id=policy_id)
|
||||
self._change_placement_allocation(original_policy, policy,
|
||||
orig_port)
|
||||
orig_port, port)
|
||||
|
||||
def _prepare_allocation_needs(self, original_rule, desired_rule):
|
||||
if (isinstance(desired_rule, rule_object.QosMinimumBandwidthRule) or
|
||||
isinstance(desired_rule, dict)):
|
||||
o_dir = original_rule.get('direction')
|
||||
o_minkbps = original_rule.get('min_kbps')
|
||||
d_minkbps = desired_rule.get('min_kbps')
|
||||
d_dir = desired_rule.get('direction')
|
||||
if o_dir == d_dir and o_minkbps != d_minkbps:
|
||||
diff = d_minkbps - o_minkbps
|
||||
# TODO(lajoskatona): move this to neutron-lib, see similar
|
||||
# dict @l125.
|
||||
if d_dir == 'egress':
|
||||
drctn = orc.NET_BW_EGR_KILOBIT_PER_SEC
|
||||
else:
|
||||
drctn = orc.NET_BW_IGR_KILOBIT_PER_SEC
|
||||
return {drctn: diff}
|
||||
def _translate_rule_for_placement(self, rule):
|
||||
dir = rule.get('direction')
|
||||
if isinstance(rule, rule_object.QosMinimumBandwidthRule):
|
||||
value = rule.get('min_kbps')
|
||||
# TODO(lajoskatona): move this to neutron-lib, see similar
|
||||
# dict @l125.
|
||||
if dir == 'egress':
|
||||
drctn = orc.NET_BW_EGR_KILOBIT_PER_SEC
|
||||
else:
|
||||
drctn = orc.NET_BW_IGR_KILOBIT_PER_SEC
|
||||
return {drctn: value}
|
||||
elif isinstance(rule, rule_object.QosMinimumPacketRateRule):
|
||||
value = rule.get('min_kpps')
|
||||
# TODO(przszc): move this to neutron-lib, see similar
|
||||
# dict @l268.
|
||||
rule_direction_class = {
|
||||
nl_constants.INGRESS_DIRECTION:
|
||||
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC,
|
||||
nl_constants.EGRESS_DIRECTION:
|
||||
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC,
|
||||
nl_constants.ANY_DIRECTION:
|
||||
orc.NET_PACKET_RATE_KILOPACKET_PER_SEC,
|
||||
}
|
||||
return {rule_direction_class[dir]: value}
|
||||
return {}
|
||||
|
||||
def _prepare_allocation_needs(self, original_port, rule_type_to_rp_map,
|
||||
original_rules, desired_rules):
|
||||
alloc_diff = {}
|
||||
for rule in original_rules:
|
||||
translated_rule = self._translate_rule_for_placement(rule)
|
||||
# NOTE(przszc): Updating Placement resource allocation relies on
|
||||
# calculating a difference between current allocation and desired
|
||||
# one. If we want to release resources we need to get a negative
|
||||
# value of the original allocation.
|
||||
translated_rule = {rc: v * -1 for rc, v in translated_rule.items()}
|
||||
rp_uuid = rule_type_to_rp_map.get(rule.rule_type)
|
||||
if not rp_uuid:
|
||||
LOG.warning(
|
||||
"Port %s has no RP responsible for allocating %s resource "
|
||||
"class. Only dataplane enforcement will happen for %s "
|
||||
"rule!", original_port['id'],
|
||||
''.join(translated_rule.keys()), rule.rule_type)
|
||||
continue
|
||||
if rp_uuid not in alloc_diff:
|
||||
alloc_diff[rp_uuid] = translated_rule
|
||||
else:
|
||||
alloc_diff[rp_uuid].update(translated_rule)
|
||||
|
||||
for rule in desired_rules:
|
||||
translated_rule = self._translate_rule_for_placement(rule)
|
||||
rp_uuid = rule_type_to_rp_map.get(rule.rule_type)
|
||||
if not rp_uuid:
|
||||
LOG.warning(
|
||||
"Port %s has no RP responsible for allocating %s resource "
|
||||
"class. Only dataplane enforcement will happen for %s "
|
||||
"rule!", original_port['id'],
|
||||
''.join(translated_rule.keys()), rule.rule_type)
|
||||
continue
|
||||
for rc, value in translated_rule.items():
|
||||
if (rc == orc.NET_PACKET_RATE_KILOPACKET_PER_SEC and
|
||||
(orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC in
|
||||
alloc_diff[rp_uuid] or
|
||||
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC in
|
||||
alloc_diff[rp_uuid]) or
|
||||
(rc in (orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC,
|
||||
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC) and
|
||||
orc.NET_PACKET_RATE_KILOPACKET_PER_SEC in
|
||||
alloc_diff[rp_uuid])):
|
||||
raise NotImplementedError(_(
|
||||
'Changing from direction-less QoS minimum packet rate '
|
||||
'rule to a direction-oriented minimum packet rate rule'
|
||||
', or vice versa, is not supported.'))
|
||||
new_value = alloc_diff[rp_uuid].get(rc, 0) + value
|
||||
if new_value == 0:
|
||||
alloc_diff[rp_uuid].pop(rc, None)
|
||||
else:
|
||||
alloc_diff[rp_uuid][rc] = new_value
|
||||
|
||||
alloc_diff = {rp: diff for rp, diff in alloc_diff.items() if diff}
|
||||
return alloc_diff
|
||||
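A worked example of the diff calculation described in the note above (values
invented): an egress minimum-bandwidth rule changing from 1000 to 2000 kbps on
the RP that backs the bandwidth request group contributes -1000 (release of
the original allocation) plus +2000 (the desired amount):

    released = {'NET_BW_EGR_KILOBIT_PER_SEC': -1000}   # original rule, negated
    claimed = {'NET_BW_EGR_KILOBIT_PER_SEC': 2000}     # desired rule
    alloc_diff = {'bw-rp-uuid': {'NET_BW_EGR_KILOBIT_PER_SEC': -1000 + 2000}}
    # -> {'bw-rp-uuid': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000}}, matching
    #    test_change_placement_allocation_increase further down.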
|
||||
def _get_updated_port_allocation(self, original_port, original_rules,
|
||||
desired_rules):
|
||||
# NOTE(przszc): Port's binding:profile.allocation attribute can contain
|
||||
# multiple RPs and we need to figure out which RP is responsible for
|
||||
# providing resources for particular resource class. We could use
|
||||
# Placement API to get RP's inventory, but it would require superfluous
|
||||
# API calls. Another option is to calculate resource request group
|
||||
# UUIDs and check if they match the ones in allocation attribute and
|
||||
# create a lookup table. Since we are updating allocation attribute we
|
||||
# have to calculate resource request group UUIDs anyways, so creating a
|
||||
# lookup table seems like a better solution.
|
||||
rule_type_to_rp_map = {}
|
||||
updated_allocation = {}
|
||||
|
||||
allocation = original_port['binding:profile']['allocation']
|
||||
|
||||
for group_uuid, rp in allocation.items():
|
||||
for rule_type in [qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH,
|
||||
qos_constants.RULE_TYPE_MINIMUM_PACKET_RATE]:
|
||||
o_filtered_rules = [r for r in original_rules
|
||||
if r.rule_type == rule_type]
|
||||
d_filtered_rules = [r for r in desired_rules
|
||||
if r.rule_type == rule_type]
|
||||
o_group_uuid = str(
|
||||
pl_utils.resource_request_group_uuid(
|
||||
uuid.UUID(original_port['id']), o_filtered_rules))
|
||||
if group_uuid == o_group_uuid:
|
||||
rule_type_to_rp_map[rule_type] = rp
|
||||
# NOTE(przszc): Original QoS policy can have more rule
|
||||
# types than desired QoS policy. We should add RP to
|
||||
# allocation only if there are rules with this type in
|
||||
# desired QoS policy.
|
||||
if d_filtered_rules:
|
||||
d_group_uuid = str(
|
||||
pl_utils.resource_request_group_uuid(
|
||||
uuid.UUID(original_port['id']),
|
||||
d_filtered_rules))
|
||||
updated_allocation[d_group_uuid] = rp
|
||||
break
|
||||
|
||||
return updated_allocation, rule_type_to_rp_map
|
||||
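A sketch of the lookup described in the note above (min_bw_rules stands for
the rules of one type filtered from the original policy): the request group
UUID is deterministic, so it can be recomputed from the port id and the rules
and matched against the allocation keys without extra Placement API calls.

    group_uuid = str(pl_utils.resource_request_group_uuid(
        uuid.UUID(original_port['id']), min_bw_rules))
    rp_uuid = original_port['binding:profile']['allocation'].get(group_uuid)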
|
||||
def _change_placement_allocation(self, original_policy, desired_policy,
|
||||
orig_port):
|
||||
orig_port, port):
|
||||
alloc_diff = {}
|
||||
original_rules = []
|
||||
rp_uuid = orig_port['binding:profile'].get('allocation')
|
||||
desired_rules = []
|
||||
device_id = orig_port['device_id']
|
||||
if original_policy:
|
||||
original_rules = original_policy.get('rules')
|
||||
if desired_policy:
|
||||
desired_rules = desired_policy.get('rules')
|
||||
else:
|
||||
desired_rules = [{'direction': 'egress', 'min_kbps': 0},
|
||||
{'direction': 'ingress', 'min_kbps': 0}]
|
||||
|
||||
any_rules_minbw = any(
|
||||
[isinstance(r, rule_object.QosMinimumBandwidthRule)
|
||||
for r in desired_rules])
|
||||
if (not original_rules and any_rules_minbw) or not rp_uuid:
|
||||
LOG.warning("There was no QoS policy with minimum_bandwidth rule "
|
||||
"attached to the port %s, there is no allocation "
|
||||
"record in placement for it, only the dataplane "
|
||||
"enforcement will happen!", orig_port['id'])
|
||||
# Filter out rules that can't have resources allocated in Placement
|
||||
original_rules = [r for r in original_rules
|
||||
if (isinstance(r, (rule_object.QosMinimumBandwidthRule,
|
||||
rule_object.QosMinimumPacketRateRule)))]
|
||||
desired_rules = [r for r in desired_rules
|
||||
if (isinstance(r, (rule_object.QosMinimumBandwidthRule,
|
||||
rule_object.QosMinimumPacketRateRule)))]
|
||||
if not original_rules and not desired_rules:
|
||||
return
|
||||
for o_rule in original_rules:
|
||||
if isinstance(o_rule, rule_object.QosMinimumBandwidthRule):
|
||||
for d_rule in desired_rules:
|
||||
alloc_diff.update(
|
||||
self._prepare_allocation_needs(o_rule, d_rule))
|
||||
|
||||
o_rule_types = set(r.rule_type for r in original_rules)
|
||||
d_rule_types = set(r.rule_type for r in desired_rules)
|
||||
allocation = orig_port['binding:profile'].get('allocation')
|
||||
if (not original_rules and desired_rules) or not allocation:
|
||||
LOG.warning("There was no QoS policy with minimum_bandwidth or "
|
||||
"minimum_packet_rate rule attached to the port %s, "
|
||||
"there is no allocation record in placement for it, "
|
||||
"only the dataplane enforcement will happen!",
|
||||
orig_port['id'])
|
||||
return
|
||||
|
||||
new_rule_types = d_rule_types.difference(o_rule_types)
|
||||
if new_rule_types:
|
||||
# NOTE(przszc): Port's resource_request and
|
||||
# binding:profile.allocation attributes depend on associated
|
||||
# QoS policy. resource_request is calculated on the fly, but
|
||||
# allocation attribute needs to be updated whenever QoS policy
|
||||
# changes. Since desired QoS policy has more rule types than the
|
||||
# original QoS policy, Neutron doesn't know which RP is responsible
|
||||
# for allocating those resources. That would require scheduling in
|
||||
# Nova. However, some users do not use Placement enforcement and
|
||||
# are interested only in dataplane enforcement. It means that we
|
||||
# cannot raise an exception without breaking legacy behavior.
|
||||
# Only rules that have resources allocated in Placement are going
|
||||
# to be included in port's binding:profile.allocation.
|
||||
LOG.warning(
|
||||
"There was no QoS policy with %s rules attached to the port "
|
||||
"%s, there is no allocation record in placement for it, only "
|
||||
"the dataplane enforcement will happen for those rules!",
|
||||
','.join(new_rule_types), orig_port['id'])
|
||||
desired_rules = [
|
||||
r for r in desired_rules if r.rule_type not in new_rule_types]
|
||||
|
||||
# NOTE(przszc): Get updated allocation but do not assign it to the
|
||||
# port yet. We don't know if Placement API call is going to succeed.
|
||||
updated_allocation, rule_type_to_rp_map = (
|
||||
self._get_updated_port_allocation(orig_port, original_rules,
|
||||
desired_rules))
|
||||
alloc_diff = self._prepare_allocation_needs(orig_port,
|
||||
rule_type_to_rp_map,
|
||||
original_rules,
|
||||
desired_rules)
|
||||
if alloc_diff:
|
||||
try:
|
||||
self._placement_client.update_qos_minbw_allocation(
|
||||
consumer_uuid=device_id, minbw_alloc_diff=alloc_diff,
|
||||
rp_uuid=rp_uuid)
|
||||
self._placement_client.update_qos_allocation(
|
||||
consumer_uuid=device_id, alloc_diff=alloc_diff)
|
||||
except ks_exc.Conflict:
|
||||
raise qos_exc.QosPlacementAllocationConflict(
|
||||
consumer=device_id, rp=rp_uuid)
|
||||
raise neutron_qos_exc.QosPlacementAllocationUpdateConflict(
|
||||
alloc_diff=alloc_diff, consumer=device_id)
|
||||
|
||||
# NOTE(przszc): Upon successful allocation update in Placement we can
|
||||
# proceed with updating port's binding:profile.allocation attribute.
|
||||
# Keep in mind that updating port state this way is possible only
|
||||
# because DBEventPayload's payload attributes are not copied -
|
||||
# subscribers obtain a direct reference to event payload objects.
|
||||
# If that changes, line below won't have any effect.
|
||||
#
|
||||
# Subscribers should *NOT* modify event payload objects, but this is
|
||||
# the only way we can avoid inconsistency in port's attributes.
|
||||
port['binding:profile'] = {'allocation': updated_allocation}
|
||||
|
||||
def _validate_update_port_callback(self, resource, event, trigger,
|
||||
payload=None):
|
||||
|
@ -97,7 +97,9 @@ class SriovSwitchMechGenericTestCase(SriovNicSwitchMechanismBaseTestCase,
|
||||
]
|
||||
segments = []
|
||||
# uuid -v5 87f1895c-73bb-11e8-9008-c4d987b2a692 host:eth0
|
||||
profile = {'allocation': '5762cf50-781b-5f01-8ebc-0cce8c9e74cd'}
|
||||
profile = {'allocation':
|
||||
{'fake_request_group_uuid':
|
||||
'5762cf50-781b-5f01-8ebc-0cce8c9e74cd'}}
|
||||
|
||||
port_ctx = base.FakePortContext(
|
||||
self.AGENT_TYPE,
|
||||
|
@ -171,7 +171,7 @@ class OpenvswitchMechanismHybridPlugTestCase(OpenvswitchMechanismBaseTestCase):
|
||||
|
||||
class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase,
|
||||
base.AgentMechanismGenericTestCase):
|
||||
def test_driver_responsible_for_ports_allocation(self):
|
||||
def test_driver_responsible_for_ports_allocation_min_bw(self):
|
||||
agents = [
|
||||
{'agent_type': constants.AGENT_TYPE_OVS,
|
||||
'configurations': {'resource_provider_bandwidths': {'eth0': {}}},
|
||||
@ -179,8 +179,77 @@ class OpenvswitchMechanismGenericTestCase(OpenvswitchMechanismBaseTestCase,
|
||||
'host': 'host'}
|
||||
]
|
||||
segments = []
|
||||
|
||||
# uuid -v5 87ee7d5c-73bb-11e8-9008-c4d987b2a692 host:eth0
|
||||
profile = {'allocation': '13cc0ed9-e802-5eaa-b4c7-3441855e31f2'}
|
||||
fake_min_bw_rp = '13cc0ed9-e802-5eaa-b4c7-3441855e31f2'
|
||||
fake_allocation = {
|
||||
'fake_min_bw_resource_request_group': fake_min_bw_rp,
|
||||
}
|
||||
profile = {'allocation': fake_allocation}
|
||||
|
||||
port_ctx = base.FakePortContext(
|
||||
self.AGENT_TYPE,
|
||||
agents,
|
||||
segments,
|
||||
vnic_type=portbindings.VNIC_NORMAL,
|
||||
profile=profile)
|
||||
with mock.patch.object(self.driver, '_possible_agents_for_port',
|
||||
return_value=agents):
|
||||
self.assertTrue(
|
||||
self.driver.responsible_for_ports_allocation(port_ctx))
|
||||
|
||||
def test_driver_responsible_for_ports_allocation_min_pps(self):
|
||||
agents = [
|
||||
{'agent_type': constants.AGENT_TYPE_OVS,
|
||||
'configurations': {
|
||||
'resource_provider_packet_processing_with_direction': {
|
||||
'host': {}}},
|
||||
'id': '1',
|
||||
'host': 'host'}
|
||||
]
|
||||
segments = []
|
||||
|
||||
# uuid -v5 87ee7d5c-73bb-11e8-9008-c4d987b2a692 host
|
||||
fake_min_pps_rp = '791f63f0-1a1a-5c38-8972-5e43014fd58b'
|
||||
fake_allocation = {
|
||||
'fake_min_pps_resource_request_group': fake_min_pps_rp,
|
||||
}
|
||||
profile = {'allocation': fake_allocation}
|
||||
|
||||
port_ctx = base.FakePortContext(
|
||||
self.AGENT_TYPE,
|
||||
agents,
|
||||
segments,
|
||||
vnic_type=portbindings.VNIC_NORMAL,
|
||||
profile=profile)
|
||||
with mock.patch.object(self.driver, '_possible_agents_for_port',
|
||||
return_value=agents):
|
||||
self.assertTrue(
|
||||
self.driver.responsible_for_ports_allocation(port_ctx))
|
||||
|
||||
def test_driver_responsible_for_ports_allocation_min_pps_and_min_bw(self):
|
||||
agents = [{
|
||||
'agent_type': constants.AGENT_TYPE_OVS,
|
||||
'configurations': {
|
||||
'resource_provider_packet_processing_without_direction': {
|
||||
'host': {}
|
||||
},
|
||||
'resource_provider_bandwidths': {'eth0': {}}
|
||||
},
|
||||
'id': '1',
|
||||
'host': 'host'
|
||||
}]
|
||||
segments = []
|
||||
|
||||
# uuid -v5 87ee7d5c-73bb-11e8-9008-c4d987b2a692 host
|
||||
fake_min_pps_rp = '791f63f0-1a1a-5c38-8972-5e43014fd58b'
|
||||
# uuid -v5 87ee7d5c-73bb-11e8-9008-c4d987b2a692 host:eth0
|
||||
fake_min_bw_rp = '13cc0ed9-e802-5eaa-b4c7-3441855e31f2'
|
||||
fake_allocation = {
|
||||
'fake_min_pps_resource_request_group': fake_min_pps_rp,
|
||||
'fake_min_bw_resource_request_group': fake_min_bw_rp,
|
||||
}
|
||||
profile = {'allocation': fake_allocation}
|
||||
|
||||
port_ctx = base.FakePortContext(
|
||||
self.AGENT_TYPE,
|
||||
|
@ -123,7 +123,8 @@ class TestManagers(base.BaseTestCase):
|
||||
None,
|
||||
None,
|
||||
self.segments_to_bind,
|
||||
profile={'allocation': 'fake_resource_provider'})
|
||||
profile={'allocation':
|
||||
{'fake_uuid': 'fake_resource_provider'}})
|
||||
)
|
||||
|
||||
def test__infer_driver_from_allocation_ambiguous(self):
|
||||
@ -145,7 +146,8 @@ class TestManagers(base.BaseTestCase):
|
||||
None,
|
||||
None,
|
||||
self.segments_to_bind,
|
||||
profile={'allocation': 'fake_resource_provider'})
|
||||
profile={'allocation':
|
||||
{'fake_uuid': 'fake_resource_provider'}})
|
||||
)
|
||||
|
||||
@mock.patch.object(managers.LOG, 'critical')
|
||||
|
@ -32,6 +32,7 @@ from oslo_config import cfg
|
||||
from oslo_utils import uuidutils
|
||||
import webob.exc
|
||||
|
||||
from neutron.exceptions import qos as neutron_qos_exc
|
||||
from neutron.extensions import qos_rules_alias
|
||||
from neutron import manager
|
||||
from neutron.objects import network as network_object
|
||||
@ -135,8 +136,10 @@ class TestQosPlugin(base.BaseQosTestCase):
|
||||
self.assertEqual(call_args[1], ctxt)
|
||||
self.assertIsInstance(call_args[2], policy_object.QosPolicy)
|
||||
|
||||
def _create_and_extend_port(self, bw_rules, physical_network='public',
|
||||
has_qos_policy=True, has_net_qos_policy=False):
|
||||
def _create_and_extend_port(self, min_bw_rules, min_pps_rules=None,
|
||||
physical_network='public',
|
||||
has_qos_policy=True, has_net_qos_policy=False,
|
||||
request_groups_uuids=None):
|
||||
network_id = uuidutils.generate_uuid()
|
||||
|
||||
self.port_data = {
|
||||
@ -155,83 +158,345 @@ class TestQosPlugin(base.BaseQosTestCase):
|
||||
port_res = {"binding:vnic_type": "normal"}
|
||||
segment_mock = mock.MagicMock(network_id=network_id,
|
||||
physical_network=physical_network)
|
||||
min_pps_rules = min_pps_rules if min_pps_rules else []
|
||||
|
||||
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
|
||||
return_value=[segment_mock]), \
|
||||
mock.patch(
|
||||
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
|
||||
'get_objects',
|
||||
return_value=bw_rules):
|
||||
return_value=min_bw_rules), \
|
||||
mock.patch(
|
||||
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
|
||||
'get_objects',
|
||||
return_value=min_pps_rules), \
|
||||
mock.patch(
|
||||
'uuid.uuid5',
|
||||
return_value='fake_uuid',
|
||||
side_effect=request_groups_uuids):
|
||||
return qos_plugin.QoSPlugin._extend_port_resource_request(
|
||||
port_res, self.port)
|
||||
|
||||
def _create_and_extend_ports(self, min_bw_rules, min_pps_rules=None,
|
||||
physical_network='public',
|
||||
request_groups_uuids=None):
|
||||
network_id = uuidutils.generate_uuid()
|
||||
|
||||
ports_res = [
|
||||
{
|
||||
"resource_request": {
|
||||
"port_id": uuidutils.generate_uuid(),
|
||||
"qos_id": self.policy.id,
|
||||
"network_id": network_id,
|
||||
"vnic_type": "normal",
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"resource_request": {
|
||||
"port_id": uuidutils.generate_uuid(),
|
||||
"qos_id": self.policy.id,
|
||||
"network_id": network_id,
|
||||
"vnic_type": "normal",
|
||||
}
|
||||
},
|
||||
]
|
||||
segment_mock = mock.MagicMock(network_id=network_id,
|
||||
physical_network=physical_network)
|
||||
min_pps_rules = min_pps_rules if min_pps_rules else []
|
||||
|
||||
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
|
||||
return_value=[segment_mock]), \
|
||||
mock.patch(
|
||||
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
|
||||
'get_objects',
|
||||
return_value=min_bw_rules), \
|
||||
mock.patch(
|
||||
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
|
||||
'get_objects',
|
||||
return_value=min_pps_rules), \
|
||||
mock.patch(
|
||||
'uuid.uuid5',
|
||||
return_value='fake_uuid',
|
||||
side_effect=request_groups_uuids):
|
||||
return qos_plugin.QoSPlugin._extend_port_resource_request_bulk(
|
||||
ports_res, None)
|
||||
|
||||
def test__extend_port_resource_request_min_bw_rule(self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
port = self._create_and_extend_port([self.min_bw_rule])
|
||||
|
||||
self.assertEqual(
|
||||
1,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake_uuid',
|
||||
port['resource_request']['request_groups'][0]['id']
|
||||
)
|
||||
self.assertEqual(
|
||||
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
port['resource_request']['required']
|
||||
port['resource_request']['request_groups'][0]['required']
|
||||
)
|
||||
self.assertEqual(
|
||||
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
|
||||
port['resource_request']['resources'],
|
||||
port['resource_request']['request_groups'][0]['resources'],
|
||||
)
|
||||
self.assertEqual(
|
||||
['fake_uuid'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test__extend_port_resource_request_mixed_rules(self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
def test__extend_port_resource_request_min_pps_rule(self):
|
||||
port = self._create_and_extend_port([], [self.min_pps_rule])
|
||||
|
||||
self.assertEqual(
|
||||
1,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake_uuid',
|
||||
port['resource_request']['request_groups'][0]['id']
|
||||
)
|
||||
self.assertEqual(
|
||||
['CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
port['resource_request']['request_groups'][0]['required']
|
||||
)
|
||||
self.assertEqual(
|
||||
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
|
||||
port['resource_request']['request_groups'][0]['resources'],
|
||||
)
|
||||
self.assertEqual(
|
||||
['fake_uuid'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test__extend_port_resource_request_min_bw_and_pps_rule(self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
request_groups_uuids = ['fake_uuid0', 'fake_uuid1']
|
||||
min_bw_rule_ingress_data = {
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'min_kbps': 20,
|
||||
'direction': lib_constants.INGRESS_DIRECTION}
|
||||
min_pps_rule_ingress_data = {
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'min_kpps': 20,
|
||||
'direction': lib_constants.INGRESS_DIRECTION}
|
||||
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
|
||||
self.ctxt, **min_bw_rule_ingress_data)
|
||||
|
||||
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
|
||||
self.ctxt, **min_pps_rule_ingress_data)
|
||||
port = self._create_and_extend_port(
|
||||
[self.min_bw_rule, min_bw_rule_ingress])
|
||||
[self.min_bw_rule, min_bw_rule_ingress],
|
||||
[self.min_pps_rule, min_pps_rule_ingress],
|
||||
request_groups_uuids=request_groups_uuids)
|
||||
|
||||
self.assertEqual(
|
||||
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
port['resource_request']['required']
|
||||
2,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertIn(
|
||||
{
|
||||
'id': 'fake_uuid0',
|
||||
'required':
|
||||
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
'resources': {
|
||||
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
|
||||
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
|
||||
},
|
||||
port['resource_request']['request_groups']
|
||||
)
|
||||
self.assertIn(
|
||||
{
|
||||
'id': 'fake_uuid1',
|
||||
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
'resources': {
|
||||
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
|
||||
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
|
||||
},
|
||||
},
|
||||
port['resource_request']['request_groups']
|
||||
)
|
||||
self.assertEqual(
|
||||
{
|
||||
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
|
||||
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20
|
||||
},
|
||||
port['resource_request']['resources'],
|
||||
['fake_uuid0', 'fake_uuid1'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test__extend_port_resource_request_non_min_bw_rule(self):
|
||||
port = self._create_and_extend_port([])
|
||||
def test__extend_port_resource_request_non_min_bw_or_min_pps_rule(self):
|
||||
port = self._create_and_extend_port([], [])
|
||||
|
||||
self.assertIsNone(port.get('resource_request'))
|
||||
|
||||
def test__extend_port_resource_request_non_provider_net(self):
|
||||
def test__extend_port_resource_request_min_bw_non_provider_net(self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
|
||||
port = self._create_and_extend_port([self.min_bw_rule],
|
||||
physical_network=None)
|
||||
self.assertIsNone(port.get('resource_request'))
|
||||
|
||||
def test__extend_port_resource_request_mix_rules_non_provider_net(self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
|
||||
port = self._create_and_extend_port([self.min_bw_rule],
|
||||
[self.min_pps_rule],
|
||||
physical_network=None)
|
||||
self.assertEqual(
|
||||
1,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake_uuid',
|
||||
port['resource_request']['request_groups'][0]['id']
|
||||
)
|
||||
self.assertEqual(
|
||||
['CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
port['resource_request']['request_groups'][0]['required']
|
||||
)
|
||||
self.assertEqual(
|
||||
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
|
||||
port['resource_request']['request_groups'][0]['resources'],
|
||||
)
|
||||
self.assertEqual(
|
||||
['fake_uuid'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test__extend_port_resource_request_bulk_min_bw_rule(self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
ports = self._create_and_extend_ports([self.min_bw_rule])
|
||||
|
||||
for port in ports:
|
||||
self.assertEqual(
|
||||
1,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake_uuid',
|
||||
port['resource_request']['request_groups'][0]['id']
|
||||
)
|
||||
self.assertEqual(
|
||||
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
port['resource_request']['request_groups'][0]['required']
|
||||
)
|
||||
self.assertEqual(
|
||||
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
|
||||
port['resource_request']['request_groups'][0]['resources'],
|
||||
)
|
||||
self.assertEqual(
|
||||
['fake_uuid'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test__extend_port_resource_request_bulk_min_pps_rule(self):
|
||||
ports = self._create_and_extend_ports([], [self.min_pps_rule])
|
||||
|
||||
for port in ports:
|
||||
self.assertEqual(
|
||||
1,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake_uuid',
|
||||
port['resource_request']['request_groups'][0]['id']
|
||||
)
|
||||
self.assertEqual(
|
||||
['CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
port['resource_request']['request_groups'][0]['required']
|
||||
)
|
||||
self.assertEqual(
|
||||
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
|
||||
port['resource_request']['request_groups'][0]['resources'],
|
||||
)
|
||||
self.assertEqual(
|
||||
['fake_uuid'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test__extend_port_resource_request_bulk_min_bw_and_pps_rule(self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
request_groups_uuids = ['fake_uuid0', 'fake_uuid1'] * 2
|
||||
min_bw_rule_ingress_data = {
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'min_kbps': 20,
|
||||
'direction': lib_constants.INGRESS_DIRECTION}
|
||||
min_pps_rule_ingress_data = {
|
||||
'id': uuidutils.generate_uuid(),
|
||||
'min_kpps': 20,
|
||||
'direction': lib_constants.INGRESS_DIRECTION}
|
||||
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
|
||||
self.ctxt, **min_bw_rule_ingress_data)
|
||||
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
|
||||
self.ctxt, **min_pps_rule_ingress_data)
|
||||
ports = self._create_and_extend_ports(
|
||||
[self.min_bw_rule, min_bw_rule_ingress],
|
||||
[self.min_pps_rule, min_pps_rule_ingress],
|
||||
request_groups_uuids=request_groups_uuids)
|
||||
|
||||
for port in ports:
|
||||
self.assertEqual(
|
||||
2,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertIn(
|
||||
{
|
||||
'id': 'fake_uuid0',
|
||||
'required':
|
||||
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
'resources': {
|
||||
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
|
||||
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
|
||||
},
|
||||
port['resource_request']['request_groups']
|
||||
)
|
||||
self.assertIn(
|
||||
{
|
||||
'id': 'fake_uuid1',
|
||||
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
'resources': {
|
||||
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
|
||||
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
|
||||
},
|
||||
},
|
||||
port['resource_request']['request_groups']
|
||||
)
|
||||
self.assertEqual(
|
||||
['fake_uuid0', 'fake_uuid1'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test__extend_port_resource_request_no_qos_policy(self):
|
||||
port = self._create_and_extend_port([], physical_network='public',
|
||||
has_qos_policy=False)
|
||||
self.assertIsNone(port.get('resource_request'))
|
||||
|
||||
def test__extend_port_resource_request_inherited_policy(self):
|
||||
def test__extend_port_resource_request_min_bw_inherited_policy(
|
||||
self):
|
||||
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
|
||||
self.min_bw_rule.qos_policy_id = self.policy.id
|
||||
|
||||
port = self._create_and_extend_port([self.min_bw_rule],
|
||||
has_net_qos_policy=True)
|
||||
self.assertEqual(
|
||||
1,
|
||||
len(port['resource_request']['request_groups'])
|
||||
)
|
||||
self.assertEqual(
|
||||
'fake_uuid',
|
||||
port['resource_request']['request_groups'][0]['id']
|
||||
)
|
||||
self.assertEqual(
|
||||
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
|
||||
port['resource_request']['required']
|
||||
port['resource_request']['request_groups'][0]['required']
|
||||
)
|
||||
self.assertEqual(
|
||||
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
|
||||
port['resource_request']['resources'],
|
||||
port['resource_request']['request_groups'][0]['resources'],
|
||||
)
|
||||
self.assertEqual(
|
||||
['fake_uuid'],
|
||||
port['resource_request']['same_subtree'],
|
||||
)
|
||||
|
||||
def test_get_ports_with_policy(self):
|
||||
@ -1721,6 +1986,20 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
|
||||
|
||||
class TestQosPluginDB(base.BaseQosTestCase):
|
||||
|
||||
PORT_ID = 'f02f160e-1612-11ec-b2b8-bf60ab98186c'
|
||||
|
||||
QOS_MIN_BW_RULE_ID = '8bf8eb46-160e-11ec-8024-9f96be32099d'
|
||||
# uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
|
||||
# 8bf8eb46-160e-11ec-8024-9f96be32099d
|
||||
MIN_BW_REQUEST_GROUP_UUID = 'c8bc1b27-59a1-5135-aa33-aeecad6093f4'
|
||||
MIN_BW_RP = 'd7bea120-1626-11ec-9148-c32debfcf0f6'
|
||||
|
||||
QOS_MIN_PPS_RULE_ID = '6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb'
|
||||
# uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
|
||||
# 6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb
|
||||
MIN_PPS_REQUEST_GROUP_UUID = '995008f4-f120-547a-b051-428b89076067'
|
||||
MIN_PPS_RP = 'e16161f4-1626-11ec-a5a2-1fc9396e27cc'
|
||||
|
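The uuid -v5 comments above indicate how these fixture UUIDs are derived: a
version-5 UUID with the port id as namespace and the rule id as name. Assuming
that derivation, the minimum-bandwidth group UUID can be reproduced with:

    import uuid
    group = uuid.uuid5(uuid.UUID('f02f160e-1612-11ec-b2b8-bf60ab98186c'),
                       '8bf8eb46-160e-11ec-8024-9f96be32099d')
    # str(group) is expected to equal MIN_BW_REQUEST_GROUP_UUID per the
    # comments above.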
||||
def setUp(self):
|
||||
super(TestQosPluginDB, self).setUp()
|
||||
self.setup_coreplugin(load_plugins=False)
|
||||
@ -1743,20 +2022,34 @@ class TestQosPluginDB(base.BaseQosTestCase):
|
||||
return qos_policy
|
||||
|
||||
def _make_qos_minbw_rule(self, policy_id, direction='ingress',
|
||||
min_kbps=1000):
|
||||
min_kbps=1000, rule_id=None):
|
||||
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
|
||||
qos_rule = rule_object.QosMinimumBandwidthRule(
|
||||
self.context, project_id=self.project_id,
|
||||
qos_policy_id=policy_id, direction=direction, min_kbps=min_kbps)
|
||||
qos_policy_id=policy_id, direction=direction, min_kbps=min_kbps,
|
||||
id=rule_id)
|
||||
qos_rule.create()
|
||||
return qos_rule
|
||||
|
||||
def _make_port(self, network_id, qos_policy_id=None):
|
||||
def _make_qos_minpps_rule(self, policy_id, direction='ingress',
|
||||
min_kpps=1000, rule_id=None):
|
||||
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
|
||||
qos_rule = rule_object.QosMinimumPacketRateRule(
|
||||
self.context, project_id=self.project_id,
|
||||
qos_policy_id=policy_id, direction=direction, min_kpps=min_kpps,
|
||||
id=rule_id)
|
||||
qos_rule.create()
|
||||
return qos_rule
|
||||
|
||||
def _make_port(self, network_id, qos_policy_id=None, port_id=None):
|
||||
port_id = port_id if port_id else uuidutils.generate_uuid()
|
||||
base_mac = ['aa', 'bb', 'cc', 'dd', 'ee', 'ff']
|
||||
mac = netaddr.EUI(next(net_utils.random_mac_generator(base_mac)))
|
||||
port = ports_object.Port(
|
||||
self.context, network_id=network_id, device_owner='3',
|
||||
project_id=self.project_id, admin_state_up=True, status='DOWN',
|
||||
device_id='2', qos_policy_id=qos_policy_id, mac_address=mac)
|
||||
device_id='2', qos_policy_id=qos_policy_id, mac_address=mac,
|
||||
id=port_id)
|
||||
port.create()
|
||||
return port
|
||||
|
||||
@ -1845,7 +2138,8 @@ class TestQosPluginDB(base.BaseQosTestCase):
|
||||
qos2_id = qos2.id if qos2 else None
|
||||
|
||||
network = self._make_network()
|
||||
port = self._make_port(network.id, qos_policy_id=qos1_id)
|
||||
port = self._make_port(
|
||||
network.id, qos_policy_id=qos1_id, port_id=TestQosPluginDB.PORT_ID)
|
||||
|
||||
return {"context": self.context,
|
||||
"original_port": {
|
||||
@ -1888,7 +2182,7 @@ class TestQosPluginDB(base.BaseQosTestCase):
|
||||
payload=events.DBEventPayload(
|
||||
context, states=(original_port, port)))
|
||||
mock_alloc_change.assert_called_once_with(
|
||||
qos1_obj, qos2_obj, kwargs['original_port'])
|
||||
qos1_obj, qos2_obj, kwargs['original_port'], port)
|
||||
|
||||
def test_check_port_for_placement_allocation_change_no_new_policy(self):
|
||||
qos1_obj = self._make_qos_policy()
|
||||
@ -1906,7 +2200,7 @@ class TestQosPluginDB(base.BaseQosTestCase):
|
||||
payload=events.DBEventPayload(
|
||||
context, states=(original_port, port)))
|
||||
mock_alloc_change.assert_called_once_with(
|
||||
qos1_obj, None, kwargs['original_port'])
|
||||
qos1_obj, None, kwargs['original_port'], port)
|
||||
|
||||
def test_check_port_for_placement_allocation_change_no_qos_update(self):
|
||||
qos1_obj = self._make_qos_policy()
|
||||
@ -1926,40 +2220,183 @@ class TestQosPluginDB(base.BaseQosTestCase):
|
||||
context, states=(original_port, port)))
|
||||
mock_alloc_change.assert_not_called()
|
||||
|
||||
def _prepare_port_for_placement_allocation(self, qos1, qos2=None,
|
||||
min_kbps1=1000, min_kbps2=2000):
|
||||
rule1_obj = self._make_qos_minbw_rule(qos1.id, min_kbps=min_kbps1)
|
||||
qos1.rules = [rule1_obj]
|
||||
if qos2:
|
||||
rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=min_kbps2)
|
||||
qos2.rules = [rule2_obj]
|
||||
orig_port = {'binding:profile': {'allocation': 'rp:uu:id'},
|
||||
'device_id': 'uu:id'}
|
||||
return orig_port
|
||||
def _prepare_port_for_placement_allocation(self, original_qos,
|
||||
desired_qos=None,
|
||||
original_min_kbps=None,
|
||||
desired_min_kbps=None,
|
||||
original_min_kpps=None,
|
||||
desired_min_kpps=None):
|
||||
kwargs = self._prepare_for_port_placement_allocation_change(
|
||||
original_qos, desired_qos)
|
||||
orig_port = kwargs['original_port']
|
||||
original_qos.rules = []
|
||||
allocation = {}
|
||||
if original_min_kbps:
|
||||
original_qos.rules += [self._make_qos_minbw_rule(
|
||||
original_qos.id, min_kbps=original_min_kbps,
|
||||
rule_id=TestQosPluginDB.QOS_MIN_BW_RULE_ID)]
|
||||
allocation.update(
|
||||
{TestQosPluginDB.MIN_BW_REQUEST_GROUP_UUID:
|
||||
TestQosPluginDB.MIN_BW_RP})
|
||||
if original_min_kpps:
|
||||
original_qos.rules += [self._make_qos_minpps_rule(
|
||||
original_qos.id, min_kpps=original_min_kpps,
|
||||
rule_id=TestQosPluginDB.QOS_MIN_PPS_RULE_ID)]
|
||||
allocation.update(
|
||||
{TestQosPluginDB.MIN_PPS_REQUEST_GROUP_UUID:
|
||||
TestQosPluginDB.MIN_PPS_RP})
|
||||
if desired_qos:
|
||||
desired_qos.rules = []
|
||||
if desired_min_kbps:
|
||||
desired_qos.rules += [self._make_qos_minbw_rule(
|
||||
desired_qos.id, min_kbps=desired_min_kbps)]
|
||||
if desired_min_kpps:
|
||||
desired_qos.rules += [self._make_qos_minpps_rule(
|
||||
desired_qos.id, min_kpps=desired_min_kpps)]
|
||||
orig_port.update(
|
||||
{'binding:profile':
|
||||
{'allocation': allocation},
|
||||
'device_id': 'uu:id'})
|
||||
return orig_port, kwargs['port']
|
||||
|
||||
def test_change_placement_allocation_increase(self):
|
||||
qos1 = self._make_qos_policy()
|
||||
qos2 = self._make_qos_policy()
|
||||
port = self._prepare_port_for_placement_allocation(qos1, qos2)
|
||||
orig_port, port = self._prepare_port_for_placement_allocation(
|
||||
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
|
||||
with mock.patch.object(self.qos_plugin._placement_client,
|
||||
'update_qos_minbw_allocation') as mock_update_qos_alloc:
|
||||
self.qos_plugin._change_placement_allocation(qos1, qos2, port)
|
||||
'update_qos_allocation') as mock_update_qos_alloc:
|
||||
self.qos_plugin._change_placement_allocation(
|
||||
qos1, qos2, orig_port, port)
|
||||
mock_update_qos_alloc.assert_called_once_with(
|
||||
consumer_uuid='uu:id',
|
||||
minbw_alloc_diff={'NET_BW_IGR_KILOBIT_PER_SEC': 1000},
|
||||
rp_uuid='rp:uu:id')
|
||||
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
|
||||
|
||||
    def test_test_change_placement_allocation_decrease(self):
    def test_change_placement_allocation_increase_min_pps(self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        port = self._prepare_port_for_placement_allocation(qos2, qos1)
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, original_min_kpps=1000, desired_min_kpps=2000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, port)
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, qos2, orig_port, port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                minbw_alloc_diff={'NET_BW_IGR_KILOBIT_PER_SEC': -1000},
                rp_uuid='rp:uu:id')
                alloc_diff={self.MIN_PPS_RP: {
                    'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 1000}})

    def test_change_placement_allocation_increase_min_pps_and_min_bw(self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
            original_min_kpps=500, desired_min_kpps=1000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, qos2, orig_port, port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                alloc_diff={
                    self.MIN_PPS_RP: {
                        'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500},
                    self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})

    def test_change_placement_allocation_change_direction_min_pps_and_min_bw(
            self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
            original_min_kpps=500, desired_min_kpps=1000)
        for rule in qos2.rules:
            rule.direction = 'egress'
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, qos2, orig_port, port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                alloc_diff={
                    self.MIN_PPS_RP: {
                        'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -500,
                        'NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC': 1000},
                    self.MIN_BW_RP: {
                        'NET_BW_IGR_KILOBIT_PER_SEC': -1000,
                        'NET_BW_EGR_KILOBIT_PER_SEC': 2000}})

    def test_change_placement_allocation_change_dir_min_pps_ingress_to_any(
            self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, original_min_kpps=1000, desired_min_kpps=1000)
        for rule in qos2.rules:
            rule.direction = 'any'
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_allocation') as mock_update_qos_alloc:
            self.assertRaises(NotImplementedError,
                self.qos_plugin._change_placement_allocation, qos1, qos2,
                orig_port, port)
            mock_update_qos_alloc.assert_not_called()

    def test_change_placement_allocation_min_bw_dataplane_enforcement(self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, desired_min_kbps=1000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
                port)
            mock_update_qos_alloc.assert_not_called()

    def test_change_placement_allocation_min_bw_dataplane_enforcement_with_pps(
            self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, desired_min_kbps=1000, original_min_kpps=500,
            desired_min_kpps=1000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
                port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                alloc_diff={
                    self.MIN_PPS_RP: {
                        'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500}})

    def test_change_placement_allocation_decrease(self):
        original_qos = self._make_qos_policy()
        desired_qos = self._make_qos_policy()
        orig_port, port = self._prepare_port_for_placement_allocation(
            original_qos, desired_qos, original_min_kbps=2000,
            desired_min_kbps=1000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                original_qos, desired_qos, orig_port, port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})

    def test_change_placement_allocation_decrease_min_pps(self):
        original_qos = self._make_qos_policy()
        desired_qos = self._make_qos_policy()
        orig_port, port = self._prepare_port_for_placement_allocation(
            original_qos, desired_qos, original_min_kpps=2000,
            desired_min_kpps=1000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                original_qos, desired_qos, orig_port, port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                alloc_diff={self.MIN_PPS_RP: {
                    'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -1000}})

    def test_change_placement_allocation_no_original_qos(self):
        qos1 = None
@@ -1967,10 +2404,11 @@ class TestQosPluginDB(base.BaseQosTestCase):
        rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
        qos2.rules = [rule2_obj]
        orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
        port = {}
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, qos2, orig_port)
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
                port)
            mock_update_qos_alloc.assert_not_called()

    def test_change_placement_allocation_no_original_allocation(self):
@@ -1981,22 +2419,27 @@ class TestQosPluginDB(base.BaseQosTestCase):
        rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
        qos2.rules = [rule2_obj]
        orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
        port = {}
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, qos2, orig_port)
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
                port)
            mock_update_qos_alloc.assert_not_called()

    def test_change_placement_allocation_new_policy_empty(self):
        qos1 = self._make_qos_policy()
        port = self._prepare_port_for_placement_allocation(qos1)
        orig_port, port = self._prepare_port_for_placement_allocation(qos1,
            original_min_kbps=1000, original_min_kpps=2000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, None, port)
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, None, orig_port, port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                minbw_alloc_diff={'NET_BW_IGR_KILOBIT_PER_SEC': -1000},
                rp_uuid='rp:uu:id')
                alloc_diff={
                    self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000},
                    self.MIN_PPS_RP: {
                        'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -2000}})

    def test_change_placement_allocation_no_min_bw(self):
        qos1 = self._make_qos_policy()
@@ -2005,24 +2448,32 @@ class TestQosPluginDB(base.BaseQosTestCase):
        bw_limit_rule2 = rule_object.QosDscpMarkingRule(dscp_mark=18)
        qos1.rules = [bw_limit_rule1]
        qos2.rules = [bw_limit_rule2]
        port = {'binding:profile': {'allocation': 'rp:uu:id'},
                'device_id': 'uu:id'}
        orig_port = {
            'binding:profile': {'allocation': {
                self.MIN_BW_REQUEST_GROUP_UUID: self.MIN_BW_RP}},
            'device_id': 'uu:id',
            'id': '9416c220-160a-11ec-ba3d-474633eb825c',
        }
        port = {}

        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, None, port)
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, None, orig_port, port)
            mock_update_qos_alloc.assert_not_called()

    def test_change_placement_allocation_old_rule_not_min_bw(self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        bw_limit_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
        port = self._prepare_port_for_placement_allocation(qos1, qos2)
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, desired_min_kbps=2000)
        qos1.rules = [bw_limit_rule]

        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, port)
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
                port)
            mock_update_qos_alloc.assert_not_called()

    def test_change_placement_allocation_new_rule_not_min_bw(self):
@@ -2030,47 +2481,55 @@ class TestQosPluginDB(base.BaseQosTestCase):
        qos2 = self._make_qos_policy()
        bw_limit_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
        qos2.rules = [bw_limit_rule]
        port = self._prepare_port_for_placement_allocation(qos1)
        orig_port, port = self._prepare_port_for_placement_allocation(qos1,
            original_min_kbps=1000)

        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, port)
            mock_update_qos_alloc.assert_not_called()
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, qos2, orig_port, port)
            mock_update_qos_alloc.assert_called_once_with(
                consumer_uuid='uu:id',
                alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})

    def test_change_placement_allocation_equal_minkbps(self):
    def test_change_placement_allocation_equal_minkbps_and_minkpps(self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        port = self._prepare_port_for_placement_allocation(qos1, qos2, 1000,
            1000)
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, original_min_kbps=1000, desired_min_kbps=1000,
            original_min_kpps=1000, desired_min_kpps=1000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(qos1, qos2, port)
                'update_qos_allocation') as mock_update_qos_alloc:
            self.qos_plugin._change_placement_allocation(
                qos1, qos2, orig_port, port)
            mock_update_qos_alloc.assert_not_called()

    def test_change_placement_allocation_update_conflict(self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        port = self._prepare_port_for_placement_allocation(qos1, qos2)
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
                'update_qos_allocation') as mock_update_qos_alloc:
            mock_update_qos_alloc.side_effect = ks_exc.Conflict(
                response={'errors': [{'code': 'placement.concurrent_update'}]}
            )
            self.assertRaises(
                qos_exc.QosPlacementAllocationConflict,
                neutron_qos_exc.QosPlacementAllocationUpdateConflict,
                self.qos_plugin._change_placement_allocation,
                qos1, qos2, port)
                qos1, qos2, orig_port, port)

    def test_change_placement_allocation_update_generation_conflict(self):
        qos1 = self._make_qos_policy()
        qos2 = self._make_qos_policy()
        port = self._prepare_port_for_placement_allocation(qos1, qos2)
        orig_port, port = self._prepare_port_for_placement_allocation(
            qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
        with mock.patch.object(self.qos_plugin._placement_client,
                'update_qos_minbw_allocation') as mock_update_qos_alloc:
                'update_qos_allocation') as mock_update_qos_alloc:
            mock_update_qos_alloc.side_effect = (
                pl_exc.PlacementAllocationGenerationConflict(
                    consumer='rp:uu:id'))
                    consumer=self.MIN_BW_RP))
            self.assertRaises(
                pl_exc.PlacementAllocationGenerationConflict,
                self.qos_plugin._change_placement_allocation,
                qos1, qos2, port)
                qos1, qos2, orig_port, port)

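The two conflict tests above pin down how Placement errors surface from ``_change_placement_allocation``: a generic ``ks_exc.Conflict`` (for example ``placement.concurrent_update``) is translated into the new ``QosPlacementAllocationUpdateConflict``, while ``PlacementAllocationGenerationConflict`` propagates untouched. A rough sketch of that behaviour follows; the helper name is hypothetical, and it assumes the new exception accepts the diff and consumer as message kwargs.

from keystoneauth1 import exceptions as ks_exc

# Assumption: the new exception is importable under the alias the tests use.
from neutron.exceptions import qos as neutron_qos_exc


def apply_alloc_diff_sketch(placement_client, consumer_uuid, alloc_diff):
    """Hypothetical wrapper mirroring the error handling the tests expect."""
    try:
        placement_client.update_qos_allocation(
            consumer_uuid=consumer_uuid, alloc_diff=alloc_diff)
    except ks_exc.Conflict:
        # A Placement 409 such as 'placement.concurrent_update' is surfaced
        # as the neutron-side conflict exception introduced by this change.
        raise neutron_qos_exc.QosPlacementAllocationUpdateConflict(
            alloc_diff=alloc_diff, consumer=consumer_uuid)
    # PlacementAllocationGenerationConflict raised by the client is not
    # caught here; the second test asserts it propagates to the caller.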
@@ -0,0 +1,17 @@
---
features:
  - |
    Added the port-resource-request-groups API extension, which provides
    support for the new format of the port's ``resource_request`` and
    ``binding:profile.allocation`` attributes. The new format allows requesting
    multiple groups of resources and traits from the same RP subtree.

    Assigning a new ``QoS policy`` with a ``minimum_packet_rate`` rule to an
    ``already bound port`` updates the allocation in Placement.
    ``NOTE``: Placement allocation update is not supported if the original QoS
    policy had no ``minimum_packet_rate`` rule. Changing from a direction-less
    ``minimum_packet_rate`` rule to a direction-oriented
    ``minimum_packet_rate`` rule is not supported.
fixes:
  - |
    Fixes bug `1943724 <https://bugs.launchpad.net/neutron/+bug/1943724>`_.
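To make the release note concrete, the comparison below sketches the two ``resource_request`` shapes. It is illustrative only: the group UUIDs, traits and amounts are invented, and the authoritative schema is the ``port-resource-request-groups`` API definition in neutron-lib.

# Old resource_request: a single group of resources and required traits.
legacy_resource_request = {
    'required': ['CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'],
    'resources': {'NET_BW_IGR_KILOBIT_PER_SEC': 1000},
}

# New resource_request: a list of request groups plus ``same_subtree``,
# so bandwidth and packet-rate resources can be requested as separate
# groups that must still be satisfied from one RP subtree.
grouped_resource_request = {
    'request_groups': [
        {'id': 'bw-group-uuid',
         'required': ['CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'],
         'resources': {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}},
        {'id': 'pps-group-uuid',
         'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
         'resources': {'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 1000}},
    ],
    'same_subtree': ['bw-group-uuid', 'pps-group-uuid'],
}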