Support interface attach / detach with new resource request format

The interface attach and detach logic is now fully adapted to the new
extended resource request format, and supports more than one request
group in a single port.

blueprint: qos-minimum-guaranteed-packet-rate
Change-Id: I73e6acf5adfffa9203efa3374671ec18f4ea79eb
This commit is contained in:
Balazs Gibizer 2021-07-09 15:44:57 +02:00
parent 9cb92e8832
commit 44309c419f
13 changed files with 294 additions and 164 deletions

View File

@ -44,14 +44,9 @@ Extended resource request
Since neutron 19.0.0 (Xena), neutron implements an extended resource request
format via the ``port-resource-request-groups`` neutron API extension. As
of nova 24.0.0 (Xena), nova does not fully support the new extension. If the
extension is enabled in neutron, then nova will reject interface attach
operation. Admins should not enable this API extension in neutron.
Please note that Nova only supports the server create operation if every
nova-compute service also upgraded to Xena version and the
``[upgrade_levels]/compute`` configuration does not prevent
the computes from using the latest RPC version.
of nova 24.0.0 (Xena), nova also supports this extension if every nova-compute
service is upgraded to Xena version and the ``[upgrade_levels]/compute``
configuration does not prevent the computes from using the latest RPC version.
See :nova-doc:`the admin guide <admin/port_with_resource_request.html>` for
administrative details.

View File

@ -71,12 +71,8 @@ Extended resource request
Since neutron 19.0.0 (Xena), neutron implements an extended resource request
format via the ``port-resource-request-groups`` neutron API extension. As
of nova 24.0.0 (Xena), Nova does not fully support the new extension. If the
extension is enabled in neutron, then nova will reject interface attach
operation. Admins should not enable this API extension in neutron.
Please note that Nova only supports the server create operation if every
nova-compute service also upgraded to Xena version and the
of nova 24.0.0 (Xena), nova also supports this extension if every nova-compute
service is upgraded to Xena version and the
:oslo.config:option:`upgrade_levels.compute` configuration does not prevent
the computes from using the latest RPC version.

View File

@ -177,7 +177,9 @@ class InterfaceAttachmentController(wsgi.Controller):
exception.NetworksWithQoSPolicyNotSupported,
exception.InterfaceAttachPciClaimFailed,
exception.InterfaceAttachResourceAllocationFailed,
exception.ForbiddenPortsWithAccelerator) as e:
exception.ForbiddenPortsWithAccelerator,
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except (
exception.OperationNotSupportedForVDPAInterface,

View File

@ -113,6 +113,7 @@ SUPPORT_VNIC_TYPE_ACCELERATOR = 57
MIN_COMPUTE_BOOT_WITH_EXTENDED_RESOURCE_REQUEST = 58
MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST = 59
MIN_COMPUTE_INT_ATTACH_WITH_EXTENDED_RES_REQ = 60
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
@ -5097,20 +5098,37 @@ class API:
self.volume_api.attachment_delete(
context, new_attachment_id)
def support_port_attach(self, context, port):
"""Returns false if neutron is configured with extended resource
request and the port has resource request.
This function is only here temporary to help mocking this check in the
functional test environment.
"""
if not self.network_api.has_extended_resource_request_extension(
def ensure_compute_version_for_resource_request(
self, context, instance, port
):
"""Checks that the compute service version is new enough for the
resource request of the port.
"""
if self.network_api.has_extended_resource_request_extension(
context
):
return True
# TODO(gibi): Remove this check in Y where we can be sure that
# the compute is already upgraded to X.
res_req = port.get(constants.RESOURCE_REQUEST) or {}
groups = res_req.get('request_groups', [])
if groups:
svc = objects.Service.get_by_host_and_binary(
context, instance.host, 'nova-compute')
if svc.version < MIN_COMPUTE_INT_ATTACH_WITH_EXTENDED_RES_REQ:
raise exception.ExtendedResourceRequestOldCompute()
resource_request = port.get('resource_request', {})
return not resource_request.get('request_groups', [])
else:
# NOTE(gibi): Checking if the requested port has resource request
# as such ports are only supported if the compute service version
# is >= 55.
# TODO(gibi): Remove this check in X as there we can be sure
# that all computes are new enough.
if port.get(constants.RESOURCE_REQUEST):
svc = objects.Service.get_by_host_and_binary(
context, instance.host, 'nova-compute')
if svc.version < 55:
raise exception.AttachInterfaceWithQoSPolicyNotSupported(
instance_uuid=instance.uuid)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
@ -5124,18 +5142,6 @@ class API:
if port_id:
port = self.network_api.show_port(context, port_id)['port']
# NOTE(gibi): Checking if the requested port has resource request
# as such ports are only supported if the compute service version
# is >= 55.
# TODO(gibi): Remove this check in X as there we can be sure
# that all computes are new enough.
if port.get(constants.RESOURCE_REQUEST):
svc = objects.Service.get_by_host_and_binary(
context, instance.host, 'nova-compute')
if svc.version < 55:
raise exception.AttachInterfaceWithQoSPolicyNotSupported(
instance_uuid=instance.uuid)
if port.get('binding:vnic_type', "normal") == "vdpa":
# FIXME(sean-k-mooney): Attach works but detach results in a
# QEMU error; blocked until this is resolved
@ -5148,8 +5154,8 @@ class API:
network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL):
raise exception.ForbiddenPortsWithAccelerator()
if not self.support_port_attach(context, port):
raise exception.AttachWithExtendedQoSPolicyNotSupported()
self.ensure_compute_version_for_resource_request(
context, instance, port)
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,

View File

@ -7650,27 +7650,28 @@ class ComputeManager(manager.Manager):
if not request_groups:
return None, None
# NOTE(gibi): we assume a single RequestGroup here as:
# 1) there can only be a single port per interface attach request
# 2) a single port can only request resources in a single RequestGroup
# as per the current neutron API.
# #2) might change in the future so both
# nova.network.neutron.API.create_resource_requests() and this function
# take a list of groups
request_group = request_groups[0]
# restrict the resource request to the current compute node. The
# compute node uuid is the uuid of the root provider of the node in
# placement
compute_node_uuid = objects.ComputeNode.get_by_nodename(
context, instance.node).uuid
request_group.in_tree = compute_node_uuid
# we can have multiple request groups, it would be enough to restrict
# only one of them to the compute tree but for symmetry we restrict
# all of them
for request_group in request_groups:
request_group.in_tree = compute_node_uuid
# NOTE(gibi): when support is added for attaching a cyborg based
# smart NIC the ResourceRequest could be extended to handle multiple
# request groups.
rr = scheduler_utils.ResourceRequest.from_request_group(
request_group, request_level_params)
# NOTE(gibi): group policy is mandatory in a resource request if there
# are multiple groups. The policy can only come from the flavor today
# and a new flavor is not provided with an interface attach request and
# the instance's current flavor might not have a policy. Still we are
# attaching a single port where currently the two possible groups
# (one for bandwidth and one for packet rate) will always be allocated
# from different providers. So both possible policies (none, isolated)
# are always fulfilled for this single port. We still have to specify
# one so we specify the least restrictive now.
rr = scheduler_utils.ResourceRequest.from_request_groups(
request_groups, request_level_params, group_policy='none')
res = self.reportclient.get_allocation_candidates(context, rr)
alloc_reqs, provider_sums, version = res
@ -10293,14 +10294,20 @@ class ComputeManager(manager.Manager):
{'intf': vif['id']},
instance=instance)
profile = vif.get('profile', {}) or {} # profile can be None
if profile.get('allocation'):
rps = profile.get('allocation')
if rps:
if isinstance(rps, dict):
# if extended resource request extension is enabled
# then we have a dict of providers, flatten it for the
# log.
rps = ','.join(rps.values())
LOG.error(
'The bound port %(port_id)s is deleted in Neutron but '
'the resource allocation on the resource provider '
'%(rp_uuid)s is leaked until the server '
'the resource allocation on the resource providers '
'%(rp_uuid)s are leaked until the server '
'%(server_uuid)s is deleted.',
{'port_id': vif['id'],
'rp_uuid': vif['profile']['allocation'],
'rp_uuid': rps,
'server_uuid': instance.uuid})
del network_info[index]

View File

@ -1754,15 +1754,27 @@ class API:
if port:
# if there is resource associated to this port then that needs to
# be deallocated so lets return info about such allocation
resource_request = port.get(constants.RESOURCE_REQUEST)
resource_request = port.get(constants.RESOURCE_REQUEST) or {}
profile = get_binding_profile(port)
allocated_rp = profile.get(constants.ALLOCATION)
if resource_request and allocated_rp:
port_allocation = {
allocated_rp: {
"resources": resource_request.get("resources", {})
if self.has_extended_resource_request_extension(context, neutron):
# new format
groups = resource_request.get(constants.REQUEST_GROUPS)
if groups:
allocated_rps = profile.get(constants.ALLOCATION)
for group in groups:
allocated_rp = allocated_rps[group['id']]
port_allocation[allocated_rp] = {
"resources": group.get("resources", {})
}
else:
# legacy format
allocated_rp = profile.get(constants.ALLOCATION)
if resource_request and allocated_rp:
port_allocation = {
allocated_rp: {
"resources": resource_request.get("resources", {})
}
}
}
else:
# Check the info_cache. If the port is still in the info_cache and
# in that cache there is allocation in the profile then we suspect

View File

@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 59
SERVICE_VERSION = 60
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@ -209,6 +209,10 @@ SERVICE_VERSION_HISTORY = (
# Add support for server move operations with neutron extended resource
# request
{'compute_rpc': '6.0'},
# Version 60: Compute RPC v6.0:
# Add support for interface attach operation with neutron extended resource
# request
{'compute_rpc': '6.0'},
)
# This is used to raise an error at service startup if older than N-1 computes

View File

@ -199,17 +199,22 @@ class ResourceRequest(object):
return res_req
@classmethod
def from_request_group(
def from_request_groups(
cls,
request_group: 'objects.RequestGroup',
request_groups: ty.List['objects.RequestGroup'],
request_level_params: 'objects.RequestLevelParams',
group_policy: str,
) -> 'ResourceRequest':
"""Create a new instance of ResourceRequest from a RequestGroup."""
"""Create a new instance of ResourceRequest from a list of
RequestGroup objects.
"""
res_req = cls()
res_req._root_required = request_level_params.root_required
res_req._root_forbidden = request_level_params.root_forbidden
res_req._same_subtree = request_level_params.same_subtree
res_req._add_request_group(request_group)
res_req.group_policy = group_policy
for request_group in request_groups:
res_req._add_request_group(request_group)
res_req.strip_zeros()
return res_req

View File

@ -19,6 +19,7 @@ Provides common functionality for integrated unit tests
import collections
import random
import re
import string
import time
@ -258,9 +259,9 @@ class InstanceHelperMixin:
server['id'], expected_statuses, actual_status,
))
def _wait_for_log(self, log_line):
def _wait_for_log(self, log_line_regex):
for i in range(10):
if log_line in self.stdlog.logger.output:
if re.search(log_line_regex, self.stdlog.logger.output):
return
time.sleep(0.5)

View File

@ -14,7 +14,6 @@
import copy
import logging
import unittest
from keystoneauth1 import adapter
import mock
@ -1342,16 +1341,12 @@ class PortResourceRequestBasedSchedulingTest(
response = self.api.api_post('/os-server-external-events', events).body
self.assertEqual(200, response['events'][0]['code'])
port_rp_uuid = self.ovs_bridge_rp_per_host[self.compute1_rp_uuid]
# 1) Nova logs an ERROR about the leak
self._wait_for_log(
'ERROR [nova.compute.manager] The bound port %(port_id)s is '
'deleted in Neutron but the resource allocation on the resource '
'provider %(rp_uuid)s is leaked until the server %(server_uuid)s '
'is deleted.'
'The bound port %(port_id)s is deleted in Neutron but the '
'resource allocation on the resource providers .* are leaked '
'until the server %(server_uuid)s is deleted.'
% {'port_id': port['id'],
'rp_uuid': port_rp_uuid,
'server_uuid': server['id']})
allocations = self.placement.get(
@ -1635,8 +1630,6 @@ class ExtendedPortResourceRequestBasedSchedulingTestBase(
port['id'], group_req, pps_allocations)
# TODO(gibi): The tests are failing today as we need to fix a bunch of TODOs in
# the code.
class MultiGroupResourceRequestBasedSchedulingTest(
ExtendedPortResourceRequestBasedSchedulingTestBase,
PortResourceRequestBasedSchedulingTest,
@ -1650,31 +1643,6 @@ class MultiGroupResourceRequestBasedSchedulingTest(
super().setUp()
self.neutron = self.useFixture(
MultiGroupResourceRequestNeutronFixture(self))
# Turn off the blanket rejections of the extended resource request in
# port attach. This test class wants to prove that the extended
# resource request is supported.
patcher = mock.patch(
'nova.compute.api.API.support_port_attach',
return_value=True,
)
self.addCleanup(patcher.stop)
patcher.start()
@unittest.expectedFailure
def test_interface_attach_with_resource_request(self):
super().test_interface_attach_with_resource_request()
@unittest.expectedFailure
def test_interface_attach_with_resource_request_no_candidates(self):
super().test_interface_attach_with_resource_request_no_candidates()
@unittest.expectedFailure
def test_interface_detach_with_port_with_bandwidth_request(self):
super().test_interface_detach_with_port_with_bandwidth_request()
@unittest.expectedFailure
def test_delete_bound_port_in_neutron_with_resource_request(self):
super().test_delete_bound_port_in_neutron_with_resource_request()
class ServerMoveWithPortResourceRequestTest(
@ -2959,51 +2927,26 @@ class ExtendedResourceRequestOldCompute(
lambda server: shelve_offload_then_unshelve(server),
)
class ExtendedResourceRequestTempNegativeTest(
PortResourceRequestBasedSchedulingTestBase):
"""A set of temporary tests to show that nova currently rejects requests
that uses the extended-resource-request Neutron API extension. These test
are expected to be removed when support for the extension is implemented
in nova.
"""
def _test_operation(self, op_name, op_callable):
# boot a server with a qos port still using the old Neutron resource
# request API extension
@mock.patch('nova.objects.service.Service.get_by_host_and_binary')
def test_interface_attach(self, mock_get_service):
# service version 59 allows booting
mock_get_service.return_value.version = 59
server = self._create_server(
flavor=self.flavor,
networks=[{'port': self.neutron.port_with_resource_request['id']}],
networks=[{'port': self.neutron.port_1['id']}],
)
self._wait_for_state_change(server, 'ACTIVE')
# enable the new extended-resource-request Neutron API extension by
# replacing the old neutron fixture with a new one that enables the
# extension. Note that we are carrying over the state of the neutron
# to the new extension to keep the port bound to the server.
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture.
create_with_existing_neutron_state(self.neutron))
# nova does not support this Neutron API extension yet so the
# operation fails
self._wait_for_state_change(server, "ACTIVE")
# for interface attach service version 60 would be needed
ex = self.assertRaises(
client.OpenStackApiException,
op_callable,
self._attach_interface,
server,
self.neutron.port_with_sriov_resource_request['id'],
)
self.assertEqual(400, ex.response.status_code)
self.assertIn(
f'The {op_name} with port having extended resource request, like '
f'a port with both QoS minimum bandwidth and packet rate '
f'policies, is not yet supported.',
'The port-resource-request-groups neutron API extension is not '
'supported by old nova compute service. Upgrade your compute '
'services to Xena (24.0.0) or later.',
str(ex)
)
def test_interface_attach(self):
self._test_operation(
'interface attach server operation',
lambda server: self._attach_interface(
server,
self.neutron.port_with_sriov_resource_request['id'],
),
)

View File

@ -7267,6 +7267,10 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
mock_record.assert_called_once_with(
self.context, instance, instance_actions.ATTACH_INTERFACE)
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch('nova.objects.service.Service.get_by_host_and_binary')
@mock.patch('nova.compute.api.API._record_action_start')
def test_attach_interface_qos_aware_port_old_compute(
@ -7330,6 +7334,84 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
port_id=mock.sentinel.port_id, requested_ip=mock.sentinel.ip,
tag=mock.sentinel.tag)
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=True),
)
@mock.patch('nova.objects.service.Service.get_by_host_and_binary')
@mock.patch('nova.compute.api.API._record_action_start')
def test_attach_interface_extended_qos_port_old_compute(
self, mock_record, mock_get_service
):
instance = self._create_instance_obj()
service = objects.Service()
service.version = 59
mock_get_service.return_value = service
with mock.patch.object(
self.compute_api.network_api, 'show_port',
return_value={
'port': {
constants.RESOURCE_REQUEST: {
'request_groups': [
{
'resources': {'CUSTOM_RESOURCE_CLASS': 42}
}
],
}
}
}
) as mock_show_port:
self.assertRaises(
nova.exception.ExtendedResourceRequestOldCompute,
self.compute_api.attach_interface,
self.context, instance,
'foo_net_id', 'foo_port_id', None
)
mock_show_port.assert_called_once_with(self.context, 'foo_port_id')
mock_get_service.assert_called_once_with(
self.context, instance.host, 'nova-compute')
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=True)
)
@mock.patch('nova.compute.rpcapi.ComputeAPI.attach_interface')
@mock.patch('nova.objects.service.Service.get_by_host_and_binary')
@mock.patch('nova.compute.api.API._record_action_start')
def test_attach_interface_extended_qos_port(
self, mock_record, mock_get_service, mock_attach
):
instance = self._create_instance_obj()
service = objects.Service()
service.version = 60
mock_get_service.return_value = service
with mock.patch.object(
self.compute_api.network_api, 'show_port',
return_value={
'port': {
constants.RESOURCE_REQUEST: {
'request_groups': [
{
'resources': {'CUSTOM_RESOURCE_CLASS': 42}
}
],
}
}
}
) as mock_show_port:
self.compute_api.attach_interface(
self.context, instance, mock.sentinel.net_id,
mock.sentinel.port_id, mock.sentinel.ip, mock.sentinel.tag)
mock_show_port.assert_called_once_with(
self.context, mock.sentinel.port_id)
mock_get_service.assert_called_once_with(
self.context, instance.host, 'nova-compute')
mock_attach.assert_called_once_with(
self.context, instance=instance, network_id=mock.sentinel.net_id,
port_id=mock.sentinel.port_id, requested_ip=mock.sentinel.ip,
tag=mock.sentinel.tag)
@mock.patch('nova.compute.api.API._record_action_start')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'detach_interface')
def test_detach_interface(self, mock_detach, mock_record):

View File

@ -5412,6 +5412,10 @@ class TestAPI(TestAPIBase):
vif.destroy.assert_called_once_with()
self.assertEqual({}, port_allocation)
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch('nova.network.neutron.API._delete_nic_metadata')
@mock.patch.object(objects.VirtualInterface, 'get_by_uuid')
@ -5458,6 +5462,62 @@ class TestAPI(TestAPIBase):
},
port_allocation)
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=True),
)
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch('nova.network.neutron.API._delete_nic_metadata')
@mock.patch.object(objects.VirtualInterface, 'get_by_uuid')
@mock.patch('nova.network.neutron.get_client')
def test_deallocate_port_for_instance_port_with_extended_allocation(
self, mock_get_client, mock_get_vif_by_uuid, mock_del_nic_meta,
mock_netinfo):
mock_inst = mock.Mock(project_id="proj-1",
availability_zone='zone-1',
uuid='inst-1')
mock_inst.get_network_info.return_value = [
model.VIF(id=uuids.port_uid, preserve_on_delete=True)
]
vif = objects.VirtualInterface()
vif.tag = 'foo'
vif.destroy = mock.MagicMock()
mock_get_vif_by_uuid.return_value = vif
mock_client = mock.Mock()
mock_client.show_port.return_value = {
'port': {
constants.RESOURCE_REQUEST: {
'request_groups': [
{
'id': uuids.group1,
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1000,
}
}
],
},
'binding:profile': {
'allocation': {uuids.group1: uuids.rp1}
}
}
}
mock_get_client.return_value = mock_client
_, port_allocation = self.api.deallocate_port_for_instance(
mock.sentinel.ctx, mock_inst, uuids.port_id)
self.assertEqual(
{
uuids.rp1: {
"resources": {
'NET_BW_EGR_KILOBIT_PER_SEC': 1000
}
}
},
port_allocation
)
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch('nova.network.neutron.API._delete_nic_metadata')
@mock.patch.object(objects.VirtualInterface, 'get_by_uuid')

View File

@ -1357,36 +1357,53 @@ class TestUtils(TestUtilsBase):
rr = utils.ResourceRequest.from_request_spec(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_from_request_group(self):
rg = objects.RequestGroup.from_port_request(
def test_resource_request_from_request_groups(self):
rgs = objects.RequestGroup.from_extended_port_request(
self.context,
uuids.port_id,
port_resource_request={
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
"required": ["CUSTOM_PHYSNET_2",
"CUSTOM_VNIC_TYPE_NORMAL"]
"request_groups": [
{
"id": "group1",
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
"required": ["CUSTOM_PHYSNET_2",
"CUSTOM_VNIC_TYPE_NORMAL"]
},
{
"id": "group2",
"resources": {
"NET_PACKET_RATE_KILOPACKET_PER_SEC": 100,
},
"required": ["CUSTOM_VNIC_TYPE_NORMAL"],
}
],
}
)
req_lvl_params = objects.RequestLevelParams(
root_required={"CUSTOM_BLUE"},
root_forbidden={"CUSTOM_DIRTY"},
same_subtree=[[uuids.group1]],
same_subtree=[["group1", "group2"]],
)
rr = utils.ResourceRequest.from_request_group(rg, req_lvl_params)
rr = utils.ResourceRequest.from_request_groups(
rgs, req_lvl_params, 'none')
self.assertEqual(
f'limit=1000&'
f'required{uuids.port_id}='
f'CUSTOM_PHYSNET_2%2C'
f'CUSTOM_VNIC_TYPE_NORMAL&'
f'resources{uuids.port_id}='
f'NET_BW_EGR_KILOBIT_PER_SEC%3A1000%2C'
f'NET_BW_IGR_KILOBIT_PER_SEC%3A1000&'
f'root_required=CUSTOM_BLUE%2C%21CUSTOM_DIRTY&'
f'same_subtree={uuids.group1}',
'group_policy=none&'
'limit=1000&'
'requiredgroup1='
'CUSTOM_PHYSNET_2%2C'
'CUSTOM_VNIC_TYPE_NORMAL&'
'requiredgroup2='
'CUSTOM_VNIC_TYPE_NORMAL&'
'resourcesgroup1='
'NET_BW_EGR_KILOBIT_PER_SEC%3A1000%2C'
'NET_BW_IGR_KILOBIT_PER_SEC%3A1000&'
'resourcesgroup2='
'NET_PACKET_RATE_KILOPACKET_PER_SEC%3A100&'
'root_required=CUSTOM_BLUE%2C%21CUSTOM_DIRTY&'
'same_subtree=group1%2Cgroup2',
rr.to_querystring())
def test_resource_request_add_group_inserts_the_group(self):