Postpone heavy policy checks for ports until later
When a port is validated, we check, among other things, that the user is the owner of the corresponding network. Sadly, this check requires a plugin call to fetch the network, which goes straight to the database. With the current policy, if there are multiple ports to validate and the user is not an admin, we fetch the network for each port, e.g. making list operations on ports scale badly. To avoid that, we should postpone OwnerCheck (tenant_id) based validations that rely on foreign keys, tenant_id:%(network:...)s, until as late as possible. This lets policy checks avoid hitting the database in some cases, such as when a port is owned by the current user. Also added some unit tests to avoid later regressions: DbOperationBoundMixin now passes the user context into API calls, which allows us to trigger policy engine checks when executing listing operations. Change-Id: I99e0c4280b06d8ebab0aa8adc497662c995133ad Closes-Bug: #1513782
This commit is contained in:
parent
13001099a6
commit
4398f14a9a
@ -4,7 +4,7 @@
|
||||
"admin_or_owner": "rule:context_is_admin or rule:owner",
|
||||
"context_is_advsvc": "role:advsvc",
|
||||
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
|
||||
"admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
|
||||
"admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner",
|
||||
"admin_only": "rule:context_is_admin",
|
||||
"regular_user": "",
|
||||
"shared": "field:networks:shared=True",
|
||||
@ -60,30 +60,30 @@
|
||||
|
||||
"network_device": "field:port:device_owner=~^network:",
|
||||
"create_port": "",
|
||||
"create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:binding:host_id": "rule:admin_only",
|
||||
"create_port:binding:profile": "rule:admin_only",
|
||||
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
|
||||
"get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
|
||||
"get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
|
||||
"get_port:queue_id": "rule:admin_only",
|
||||
"get_port:binding:vif_type": "rule:admin_only",
|
||||
"get_port:binding:vif_details": "rule:admin_only",
|
||||
"get_port:binding:host_id": "rule:admin_only",
|
||||
"get_port:binding:profile": "rule:admin_only",
|
||||
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
|
||||
"update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
|
||||
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:binding:host_id": "rule:admin_only",
|
||||
"update_port:binding:profile": "rule:admin_only",
|
||||
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
|
||||
"delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
|
||||
"delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
|
||||
|
||||
"get_router:ha": "rule:admin_only",
|
||||
"create_router": "rule:regular_user",
|
||||
|
@ -4,7 +4,7 @@
|
||||
"admin_or_owner": "rule:context_is_admin or rule:owner",
|
||||
"context_is_advsvc": "role:advsvc",
|
||||
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
|
||||
"admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
|
||||
"admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner",
|
||||
"admin_only": "rule:context_is_admin",
|
||||
"regular_user": "",
|
||||
"shared": "field:networks:shared=True",
|
||||
@ -60,30 +60,30 @@
|
||||
|
||||
"network_device": "field:port:device_owner=~^network:",
|
||||
"create_port": "",
|
||||
"create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:binding:host_id": "rule:admin_only",
|
||||
"create_port:binding:profile": "rule:admin_only",
|
||||
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
|
||||
"get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
|
||||
"get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
|
||||
"get_port:queue_id": "rule:admin_only",
|
||||
"get_port:binding:vif_type": "rule:admin_only",
|
||||
"get_port:binding:vif_details": "rule:admin_only",
|
||||
"get_port:binding:host_id": "rule:admin_only",
|
||||
"get_port:binding:profile": "rule:admin_only",
|
||||
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
|
||||
"update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
|
||||
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:binding:host_id": "rule:admin_only",
|
||||
"update_port:binding:profile": "rule:admin_only",
|
||||
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
|
||||
"update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
|
||||
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
|
||||
"delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
|
||||
"delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
|
||||
|
||||
"get_router:ha": "rule:admin_only",
|
||||
"create_router": "rule:regular_user",
|
||||
|
@ -59,9 +59,9 @@ DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
|
||||
DEVICE_OWNER_NOT_COMPUTE = constants.DEVICE_OWNER_DHCP
|
||||
|
||||
|
||||
def optional_ctx(obj, fallback):
|
||||
def optional_ctx(obj, fallback, **kwargs):
|
||||
if not obj:
|
||||
return fallback()
|
||||
return fallback(**kwargs)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def context_wrapper():
|
||||
@ -289,10 +289,12 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
||||
return req.get_response(self.api)
|
||||
|
||||
def _create_network(self, fmt, name, admin_state_up,
|
||||
arg_list=None, **kwargs):
|
||||
arg_list=None, set_context=False, tenant_id=None,
|
||||
**kwargs):
|
||||
tenant_id = tenant_id or self._tenant_id
|
||||
data = {'network': {'name': name,
|
||||
'admin_state_up': admin_state_up,
|
||||
'tenant_id': self._tenant_id}}
|
||||
'tenant_id': tenant_id}}
|
||||
for arg in (('admin_state_up', 'tenant_id', 'shared',
|
||||
'vlan_transparent',
|
||||
'availability_zone_hints') + (arg_list or ())):
|
||||
@ -300,10 +302,10 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
||||
if arg in kwargs:
|
||||
data['network'][arg] = kwargs[arg]
|
||||
network_req = self.new_create_request('networks', data, fmt)
|
||||
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
|
||||
if set_context and tenant_id:
|
||||
# create a specific auth context for this request
|
||||
network_req.environ['neutron.context'] = context.Context(
|
||||
'', kwargs['tenant_id'])
|
||||
'', tenant_id)
|
||||
|
||||
return network_req.get_response(self.api)
|
||||
|
||||
@ -373,9 +375,11 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
||||
return subnetpool_res
|
||||
|
||||
def _create_port(self, fmt, net_id, expected_res_status=None,
|
||||
arg_list=None, **kwargs):
|
||||
arg_list=None, set_context=False, tenant_id=None,
|
||||
**kwargs):
|
||||
tenant_id = tenant_id or self._tenant_id
|
||||
data = {'port': {'network_id': net_id,
|
||||
'tenant_id': self._tenant_id}}
|
||||
'tenant_id': tenant_id}}
|
||||
|
||||
for arg in (('admin_state_up', 'device_id',
|
||||
'mac_address', 'name', 'fixed_ips',
|
||||
@ -392,10 +396,10 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
||||
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
|
||||
data['port']['device_id'] = device_id
|
||||
port_req = self.new_create_request('ports', data, fmt)
|
||||
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
|
||||
if set_context and tenant_id:
|
||||
# create a specific auth context for this request
|
||||
port_req.environ['neutron.context'] = context.Context(
|
||||
'', kwargs['tenant_id'])
|
||||
'', tenant_id)
|
||||
|
||||
port_res = port_req.get_response(self.api)
|
||||
if expected_res_status:
|
||||
@ -595,7 +599,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
||||
ipv6_address_mode=None,
|
||||
tenant_id=None,
|
||||
set_context=False):
|
||||
with optional_ctx(network, self.network) as network_to_use:
|
||||
with optional_ctx(network, self.network,
|
||||
set_context=set_context,
|
||||
tenant_id=tenant_id) as network_to_use:
|
||||
subnet = self._make_subnet(fmt or self.fmt,
|
||||
network_to_use,
|
||||
gateway_ip,
|
||||
@ -621,10 +627,16 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
|
||||
yield subnetpool
|
||||
|
||||
@contextlib.contextmanager
|
||||
def port(self, subnet=None, fmt=None, **kwargs):
|
||||
with optional_ctx(subnet, self.subnet) as subnet_to_use:
|
||||
def port(self, subnet=None, fmt=None, set_context=False, tenant_id=None,
|
||||
**kwargs):
|
||||
with optional_ctx(
|
||||
subnet, self.subnet,
|
||||
set_context=set_context, tenant_id=tenant_id) as subnet_to_use:
|
||||
net_id = subnet_to_use['subnet']['network_id']
|
||||
port = self._make_port(fmt or self.fmt, net_id, **kwargs)
|
||||
port = self._make_port(
|
||||
fmt or self.fmt, net_id,
|
||||
set_context=set_context, tenant_id=tenant_id,
|
||||
**kwargs)
|
||||
yield port
|
||||
|
||||
def _test_list_with_sort(self, resource,
|
||||
@ -5946,6 +5958,8 @@ class TestNetworks(testlib_api.SqlTestCase):
|
||||
class DbOperationBoundMixin(object):
|
||||
"""Mixin to support tests that assert constraints on DB operations."""
|
||||
|
||||
admin = True
|
||||
|
||||
def setUp(self, *args, **kwargs):
|
||||
super(DbOperationBoundMixin, self).setUp(*args, **kwargs)
|
||||
self._db_execute_count = 0
|
||||
@ -5958,9 +5972,20 @@ class DbOperationBoundMixin(object):
|
||||
self.addCleanup(event.remove, engine, 'after_execute',
|
||||
_event_incrementer)
|
||||
|
||||
def _get_context(self):
|
||||
if self.admin:
|
||||
return context.get_admin_context()
|
||||
return context.Context('', 'fake')
|
||||
|
||||
def get_api_kwargs(self):
|
||||
context_ = self._get_context()
|
||||
return {'set_context': True, 'tenant_id': context_.tenant}
|
||||
|
||||
def _list_and_count_queries(self, resource):
|
||||
self._db_execute_count = 0
|
||||
self.assertNotEqual([], self._list(resource))
|
||||
self.assertNotEqual([],
|
||||
self._list(resource,
|
||||
neutron_context=self._get_context()))
|
||||
query_count = self._db_execute_count
|
||||
# sanity check to make sure queries are being observed
|
||||
self.assertNotEqual(0, query_count)
|
||||
|
@ -58,9 +58,11 @@ class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
|
||||
super(DnsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
|
||||
|
||||
def _create_port(self, fmt, net_id, expected_res_status=None,
|
||||
arg_list=None, **kwargs):
|
||||
arg_list=None, set_context=False, tenant_id=None,
|
||||
**kwargs):
|
||||
tenant_id = tenant_id or self._tenant_id
|
||||
data = {'port': {'network_id': net_id,
|
||||
'tenant_id': self._tenant_id}}
|
||||
'tenant_id': tenant_id}}
|
||||
|
||||
for arg in (('admin_state_up', 'device_id',
|
||||
'mac_address', 'name', 'fixed_ips',
|
||||
@ -77,10 +79,10 @@ class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
|
||||
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
|
||||
data['port']['device_id'] = device_id
|
||||
port_req = self.new_create_request('ports', data, fmt)
|
||||
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
|
||||
if set_context and tenant_id:
|
||||
# create a specific auth context for this request
|
||||
port_req.environ['neutron.context'] = context.Context(
|
||||
'', kwargs['tenant_id'])
|
||||
'', tenant_id)
|
||||
|
||||
port_res = port_req.get_response(self.api)
|
||||
if expected_res_status:
|
||||
|
@ -328,6 +328,7 @@ class L3NatTestCaseMixin(object):
|
||||
def _create_router(self, fmt, tenant_id, name=None,
|
||||
admin_state_up=None, set_context=False,
|
||||
arg_list=None, **kwargs):
|
||||
tenant_id = tenant_id or _uuid()
|
||||
data = {'router': {'tenant_id': tenant_id}}
|
||||
if name:
|
||||
data['router']['name'] = name
|
||||
@ -405,7 +406,7 @@ class L3NatTestCaseMixin(object):
|
||||
|
||||
@contextlib.contextmanager
|
||||
def router(self, name='router1', admin_state_up=True,
|
||||
fmt=None, tenant_id=_uuid(),
|
||||
fmt=None, tenant_id=None,
|
||||
external_gateway_info=None, set_context=False,
|
||||
**kwargs):
|
||||
router = self._make_router(fmt or self.fmt, tenant_id, name,
|
||||
@ -419,9 +420,11 @@ class L3NatTestCaseMixin(object):
|
||||
|
||||
def _create_floatingip(self, fmt, network_id, port_id=None,
|
||||
fixed_ip=None, set_context=False,
|
||||
floating_ip=None, subnet_id=False):
|
||||
floating_ip=None, subnet_id=False,
|
||||
tenant_id=None):
|
||||
tenant_id = tenant_id or self._tenant_id
|
||||
data = {'floatingip': {'floating_network_id': network_id,
|
||||
'tenant_id': self._tenant_id}}
|
||||
'tenant_id': tenant_id}}
|
||||
if port_id:
|
||||
data['floatingip']['port_id'] = port_id
|
||||
if fixed_ip:
|
||||
@ -433,17 +436,18 @@ class L3NatTestCaseMixin(object):
|
||||
if subnet_id:
|
||||
data['floatingip']['subnet_id'] = subnet_id
|
||||
floatingip_req = self.new_create_request('floatingips', data, fmt)
|
||||
if set_context and self._tenant_id:
|
||||
if set_context and tenant_id:
|
||||
# create a specific auth context for this request
|
||||
floatingip_req.environ['neutron.context'] = context.Context(
|
||||
'', self._tenant_id)
|
||||
'', tenant_id)
|
||||
return floatingip_req.get_response(self.ext_api)
|
||||
|
||||
def _make_floatingip(self, fmt, network_id, port_id=None,
|
||||
fixed_ip=None, set_context=False, floating_ip=None,
|
||||
http_status=exc.HTTPCreated.code):
|
||||
fixed_ip=None, set_context=False, tenant_id=None,
|
||||
floating_ip=None, http_status=exc.HTTPCreated.code):
|
||||
res = self._create_floatingip(fmt, network_id, port_id,
|
||||
fixed_ip, set_context, floating_ip)
|
||||
fixed_ip, set_context, floating_ip,
|
||||
tenant_id=tenant_id)
|
||||
self.assertEqual(http_status, res.status_int)
|
||||
return self.deserialize(fmt, res)
|
||||
|
||||
@ -459,15 +463,20 @@ class L3NatTestCaseMixin(object):
|
||||
|
||||
@contextlib.contextmanager
|
||||
def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
|
||||
set_context=False):
|
||||
with self.subnet(cidr='11.0.0.0/24') as public_sub:
|
||||
set_context=False, tenant_id=None):
|
||||
with self.subnet(cidr='11.0.0.0/24',
|
||||
set_context=set_context,
|
||||
tenant_id=tenant_id) as public_sub:
|
||||
self._set_net_external(public_sub['subnet']['network_id'])
|
||||
private_port = None
|
||||
if port_id:
|
||||
private_port = self._show('ports', port_id)
|
||||
with test_db_base_plugin_v2.optional_ctx(private_port,
|
||||
self.port) as private_port:
|
||||
with self.router() as r:
|
||||
with test_db_base_plugin_v2.optional_ctx(
|
||||
private_port, self.port,
|
||||
set_context=set_context,
|
||||
tenant_id=tenant_id) as private_port:
|
||||
with self.router(set_context=set_context,
|
||||
tenant_id=tenant_id) as r:
|
||||
sid = private_port['port']['fixed_ips'][0]['subnet_id']
|
||||
private_sub = {'subnet': {'id': sid}}
|
||||
floatingip = None
|
||||
@ -484,6 +493,7 @@ class L3NatTestCaseMixin(object):
|
||||
public_sub['subnet']['network_id'],
|
||||
port_id=private_port['port']['id'],
|
||||
fixed_ip=fixed_ip,
|
||||
tenant_id=tenant_id,
|
||||
set_context=set_context)
|
||||
yield floatingip
|
||||
|
||||
@ -3048,34 +3058,42 @@ class TestL3DbOperationBounds(test_db_base_plugin_v2.DbOperationBoundMixin,
|
||||
super(TestL3DbOperationBounds, self).setUp()
|
||||
ext_mgr = L3TestExtensionManager()
|
||||
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
|
||||
self.kwargs = self.get_api_kwargs()
|
||||
|
||||
def test_router_list_queries_constant(self):
|
||||
with self.subnet() as s:
|
||||
with self.subnet(**self.kwargs) as s:
|
||||
self._set_net_external(s['subnet']['network_id'])
|
||||
|
||||
def router_maker():
|
||||
ext_info = {'network_id': s['subnet']['network_id']}
|
||||
self._create_router(self.fmt, _uuid(),
|
||||
self._create_router(self.fmt,
|
||||
arg_list=('external_gateway_info',),
|
||||
external_gateway_info=ext_info)
|
||||
external_gateway_info=ext_info,
|
||||
**self.kwargs)
|
||||
|
||||
self._assert_object_list_queries_constant(router_maker, 'routers')
|
||||
|
||||
def test_floatingip_list_queries_constant(self):
|
||||
with self.floatingip_with_assoc() as flip:
|
||||
with self.floatingip_with_assoc(**self.kwargs) as flip:
|
||||
internal_port = self._show('ports', flip['floatingip']['port_id'])
|
||||
internal_net_id = internal_port['port']['network_id']
|
||||
|
||||
def float_maker():
|
||||
port = self._make_port(self.fmt, internal_net_id)
|
||||
port = self._make_port(
|
||||
self.fmt, internal_net_id, **self.kwargs)
|
||||
self._make_floatingip(
|
||||
self.fmt, flip['floatingip']['floating_network_id'],
|
||||
port_id=port['port']['id'])
|
||||
port_id=port['port']['id'],
|
||||
**self.kwargs)
|
||||
|
||||
self._assert_object_list_queries_constant(float_maker,
|
||||
'floatingips')
|
||||
|
||||
|
||||
class TestL3DbOperationBoundsTenant(TestL3DbOperationBounds):
|
||||
admin = False
|
||||
|
||||
|
||||
class L3NatDBTestCaseMixin(object):
|
||||
"""L3_NAT_dbonly_mixin specific test cases."""
|
||||
|
||||
|
@ -466,18 +466,22 @@ class TestMl2DbOperationBounds(test_plugin.DbOperationBoundMixin,
|
||||
stay the same.
|
||||
"""
|
||||
|
||||
def setUp(self):
|
||||
super(TestMl2DbOperationBounds, self).setUp()
|
||||
self.kwargs = self.get_api_kwargs()
|
||||
|
||||
def make_network(self):
|
||||
return self._make_network(self.fmt, 'name', True)
|
||||
return self._make_network(self.fmt, 'name', True, **self.kwargs)
|
||||
|
||||
def make_subnet(self):
|
||||
net = self.make_network()
|
||||
setattr(self, '_subnet_count', getattr(self, '_subnet_count', 0) + 1)
|
||||
cidr = '1.%s.0.0/24' % self._subnet_count
|
||||
return self._make_subnet(self.fmt, net, None, cidr)
|
||||
return self._make_subnet(self.fmt, net, None, cidr, **self.kwargs)
|
||||
|
||||
def make_port(self):
|
||||
net = self.make_network()
|
||||
return self._make_port(self.fmt, net['network']['id'])
|
||||
return self._make_port(self.fmt, net['network']['id'], **self.kwargs)
|
||||
|
||||
def test_network_list_queries_constant(self):
|
||||
self._assert_object_list_queries_constant(self.make_network,
|
||||
@ -490,6 +494,10 @@ class TestMl2DbOperationBounds(test_plugin.DbOperationBoundMixin,
|
||||
self._assert_object_list_queries_constant(self.make_port, 'ports')
|
||||
|
||||
|
||||
class TestMl2DbOperationBoundsTenant(TestMl2DbOperationBounds):
|
||||
admin = False
|
||||
|
||||
|
||||
class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
|
||||
|
||||
def test_update_port_status_build(self):
|
||||
|
Loading…
Reference in New Issue
Block a user