[S-RBAC] Switch to new policies by default

As part of the Secure RBAC community goal, switch the "enforce_new_defaults"
and "enforce_scope" options to True by default.
It will still be possible to fall back to the old policy rules by setting
those config options to False in the Neutron config.
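
For operators who need to keep the legacy behaviour, the fallback described
above is a plain configuration change; a minimal sketch of the relevant
neutron.conf settings, assuming the standard [oslo_policy] option group
registered by oslo.policy:

    [oslo_policy]
    # Revert to the legacy project-scoped policy rules instead of the
    # new Secure RBAC defaults enabled by this change.
    enforce_new_defaults = False
    enforce_scope = False

Both options default to True after this change, so they only need to be set
explicitly when the old behaviour is required.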

Change-Id: I09c0026ccf87e6c0bb1fa59165c03dc508fba6fa
Slawek Kaplonski 2023-04-07 10:10:39 +02:00
parent 05ba4257de
commit 670cc383e0
69 changed files with 1627 additions and 1423 deletions


@ -51,12 +51,15 @@ _RESOURCE_FOREIGN_KEYS = {
'security_groups': 'security_group_id'
}
# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
# TODO(slaweq): Remove overriding the default value of config options
# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
# oslo_policy change their default value to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
opts.set_defaults(
cfg.CONF,
DEFAULT_POLICY_FILE,
enforce_scope=True,
enforce_new_defaults=True)
def reset():


@ -43,8 +43,11 @@ class InjectContext(base.ConfigurableMiddleware):
# Determine the tenant
tenant_id = req.headers.get('X_PROJECT_ID')
# Suck out the roles
roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')]
roles = ['member', 'reader']
# Suck out the roles if any are set
custom_roles = req.headers.get('X_ROLES')
if custom_roles:
roles = [r.strip() for r in custom_roles.split(',')]
# Human-friendly names
tenant_name = req.headers.get('X_PROJECT_NAME')


@ -226,6 +226,7 @@ class TestOVNClientQosExtensionEndToEnd(TestOVNClientQosExtensionBase):
arg_list = arg_list + (pnet.PHYSICAL_NETWORK,)
net_arg[pnet.PHYSICAL_NETWORK] = physnet
network = self._make_network(self.fmt, name, True,
as_admin=True,
arg_list=arg_list, **net_arg)
if cidr:
self._make_subnet(self.fmt, network, gateway, cidr,


@ -55,9 +55,9 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
return row
def _create_network(self, name, external=False):
data = {'network': {'name': name, 'tenant_id': self._tenant_id,
extnet_apidef.EXTERNAL: external}}
req = self.new_create_request('networks', data, self.fmt)
data = {'network': {'name': name, extnet_apidef.EXTERNAL: external}}
req = self.new_create_request('networks', data, self.fmt,
as_admin=True)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['network']
@ -70,7 +70,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
def _create_port(self, name, net_id, security_groups=None,
device_owner=None):
data = {'port': {'name': name,
'tenant_id': self._tenant_id,
'network_id': net_id}}
if security_groups is not None:
@ -125,7 +124,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
data = {'subnet': {'name': name,
'network_id': net_id,
'ip_version': ip_version,
'tenant_id': self._tenant_id,
'cidr': cidr,
'enable_dhcp': True}}
data['subnet'].update(kwargs)
@ -146,10 +144,13 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
return row
def _create_router(self, name, external_gateway_info=None):
data = {'router': {'name': name, 'tenant_id': self._tenant_id}}
data = {'router': {'name': name}}
as_admin = False
if external_gateway_info is not None:
data['router']['external_gateway_info'] = external_gateway_info
req = self.new_create_request('routers', data, self.fmt)
as_admin = bool(external_gateway_info.get('enable_snat'))
req = self.new_create_request('routers', data, self.fmt,
as_admin=as_admin)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['router']
@ -167,7 +168,6 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
def _create_security_group(self):
data = {'security_group': {'name': 'sgtest',
'tenant_id': self._tenant_id,
'description': 'SpongeBob Rocks!'}}
req = self.new_create_request('security-groups', data, self.fmt)
res = req.get_response(self.api)
@ -183,8 +183,7 @@ class _TestMaintenanceHelper(base.TestOVNFunctionalBase):
'protocol': n_const.PROTO_NAME_TCP,
'ethertype': n_const.IPv4,
'port_range_min': 22,
'port_range_max': 22,
'tenant_id': self._tenant_id}}
'port_range_max': 22}}
req = self.new_create_request('security-group-rules', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group_rule']
@ -772,8 +771,8 @@ class TestMaintenance(_TestMaintenanceHelper):
p1 = self._create_port('testp1', net1['id'])
logical_ip = p1['fixed_ips'][0]['ip_address']
fip_info = {'floatingip': {
'description': 'test_fip',
'tenant_id': self._tenant_id,
'description': 'test_fip',
'floating_network_id': ext_net['id'],
'port_id': p1['id'],
'fixed_ip_address': logical_ip}}


@ -365,7 +365,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
port_req.get_response(self.api)
# External network and subnet
e1 = self._make_network(self.fmt, 'e1', True,
e1 = self._make_network(self.fmt, 'e1', True, as_admin=True,
arg_list=('router:external',
'provider:network_type',
'provider:physical_network'),
@ -1608,20 +1608,23 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
net = self.deserialize(self.fmt, res)['network']
self._create_subnet(self.fmt, net['id'], '10.0.0.0/24')
res = self._create_qos_policy(self.fmt, 'qos_maxbw')
res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_maxbw = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
max_kbps=1000, max_burst_kbps=800)
max_kbps=1000, max_burst_kbps=800,
is_admin=True)
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
direction=constants.INGRESS_DIRECTION,
max_kbps=700, max_burst_kbps=600)
max_kbps=700, max_burst_kbps=600,
is_admin=True)
res = self._create_qos_policy(self.fmt, 'qos_maxbw')
res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_dscp = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_dscp['id'],
qos_const.RULE_TYPE_DSCP_MARKING, dscp_mark=14)
qos_const.RULE_TYPE_DSCP_MARKING, dscp_mark=14,
is_admin=True)
res = self._create_port(
self.fmt, net['id'], arg_list=('qos_policy_id', ),
@ -1677,7 +1680,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
{'floatingip': body})
def test_sync_fip_qos_policies(self):
res = self._create_network(self.fmt, 'n1_ext', True,
res = self._create_network(self.fmt, 'n1_ext', True, as_admin=True,
arg_list=('router:external', ),
**{'router:external': True})
net_ext = self.deserialize(self.fmt, res)['network']
@ -1687,15 +1690,17 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
net_int = self.deserialize(self.fmt, res)['network']
self._create_subnet(self.fmt, net_int['id'], '10.10.0.0/24')
res = self._create_qos_policy(self.fmt, 'qos_maxbw')
res = self._create_qos_policy(self.fmt, 'qos_maxbw', is_admin=True)
qos_maxbw = self.deserialize(self.fmt, res)['policy']
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
max_kbps=1000, max_burst_kbps=800)
max_kbps=1000, max_burst_kbps=800,
is_admin=True)
self._create_qos_rule(self.fmt, qos_maxbw['id'],
qos_const.RULE_TYPE_BANDWIDTH_LIMIT,
direction=constants.INGRESS_DIRECTION,
max_kbps=700, max_burst_kbps=600)
max_kbps=700, max_burst_kbps=600,
is_admin=True)
# Create a router with net_ext as GW network and net_int as internal
# one, and a floating IP on the external network.
@ -1750,7 +1755,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
self._validate_qos_records()
def test_fip_nat_revert_to_stateful(self):
res = self._create_network(self.fmt, 'n1_ext', True,
res = self._create_network(self.fmt, 'n1_ext', True, as_admin=True,
arg_list=('router:external', ),
**{'router:external': True})
net_ext = self.deserialize(self.fmt, res)['network']


@ -103,12 +103,13 @@ class TestNBDbMonitor(base.TestOVNFunctionalBase):
allowedaddresspairs.ADDRESS_PAIRS: allowed_address_pairs
}
port_res = self._create_port(self.fmt, self.net['network']['id'],
is_admin=True,
arg_list=arg_list, **host_arg)
port = self.deserialize(self.fmt, port_res)['port']
return port
def _create_fip(self, port, fip_address):
e1 = self._make_network(self.fmt, 'e1', True,
e1 = self._make_network(self.fmt, 'e1', True, as_admin=True,
arg_list=('router:external',
'provider:network_type',
'provider:physical_network'),
@ -403,7 +404,8 @@ class TestSBDbMonitor(base.TestOVNFunctionalBase, test_l3.L3NatTestCaseMixin):
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
**kwargs)
self._make_subnet(self.fmt, ext_net, '10.251.0.1', '10.251.0.0/24',
enable_dhcp=True)
router = self._make_router(self.fmt, self._tenant_id)


@ -101,20 +101,21 @@ class TestPortBinding(base.TestOVNFunctionalBase):
'network_id': self.n1['network']['id'],
'tenant_id': self._tenant_id})
port_req = self.new_create_request('ports', port_data, self.fmt)
port_req = self.new_create_request('ports', port_data, self.fmt,
as_admin=True)
port_res = port_req.get_response(self.api)
p = self.deserialize(self.fmt, port_res)
port_id = p['port']['id']
else:
port_req = self.new_update_request('ports', port_data, port_id,
self.fmt)
self.fmt, as_admin=True)
port_res = port_req.get_response(self.api)
self.deserialize(self.fmt, port_res)
return port_id
def _port_show(self, port_id):
port_req = self.new_show_request('ports', port_id)
port_req = self.new_show_request('ports', port_id, as_admin=True)
port_res = port_req.get_response(self.api)
return self.deserialize(self.fmt, port_res)
@ -715,13 +716,13 @@ class TestExternalPorts(base.TestOVNFunctionalBase):
def _test_external_port_create_switchdev(self, vnic_type):
port_data = {
'port': {'network_id': self.n1['network']['id'],
'tenant_id': self._tenant_id,
portbindings.VNIC_TYPE: vnic_type,
ovn_const.OVN_PORT_BINDING_PROFILE: {
ovn_const.PORT_CAP_PARAM: [
ovn_const.PORT_CAP_SWITCHDEV]}}}
port_req = self.new_create_request('ports', port_data, self.fmt)
port_req = self.new_create_request('ports', port_data, self.fmt,
as_admin=True)
port_res = port_req.get_response(self.api)
port = self.deserialize(self.fmt, port_res)['port']
@ -769,7 +770,8 @@ class TestExternalPorts(base.TestOVNFunctionalBase):
ovn_const.PORT_CAP_PARAM: [
ovn_const.PORT_CAP_SWITCHDEV]}}}
port_req = self.new_update_request(
'ports', port_upt_data, port['id'], self.fmt)
'ports', port_upt_data, port['id'], self.fmt,
as_admin=True)
port_res = port_req.get_response(self.api)
port = self.deserialize(self.fmt, port_res)['port']
@ -948,7 +950,7 @@ class TestProvnetPorts(base.TestOVNFunctionalBase):
def test_network_segments_localnet_ports(self):
n1 = self._make_network(
self.fmt, 'n1', True,
self.fmt, 'n1', True, as_admin=True,
arg_list=('provider:network_type',
'provider:segmentation_id',
'provider:physical_network'),


@ -50,6 +50,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
# Note: Port creation invokes _bind_port_if_needed(),
@ -65,6 +66,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
# Since the agent is dead, expect binding to fail
@ -88,6 +90,7 @@ class TestMl2PortBinding(ml2_test_base.ML2TestFramework,
with self.subnet(network=network) as subnet:
with self.port(
subnet=subnet, device_owner=DEVICE_OWNER_COMPUTE,
is_admin=True,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**self.host_args) as port:
pass


@ -134,21 +134,25 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
self.subnet(cidr='30.0.0.0/24') as subnet2, \
self.subnet(cidr='40.0.0.0/24') as subnet3, \
self.port(subnet=subnet1,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}), \
self.port(subnet=subnet2,
is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST2}), \
self.port(subnet=subnet3,
is_admin=True,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST3}):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
'prepare') as mock_prepare:
# add external gateway to router
@ -231,7 +235,7 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.subnet() as subnet, \
self.network(**kwargs) as ext_net, \
self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net, cidr='20.0.0.0/24'):
gw_info = {'network_id': ext_net['network']['id']}
self.l3_plugin.update_router(
@ -256,7 +260,7 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
router = self._create_router(distributed=True, ha=True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as ext_net, \
with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net), \
self.subnet(cidr='20.0.0.0/24') as subnet, \
self.port(subnet=subnet,
@ -300,7 +304,8 @@ class L3DvrHATestCase(test_l3_dvr_router_plugin.L3DvrTestCase):
def _create_external_network(self):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)


@ -24,7 +24,6 @@ from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib import constants
from neutron_lib import context
from neutron.api.rpc.handlers import l3_rpc
from neutron.tests.common import helpers
@ -112,7 +111,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
subnet2 = self._make_subnet(
self.fmt, net1, '10.2.0.1', '10.2.0.0/24', enable_dhcp=True)
ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
# Create first router and add an interface
@ -170,7 +170,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as subnet2:
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as ext_net, \
with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net,
cidr='30.0.0.0/24'):
router = self._create_router()
@ -287,7 +287,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.fmt, net1, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
subnet2 = self._make_subnet(
self.fmt, net2, '10.1.0.1', '10.1.0.0/24', enable_dhcp=True)
ext_net = self._make_network(self.fmt, 'ext_net', True, **kwargs)
ext_net = self._make_network(self.fmt, 'ext_net', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '20.0.0.1', '20.0.0.0/24', enable_dhcp=True)
# Create first router and add an interface
@ -358,7 +359,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
@ -447,7 +449,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
router1 = self._create_router(distributed=dvr)
router2 = self._create_router(distributed=dvr)
@ -559,7 +562,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
router = self._create_router(distributed=dvr)
self.l3_plugin.update_router(
@ -636,7 +640,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
def test_router_with_ipv4_and_multiple_ipv6_on_same_network(self):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.0.0.1', '10.0.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -710,7 +715,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -820,7 +826,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -904,7 +911,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -982,7 +990,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -1067,7 +1076,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -1200,7 +1210,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
private_net1 = self._make_network(self.fmt, 'net1', True)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -1243,7 +1254,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
ext_net = self._make_network(self.fmt, '', True, as_admin=True,
**kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=constants.IP_VERSION_4, enable_dhcp=True)
@ -1382,7 +1394,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as ext_net,\
with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet):
@ -1412,7 +1424,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as ext_net,\
with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet,
@ -1450,7 +1462,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
helpers.register_l3_agent(
host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
with self.network(shared=True) as net,\
with self.network(as_admin=True, shared=True) as net,\
self.subnet(network=net) as subnet,\
self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
@ -1465,9 +1477,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with mock.patch.object(self.l3_plugin.l3_rpc_notifier,
'router_removed_from_agent') as remove_mock:
ctx = context.Context(
'', non_admin_tenant) if non_admin_port else self.context
self._delete('ports', port['port']['id'], neutron_context=ctx)
self._delete('ports', port['port']['id'],
tenant_id=non_admin_tenant)
remove_mock.assert_called_once_with(
mock.ANY, router['id'], HOST)
@ -1501,13 +1512,15 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with self.subnet() as ext_subnet,\
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.port(subnet=subnet1,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}) as vm_port:
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
# add external gateway to router
self.l3_plugin.update_router(
self.context, router['id'],
@ -1576,21 +1589,25 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.subnet(cidr='40.0.0.0/24') as subnet3,\
self.port(subnet=subnet1,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}),\
self.port(subnet=subnet2,
is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST2}),\
self.port(subnet=subnet3,
is_admin=True,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST3}):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
with mock.patch.object(self.l3_plugin.l3_rpc_notifier.client,
'prepare') as mock_prepare:
@ -1661,7 +1678,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.subnet() as subnet,\
self.network(**kwargs) as ext_net,\
self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net, cidr='20.0.0.0/24'):
gw_info = {'network_id': ext_net['network']['id']}
request_body = {
@ -1693,7 +1710,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
router = self._create_router()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as ext_net,\
with self.network(as_admin=True, **kwargs) as ext_net,\
self.subnet(network=ext_net),\
self.subnet(cidr='20.0.0.0/24') as subnet,\
self.port(subnet=subnet,
@ -1796,10 +1813,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
with self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@ -1834,10 +1853,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as subnet1,\
self.subnet(cidr='30.0.0.0/24') as subnet2,\
self.port(subnet=subnet1,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: host}),\
self.port(subnet=subnet2,
is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@ -1883,7 +1904,8 @@ class L3DvrTestCase(L3DvrTestCaseBase):
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
# add external gateway to router
self.l3_plugin.update_router(
self.context, router3['id'],
@ -1915,6 +1937,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.port(subnet=wan_subnet) as wan_port1,\
self.port(subnet=wan_subnet) as wan_port2,\
self.port(subnet=subnet1,
is_admin=True,
device_owner=constants.DEVICE_OWNER_DHCP,
arg_list=arg_list,
**{portbindings.HOST_ID: host}):
@ -1958,6 +1981,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
arg_list = (portbindings.HOST_ID,)
with self.subnet() as subnet,\
self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=arg_list,
**{portbindings.HOST_ID: HOST1}):
@ -2067,7 +2091,7 @@ class L3DvrTestCaseMigration(L3DvrTestCaseBase):
with self.subnet() as subnet1:
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as ext_net, \
with self.network(as_admin=True, **kwargs) as ext_net, \
self.subnet(network=ext_net,
cidr='30.0.0.0/24'):
router = self._create_router(distributed=False)


@ -28,7 +28,7 @@ class LogApiTestCaseBase(functional_base.TestOVNFunctionalBase):
super().setUp()
self.log_driver = self.mech_driver.log_driver
self._check_is_supported()
self.ctxt = context.Context('admin', 'fake_tenant')
self.ctxt = context.Context('admin', self._tenant_id)
def _check_is_supported(self):
if not self.log_driver.network_logging_supported(self.nb_api):
@ -110,7 +110,6 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
def _create_port(self, name, net_id, security_groups):
data = {'port': {'name': name,
'tenant_id': self.ctxt.project_id,
'network_id': net_id,
'security_groups': security_groups}}
req = self.new_create_request('ports', data, self.fmt)
@ -118,8 +117,7 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
return self.deserialize(self.fmt, res)['port']['id']
def _create_security_group(self, name):
data = {'security_group': {'name': name,
'tenant_id': self.ctxt.project_id}}
data = {'security_group': {'name': name}}
req = self.new_create_request('security-groups', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group']['id']
@ -130,8 +128,7 @@ class LogApiTestCaseComplex(LogApiTestCaseBase):
'protocol': n_const.PROTO_NAME_TCP,
'ethertype': n_const.IPv4,
'port_range_min': tcp_port,
'port_range_max': tcp_port,
'tenant_id': self.ctxt.project_id}}
'port_range_max': tcp_port}}
req = self.new_create_request('security-group-rules', data, self.fmt)
res = req.get_response(self.api)
return self.deserialize(self.fmt, res)['security_group_rule']['id']


@ -63,7 +63,7 @@ class TestRouter(base.TestOVNFunctionalBase):
if physnet:
arg_list = arg_list + (pnet.PHYSICAL_NETWORK,)
net_arg[pnet.PHYSICAL_NETWORK] = physnet
network = self._make_network(self.fmt, name, True,
network = self._make_network(self.fmt, name, True, as_admin=True,
arg_list=arg_list, **net_arg)
if cidr:
self._make_subnet(self.fmt, network, gateway, cidr,


@ -98,7 +98,8 @@ class PortForwardingTestCase(PortForwardingTestCaseBase):
def _prepare_env(self):
self.router = self._create_router(distributed=True)
self.ext_net = self._create_network(
self.fmt, 'ext-net', True, arg_list=("router:external",),
self.fmt, 'ext-net', True, as_admin=True,
arg_list=("router:external",),
**{"router:external": True}).json['network']
self.ext_subnet = self._create_subnet(
self.fmt, self.ext_net['id'], '172.24.2.0/24').json['subnet']


@ -55,24 +55,16 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertNotIn(portbindings.VIF_TYPE, port)
self.assertNotIn(portbindings.VIF_DETAILS, port)
def _get_non_admin_context(self):
return context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False)
def test_port_vif_details(self):
with self.port(name='name') as port:
with self.port(is_admin=True, name='name') as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings(port['port'])
# Check a response of get_port
ctx = context.get_admin_context()
port = self._show('ports', port_id, neutron_context=ctx)['port']
port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
ctx = self._get_non_admin_context()
non_admin_port = self._show(
'ports', port_id, neutron_context=ctx)['port']
non_admin_port = self._show('ports', port_id)['port']
self._check_response_no_portbindings(non_admin_port)
def test_ports_vif_details(self):
@ -83,9 +75,7 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertEqual(len(ports), 2)
for port in ports:
self._check_response_portbindings(port)
# By default user is admin - now test non admin user
ctx = self._get_non_admin_context()
ports = self._list('ports', neutron_context=ctx)['ports']
ports = self._list('ports')['ports']
self.assertEqual(len(ports), 2)
for non_admin_port in ports:
self._check_response_no_portbindings(non_admin_port)
@ -97,11 +87,12 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_create_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
with self.port(is_admin=True,
arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
port_id = port['port']['id']
self._check_port_binding_profile(port['port'], profile)
port = self._show('ports', port_id)
port = self._show('ports', port_id, as_admin=True)
self._check_port_binding_profile(port['port'], profile)
def test_create_port_binding_profile_none(self):
@ -112,14 +103,13 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_update_port_binding_profile(self, profile):
profile_arg = {portbindings.PROFILE: profile}
with self.port() as port:
with self.port(is_admin=True) as port:
self._check_port_binding_profile(port['port'])
port_id = port['port']['id']
ctx = context.get_admin_context()
port = self._update('ports', port_id, {'port': profile_arg},
neutron_context=ctx)['port']
as_admin=True)['port']
self._check_port_binding_profile(port, profile)
port = self._show('ports', port_id)['port']
port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_profile_none(self):
@ -131,18 +121,16 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def test_port_create_portinfo_non_admin(self):
profile_arg = {portbindings.PROFILE: {dummy_plugin.RESOURCE_NAME:
dummy_plugin.RESOURCE_NAME}}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
# succeed without binding:profile
with self.port(subnet=subnet1,
set_context=True, tenant_id='test'):
with self.port(subnet=subnet1):
pass
# fail with binding:profile
try:
with self.port(subnet=subnet1,
expected_res_status=403,
arg_list=(portbindings.PROFILE,),
set_context=True, tenant_id='test',
**profile_arg):
pass
except exc.HTTPClientError:
@ -156,11 +144,9 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
with self.port(subnet=subnet1) as port:
# By default user is admin - now test non admin user
port_id = port['port']['id']
ctx = self._get_non_admin_context()
port = self._update('ports', port_id,
{'port': profile_arg},
expected_code=exc.HTTPForbidden.code,
neutron_context=ctx)
expected_code=exc.HTTPForbidden.code)
class PortBindingsHostTestCaseMixin(object):
@ -192,74 +178,70 @@ class PortBindingsHostTestCaseMixin(object):
def test_port_vif_host(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
with self.port(name='name', is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
port_id = port['port']['id']
# Check a response of create_port
self._check_response_portbindings_host(port['port'])
# Check a response of get_port
ctx = context.get_admin_context()
port = self._show('ports', port_id, neutron_context=ctx)['port']
port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings_host(port)
# By default user is admin - now test non admin user
ctx = context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False)
non_admin_port = self._show(
'ports', port_id, neutron_context=ctx)['port']
non_admin_port = self._show('ports', port_id)['port']
self._check_response_no_portbindings_host(non_admin_port)
def test_ports_vif_host(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name1',
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg), self.port(name='name2'):
ctx = context.get_admin_context()
ports = self._list('ports', neutron_context=ctx)['ports']
ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
if port['name'] == 'name1':
self._check_response_portbindings_host(port)
else:
self.assertFalse(port[portbindings.HOST_ID])
# By default user is admin - now test non admin user
ctx = context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False)
ports = self._list('ports', neutron_context=ctx)['ports']
ports = self._list('ports')['ports']
self.assertEqual(2, len(ports))
for non_admin_port in ports:
self._check_response_no_portbindings_host(non_admin_port)
def test_ports_vif_host_update(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name1', arg_list=(portbindings.HOST_ID,),
with self.port(name='name1', is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1, self.port(name='name2') as port2:
data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
req = self.new_update_request('ports', data, port1['port']['id'])
req = self.new_update_request('ports', data, port1['port']['id'],
as_admin=True)
req.get_response(self.api)
req = self.new_update_request('ports', data, port2['port']['id'])
ctx = context.get_admin_context()
req = self.new_update_request('ports', data, port2['port']['id'],
as_admin=True)
req.get_response(self.api)
ports = self._list('ports', neutron_context=ctx)['ports']
ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
self.assertEqual('testhosttemp', port[portbindings.HOST_ID])
def test_ports_vif_non_host_update(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
with self.port(name='name', is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
data = {'port': {'admin_state_up': False}}
req = self.new_update_request('ports', data, port['port']['id'])
req = self.new_update_request('ports', data, port['port']['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][portbindings.HOST_ID],
res['port'][portbindings.HOST_ID])
def test_ports_vif_non_host_update_when_host_null(self):
with self.port() as port:
with self.port(is_admin=True) as port:
data = {'port': {'admin_state_up': False}}
req = self.new_update_request('ports', data, port['port']['id'])
req = self.new_update_request('ports', data, port['port']['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][portbindings.HOST_ID],
res['port'][portbindings.HOST_ID])
@ -267,10 +249,12 @@ class PortBindingsHostTestCaseMixin(object):
def test_ports_vif_host_list(self):
host_arg = {portbindings.HOST_ID: self.hostname}
with self.port(name='name1',
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1,\
self.port(name='name2'),\
self.port(name='name3',
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
self._test_list_resources(
@ -308,23 +292,16 @@ class PortBindingsVnicTestCaseMixin(object):
# Check a response of create_port
self._check_response_portbindings_vnic_type(port['port'])
# Check a response of get_port
ctx = context.get_admin_context()
port = self._show('ports', port_id, neutron_context=ctx)['port']
port = self._show('ports', port_id, as_admin=True)['port']
self._check_response_portbindings_vnic_type(port)
# By default user is admin - now test non admin user
ctx = context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False)
non_admin_port = self._show(
'ports', port_id, neutron_context=ctx)['port']
non_admin_port = self._show('ports', port_id)['port']
self._check_response_portbindings_vnic_type(non_admin_port)
def test_ports_vnic_type(self):
vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
with self.port(name='name1', arg_list=(portbindings.VNIC_TYPE,),
**vnic_arg), self.port(name='name2'):
ctx = context.get_admin_context()
ports = self._list('ports', neutron_context=ctx)['ports']
ports = self._list('ports', as_admin=True)['ports']
self.assertEqual(2, len(ports))
for port in ports:
if port['name'] == 'name1':
@ -332,11 +309,7 @@ class PortBindingsVnicTestCaseMixin(object):
else:
self.assertEqual(portbindings.VNIC_NORMAL,
port[portbindings.VNIC_TYPE])
# By default user is admin - now test non admin user
ctx = context.Context(user_id=None,
tenant_id=self._tenant_id,
is_admin=False)
ports = self._list('ports', neutron_context=ctx)['ports']
ports = self._list('ports')['ports']
self.assertEqual(2, len(ports))
for non_admin_port in ports:
self._check_response_portbindings_vnic_type(non_admin_port)


@ -17,6 +17,7 @@ import copy
from unittest import mock
import fixtures
from neutron_lib import context
from neutron_lib import exceptions
from neutron_lib.plugins import constants as lib_const
from neutron_lib.plugins import directory
@ -1045,6 +1046,8 @@ class ExtensionExtendedAttributeTestCase(base.BaseTestCase):
req = testlib_api.create_request(
path, body, content_type,
method, query_string=params)
req.environ['neutron.context'] = context.Context(
'', self._tenant_id, roles=['member', 'reader'])
res = req.get_response(self._api)
if res.status_code >= 400:
raise webexc.HTTPClientError(detail=res.body, code=res.status_code)


@ -74,6 +74,14 @@ def _get_path(resource, id=None, action=None,
return path
def _get_neutron_env(tenant_id=None, as_admin=False):
tenant_id = tenant_id or _uuid()
roles = ['member', 'reader']
if as_admin:
roles.append('admin')
return {'neutron.context': context.Context('', tenant_id, roles=roles)}
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
@ -98,6 +106,8 @@ class APIv2TestBase(base.BaseTestCase):
api = router.APIRouter()
self.api = webtest.TestApp(api)
self._tenant_id = "api-test-tenant"
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', quota_conf.QUOTA_DB_DRIVER,
group='QUOTAS')
@ -105,6 +115,27 @@ class APIv2TestBase(base.BaseTestCase):
# APIRouter initialization resets policy module, re-initializing it
policy.init()
def _post_request(self, path, initial_input, expect_errors=None,
req_tenant_id=None, as_admin=False):
req_tenant_id = req_tenant_id or self._tenant_id
return self.api.post_json(
path, initial_input, expect_errors=expect_errors,
extra_environ=_get_neutron_env(req_tenant_id, as_admin))
def _put_request(self, path, initial_input, expect_errors=None,
req_tenant_id=None, as_admin=False):
req_tenant_id = req_tenant_id or self._tenant_id
return self.api.put_json(
path, initial_input, expect_errors=expect_errors,
extra_environ=_get_neutron_env(req_tenant_id, as_admin))
def _delete_request(self, path, expect_errors=None,
req_tenant_id=None, as_admin=False):
req_tenant_id = req_tenant_id or self._tenant_id
return self.api.delete_json(
path, expect_errors=expect_errors,
extra_environ=_get_neutron_env(req_tenant_id, as_admin))
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
@ -512,17 +543,16 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
env = _get_neutron_env(req_tenant_id)
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'project_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
instance.get_networks.return_value = [input_dict]
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
@ -789,7 +819,7 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
env = _get_neutron_env(tenant_id)
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
@ -947,8 +977,9 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def test_create_return_extra_attr(self):
net_id = _uuid()
project_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
'tenant_id': project_id}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
@ -959,7 +990,8 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
content_type='application/' + self.fmt,
extra_environ=_get_neutron_env(project_id))
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
@ -969,23 +1001,25 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
project_id = _uuid()
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
'project_id': project_id, 'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
fmt=self.fmt),
extra_environ=_get_neutron_env(project_id))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
env = _get_neutron_env(req_tenant_id)
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
instance.get_network.return_value = {'project_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
@ -1010,15 +1044,12 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
shared = req_tenant_id and req_tenant_id.endswith('another')
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
env = _get_neutron_env(req_tenant_id)
data = {'tenant_id': real_tenant_id, 'shared': shared}
data = {'project_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
@ -1060,14 +1091,14 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
env = _get_neutron_env(req_tenant_id)
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
instance.get_network.return_value = {'project_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
@ -1308,26 +1339,31 @@ class NotificationTest(APIv2TestBase):
group='QUOTAS')
def _resource_op_notifier(self, opname, resource, expected_errors=False):
initial_input = {resource: {'name': 'myname'}}
tenant_id = _uuid()
network_obj = {'name': 'myname',
'project_id': tenant_id}
initial_input = {resource: network_obj}
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_network.return_value = network_obj
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
if opname == 'create':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.post_json(
res = self._post_request(
_get_path('networks'),
initial_input, expect_errors=expected_errors)
initial_input, expect_errors=expected_errors,
req_tenant_id=tenant_id)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input, expect_errors=expected_errors)
op_input = {resource: {'name': 'myname'}}
res = self._put_request(
_get_path('networks', id=tenant_id),
op_input, expect_errors=expected_errors,
req_tenant_id=tenant_id)
expected_code = exc.HTTPOk.code
if opname == 'delete':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.delete(
_get_path('networks', id=_uuid()),
expect_errors=expected_errors)
res = self._delete_request(
_get_path('networks', id=tenant_id),
expect_errors=expected_errors,
req_tenant_id=tenant_id)
expected_code = exc.HTTPNoContent.code
expected_events = ('.'.join([resource, opname, "start"]),
@ -1472,7 +1508,9 @@ class ExtensionTestCase(base.BaseTestCase):
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post_json(_get_path('networks'), initial_input)
res = self.api.post_json(
_get_path('networks'), initial_input,
extra_environ=_get_neutron_env(tenant_id))
instance.create_network.assert_called_with(mock.ANY,
network=data)


@ -16,7 +16,6 @@ import contextlib
from neutron_lib.api.definitions import metering as metering_apidef
from neutron_lib import constants as n_consts
from neutron_lib import context
from neutron_lib.db import constants as db_const
from neutron_lib.plugins import constants
from oslo_utils import uuidutils
@ -42,18 +41,12 @@ _fake_uuid = uuidutils.generate_uuid
class MeteringPluginDbTestCaseMixin(object):
def _create_metering_label(self, fmt, name, description, **kwargs):
data = {'metering_label': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test-tenant'),
'shared': kwargs.get('shared', False),
'description': description}}
req = self.new_create_request('metering-labels', data,
fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id'],
is_admin=kwargs.get('is_admin', True)))
req = self.new_create_request(
'metering-labels', data, fmt,
tenant_id=kwargs.get('tenant_id', self._tenant_id),
as_admin=kwargs.get('is_admin', True))
return req.get_response(self.ext_api)
@ -71,7 +64,6 @@ class MeteringPluginDbTestCaseMixin(object):
data = {
'metering_label_rule': {
'metering_label_id': metering_label_id,
'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
'direction': direction,
'excluded': excluded,
}
@ -87,13 +79,10 @@ class MeteringPluginDbTestCaseMixin(object):
data['metering_label_rule']['destination_ip_prefix'] =\
destination_ip_prefix
req = self.new_create_request('metering-label-rules',
data, fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
req = self.new_create_request(
'metering-label-rules', data, fmt,
tenant_id=kwargs.get('tenant_id', self._tenant_id),
as_admin=kwargs.get('is_admin', True))
return req.get_response(self.ext_api)
@ -203,7 +192,8 @@ class TestMetering(MeteringPluginDbTestCase):
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
self._delete('metering-labels', metering_label_id, 204,
as_admin=True)
def test_list_metering_label(self):
name = 'my label'
@ -258,7 +248,7 @@ class TestMetering(MeteringPluginDbTestCase):
remote_ip_prefix=remote_ip_prefix) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._update('metering-label-rules', rule_id, data,
webob.exc.HTTPNotImplemented.code)
webob.exc.HTTPNotImplemented.code, as_admin=True)
def test_delete_metering_label_rule(self):
name = 'my label'
@ -275,7 +265,8 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_id, direction, excluded,
remote_ip_prefix=remote_ip_prefix) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._delete('metering-label-rules', rule_id, 204)
self._delete('metering-label-rules', rule_id, 204,
as_admin=True)
def test_list_metering_label_rule(self):
name = 'my label'
@ -297,7 +288,7 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
metering_label_rule)
metering_label_rule, as_admin=True)
def test_create_metering_label_rules(self):
name = 'my label'
@ -319,7 +310,7 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
metering_label_rule)
metering_label_rule, as_admin=True)
def test_create_overlap_metering_label_rules(self):
name = 'my label'
@ -365,4 +356,5 @@ class TestMetering(MeteringPluginDbTestCase):
metering_label_rule = (v1, v2)
self._test_list_resources('metering-label-rule',
metering_label_rule)
metering_label_rule,
as_admin=True)


@ -45,6 +45,7 @@ from neutron.db.models import agent as agent_model
from neutron.extensions import l3agentscheduler
from neutron.objects import agent as ag_obj
from neutron.objects import l3agent as rb_obj
from neutron import policy
from neutron.tests.common import helpers
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
@ -78,18 +79,21 @@ class AgentSchedulerTestMixIn(object):
def _path_req(self, path, method='GET', data=None,
query_string=None,
admin_context=True):
admin_context=True,
req_tenant_id=None):
content_type = 'application/%s' % self.fmt
body = None
if data is not None: # empty dict is valid
body = wsgi.Serializer().serialize(data, content_type)
roles = ['member', 'reader']
req_tenant_id = req_tenant_id or self._tenant_id
if admin_context:
return testlib_api.create_request(
roles.append('admin')
req = testlib_api.create_request(
path, body, content_type, method, query_string=query_string)
else:
return testlib_api.create_request(
path, body, content_type, method, query_string=query_string,
context=context.Context('', 'tenant_id'))
req.environ['neutron.context'] = context.Context(
'', req_tenant_id, roles=roles, is_admin=admin_context)
return req
def _path_create_request(self, path, data, admin_context=True):
return self._path_req(path, method='POST', data=data,
@ -218,7 +222,7 @@ class AgentSchedulerTestMixIn(object):
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = admin_state_up
self._update('agents', agent_id, new_agent)
self._update('agents', agent_id, new_agent, as_admin=True)
def _get_agent_id(self, agent_type, host):
agents = self._list_agents()
@ -269,6 +273,7 @@ class OvsAgentSchedulerTestCaseBase(test_l3.L3NatTestCaseMixin,
self.dhcp_notify_p = mock.patch(
'neutron.extensions.dhcpagentscheduler.notify')
self.patched_dhcp_notify = self.dhcp_notify_p.start()
policy.init()
class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
@ -911,10 +916,12 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertNotEqual(agent['host'], new_agent_host)
def test_router_auto_schedule_with_invalid_router(self):
with self.router() as router:
project_id = uuidutils.generate_uuid()
with self.router(project_id=project_id) as router:
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
self._delete('routers', router['router']['id'])
self._delete('routers', router['router']['id'],
tenant_id=project_id)
# deleted router
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
@ -1106,19 +1113,22 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertEqual(0, len(router_ids))
def test_router_without_l3_agents(self):
project_id = uuidutils.generate_uuid()
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
data = {'router': {'tenant_id': project_id}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s['subnet']['network_id']}
router_req = self.new_create_request('routers', data, self.fmt)
router_req = self.new_create_request(
'routers', data, self.fmt, tenant_id=project_id)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
l3agents = (
self.l3plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['router']['id']]))
self._delete('routers', router['router']['id'])
self._delete(
'routers', router['router']['id'], tenant_id=project_id)
self.assertEqual(0, len(l3agents))
def test_dvr_router_scheduling_to_only_dvr_snat_agent(self):
@ -1217,26 +1227,30 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self.assertEqual(agent['id'], new_agent['id'])
def test_router_sync_data(self):
with self.subnet() as s1,\
self.subnet(cidr='10.0.2.0/24') as s2,\
self.subnet(cidr='10.0.3.0/24') as s3:
project_id = uuidutils.generate_uuid()
with self.subnet(project_id=project_id) as s1,\
self.subnet(project_id=project_id, cidr='10.0.2.0/24') as s2,\
self.subnet(project_id=project_id, cidr='10.0.3.0/24') as s3:
self._register_agent_states()
self._set_net_external(s1['subnet']['network_id'])
data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
data = {'router': {'tenant_id': project_id}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s1['subnet']['network_id']}
router_req = self.new_create_request('routers', data, self.fmt)
router_req = self.new_create_request(
'routers', data, self.fmt, tenant_id=project_id)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
s2['subnet']['id'],
None)
None,
tenant_id=project_id)
self._router_interface_action('add',
router['router']['id'],
s3['subnet']['id'],
None)
None,
tenant_id=project_id)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1, len(l3agents['agents']))
@ -1267,7 +1281,8 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self._router_interface_action('remove',
router['router']['id'],
s2['subnet']['id'],
None)
None,
tenant_id=project_id)
l3agents = self._list_l3_agents_hosting_router(
router['router']['id'])
self.assertEqual(1,
@ -1275,8 +1290,10 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
self._router_interface_action('remove',
router['router']['id'],
s3['subnet']['id'],
None)
self._delete('routers', router['router']['id'])
None,
tenant_id=project_id)
self._delete('routers', router['router']['id'],
tenant_id=project_id)
def _test_router_add_to_l3_agent(self, admin_state_up=True):
with self.router() as router1:

File diff suppressed because it is too large.


@ -188,22 +188,28 @@ class DvrDbMixinTestCase(test_plugin.Ml2PluginV2TestCase):
arg_list = (portbindings.HOST_ID,)
with self.subnet() as subnet,\
self.port(subnet=subnet,
is_admin=True,
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
arg_list=arg_list, **host_arg) as compute_port,\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_DHCP,
is_admin=True,
arg_list=arg_list, **host_arg) as dhcp_port,\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_LOADBALANCER,
is_admin=True,
arg_list=arg_list, **host_arg) as lb_port,\
self.port(device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
is_admin=True,
arg_list=arg_list, **host_arg),\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX,
is_admin=True,
arg_list=arg_list,
**{portbindings.HOST_ID: 'other'}),\
self.port(subnet=subnet,
device_owner=constants.DEVICE_OWNER_NETWORK_PREFIX,
is_admin=True,
arg_list=arg_list, **host_arg):
expected_ids = [port['port']['id'] for port in
[compute_port, dhcp_port, lb_port]]


@ -373,7 +373,8 @@ class TestPortUpdateIpam(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
**{portbindings.HOST_ID: 'fakehost'},
is_admin=True)
port = self.deserialize(self.fmt, response)
# Create the subnet and try to update the port to get an IP
@ -381,7 +382,8 @@ class TestPortUpdateIpam(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'port': {
'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)


@ -71,7 +71,6 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
plugin = 'neutron.tests.unit.db.test_ipam_backend_mixin.TestPlugin'
super(TestDbBasePluginIpam, self).setUp(plugin=plugin)
cfg.CONF.set_override("ipam_driver", 'internal')
self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
self.admin_context = ncontext.get_admin_context()
@ -89,7 +88,7 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
},
'subnet_request': ipam_req.SpecificSubnetRequest(
self.tenant_id,
self._tenant_id,
self.subnet_id,
'10.0.0.0/24',
'10.0.0.1',


@ -928,7 +928,8 @@ class L3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
with db_api.CONTEXT_WRITER.using(self.ctx):
res = self._create_network(
self.fmt, name, True,
arg_list=(extnet_apidef.EXTERNAL,), **kwargs)
arg_list=(extnet_apidef.EXTERNAL,),
as_admin=True, **kwargs)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)


@ -237,7 +237,7 @@ class TestRevisionNumberMaintenance(test_securitygroup.SecurityGroupsTestCase,
'10.0.0.0/24')['subnet']
self._set_net_external(self.net['id'])
info = {'network_id': self.net['id']}
router = self._make_router(self.fmt, None,
router = self._make_router(self.fmt, self._tenant_id,
external_gateway_info=info)['router']
fip = self._make_floatingip(self.fmt, self.net['id'])['floatingip']
port = self._make_port(self.fmt, self.net['id'])['port']


@ -84,9 +84,8 @@ class AddressGroupTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_address_group_actions(self, addr_group_id, data, action,
expected=None, tenant_id=None):
act_req = self.new_action_request(
'address-groups', data, addr_group_id, action)
act_req.environ['neutron.context'] = context.Context(
'', tenant_id or self._tenant_id)
'address-groups', data, addr_group_id, action,
tenant_id=tenant_id or self._tenant_id)
act_res = act_req.get_response(self.ext_api)
if expected:


@ -49,39 +49,40 @@ class AddressScopeTestExtensionManager(object):
class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _create_address_scope(self, fmt, ip_version=constants.IP_VERSION_4,
expected_res_status=None, admin=False, **kwargs):
expected_res_status=None, admin=False,
tenant_id=None, **kwargs):
address_scope = {'address_scope': {}}
address_scope['address_scope']['ip_version'] = ip_version
tenant_id = tenant_id or self._tenant_id
for k, v in kwargs.items():
address_scope['address_scope'][k] = str(v)
address_scope_req = self.new_create_request('address-scopes',
address_scope, fmt)
if not admin:
neutron_context = context.Context('', kwargs.get('tenant_id',
self._tenant_id))
address_scope_req.environ['neutron.context'] = neutron_context
address_scope, fmt,
tenant_id=tenant_id,
as_admin=admin)
address_scope_res = address_scope_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, address_scope_res.status_int)
return address_scope_res
def _make_address_scope(self, fmt, ip_version, admin=False, **kwargs):
def _make_address_scope(self, fmt, ip_version, admin=False, tenant_id=None,
**kwargs):
res = self._create_address_scope(fmt, ip_version,
admin=admin, **kwargs)
admin=admin, tenant_id=tenant_id,
**kwargs)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def address_scope(self, ip_version=constants.IP_VERSION_4,
admin=False, **kwargs):
if 'project_id' in kwargs:
kwargs['tenant_id'] = kwargs['project_id']
admin=False, tenant_id=None, **kwargs):
tenant_id = tenant_id if tenant_id else kwargs.pop(
'tenant_id', None)
addr_scope = self._make_address_scope(self.fmt, ip_version,
admin, **kwargs)
admin, tenant_id, **kwargs)
yield addr_scope
def _test_create_address_scope(self, ip_version=constants.IP_VERSION_4,
@ -99,9 +100,9 @@ class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _test_update_address_scope(self, addr_scope_id, data, admin=False,
expected=None, tenant_id=None):
update_req = self.new_update_request(
'address-scopes', data, addr_scope_id)
update_req.environ['neutron.context'] = context.Context(
'', tenant_id or self._tenant_id, is_admin=admin)
'address-scopes', data, addr_scope_id,
tenant_id=tenant_id or self._tenant_id,
as_admin=admin)
update_res = update_req.get_response(self.ext_api)
if expected:
@ -244,8 +245,7 @@ class TestAddressScope(AddressScopeTestCase):
admin=True)
admin_res = self._list('address-scopes')
mortal_res = self._list(
'address-scopes',
neutron_context=context.Context('', 'not-the-owner'))
'address-scopes', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['address_scopes']))
self.assertEqual(1, len(mortal_res['address_scopes']))
@ -254,8 +254,7 @@ class TestAddressScope(AddressScopeTestCase):
name='foo-address-scope')
admin_res = self._list('address-scopes')
mortal_res = self._list(
'address-scopes',
neutron_context=context.Context('', 'not-the-owner'))
'address-scopes', tenant_id='not-the-owner')
self.assertEqual(1, len(admin_res['address_scopes']))
self.assertEqual(0, len(mortal_res['address_scopes']))


@ -59,11 +59,10 @@ class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
class AgentDBTestMixIn(object):
def _list_agents(self, expected_res_status=None,
neutron_context=None,
query_string=None):
agent_res = self._list('agents',
neutron_context=neutron_context,
query_params=query_string)
query_params=query_string,
as_admin=True)
if expected_res_status:
self.assertEqual(expected_res_status, agent_res.status_int)
return agent_res
@ -107,14 +106,12 @@ class AgentDBTestCase(AgentDBTestMixIn,
def test_create_agent(self):
data = {'agent': {}}
_req = self.new_create_request('agents', data, self.fmt)
_req.environ['neutron.context'] = context.Context(
'', 'tenant_id')
res = _req.get_response(self.ext_api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_list_agent(self):
agents = self._register_agent_states()
res = self._list('agents')
res = self._list('agents', as_admin=True)
self.assertEqual(len(agents), len(res['agents']))
def test_show_agent(self):
@ -122,7 +119,7 @@ class AgentDBTestCase(AgentDBTestMixIn,
agents = self._list_agents(
query_string='binary=' + constants.AGENT_PROCESS_L3)
self.assertEqual(2, len(agents['agents']))
agent = self._show('agents', agents['agents'][0]['id'])
agent = self._show('agents', agents['agents'][0]['id'], as_admin=True)
self.assertEqual(constants.AGENT_PROCESS_L3, agent['agent']['binary'])
def test_update_agent(self):
@ -132,13 +129,13 @@ class AgentDBTestCase(AgentDBTestMixIn,
'&host=' + L3_HOSTB))
self.assertEqual(1, len(agents['agents']))
com_id = agents['agents'][0]['id']
agent = self._show('agents', com_id)
agent = self._show('agents', com_id, as_admin=True)
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = False
new_agent['agent']['description'] = 'description'
self._update('agents', com_id, new_agent)
agent = self._show('agents', com_id)
self._update('agents', com_id, new_agent, as_admin=True)
agent = self._show('agents', com_id, as_admin=True)
self.assertFalse(agent['agent']['admin_state_up'])
self.assertEqual('description', agent['agent']['description'])


@ -71,12 +71,11 @@ class TestAZAgentCase(AZTestCommon):
{'name': 'nova2', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'router', 'state': 'available'},
{'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
res = self._list('availability_zones')
res = self._list('availability_zones', as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
# not admin case
ctx = context.Context('', 'noadmin')
res = self._list('availability_zones', neutron_context=ctx)
res = self._list('availability_zones', as_admin=False)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
@ -89,33 +88,37 @@ class TestAZAgentCase(AZTestCommon):
{'name': 'nova2', 'resource': 'network', 'state': 'available'},
{'name': 'nova2', 'resource': 'router', 'state': 'available'},
{'name': 'nova3', 'resource': 'router', 'state': 'unavailable'}]
res = self._list('availability_zones')
res = self._list('availability_zones', as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected, azs)
# list with filter of 'name'
res = self._list('availability_zones',
query_params="name=nova1")
query_params="name=nova1",
as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[:1], azs)
# list with filter of 'resource'
res = self._list('availability_zones',
query_params="resource=router")
query_params="resource=router",
as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[-2:], azs)
# list with filter of 'state' as 'available'
res = self._list('availability_zones',
query_params="state=available")
query_params="state=available",
as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[:3], azs)
# list with filter of 'state' as 'unavailable'
res = self._list('availability_zones',
query_params="state=unavailable")
query_params="state=unavailable",
as_admin=True)
azs = res['availability_zones']
self.assertCountEqual(expected[-1:], azs)
def test_list_agent_with_az(self):
helpers.register_dhcp_agent(host='host1', az='nova1')
res = self._list('agents')
res = self._list('agents', as_admin=True)
self.assertEqual('nova1',
res['agents'][0]['availability_zone'])


@ -80,7 +80,8 @@ class DataPlaneStatusExtensionTestCase(
data = {'port': {'data_plane_status': constants.ACTIVE}}
req = self.new_update_request(port_def.COLLECTION_NAME,
data,
port['port']['id'])
port['port']['id'],
as_admin=True)
res = req.get_response(self.api)
p = self.deserialize(self.fmt, res)['port']
self.assertEqual(200, res.status_code)
@ -106,9 +107,11 @@ class DataPlaneStatusExtensionTestCase(
with self.port(name='port1') as port:
res = self._update(port_def.COLLECTION_NAME, port['port']['id'],
{'port': {dps_lib.DATA_PLANE_STATUS:
constants.ACTIVE}})
constants.ACTIVE}},
as_admin=True)
res = self._update(port_def.COLLECTION_NAME, port['port']['id'],
{'port': {'name': 'port2'}})
{'port': {'name': 'port2'}},
as_admin=True)
self.assertEqual(res['port']['name'], 'port2')
self.assertEqual(res['port'][dps_lib.DATA_PLANE_STATUS],
constants.ACTIVE)
@ -125,7 +128,8 @@ class DataPlaneStatusExtensionTestCase(
with self.port(name='port1') as port:
self._update(port_def.COLLECTION_NAME, port['port']['id'],
{'port': {dps_lib.DATA_PLANE_STATUS:
constants.ACTIVE}})
constants.ACTIVE}},
as_admin=True)
notify = set(n['event_type'] for n in fake_notifier.NOTIFICATIONS)
duplicated_notify = expect_notify & notify
self.assertEqual(expect_notify, duplicated_notify)


@ -71,9 +71,13 @@ class DefaultSubnetpoolsExtensionTestCase(
return self.deserialize(self.fmt, res)['subnet']
def _update_subnetpool(self, subnetpool_id, **data):
def _update_subnetpool(self, subnetpool_id, tenant_id=None,
as_admin=False, **data):
if 'shared' in data or 'is_default' in data:
as_admin = True
update_req = self.new_update_request(
'subnetpools', {'subnetpool': data}, subnetpool_id)
'subnetpools', {'subnetpool': data}, subnetpool_id,
tenant_id=tenant_id, as_admin=as_admin)
res = update_req.get_response(self.api)
return self.deserialize(self.fmt, res)['subnetpool']


@ -109,10 +109,8 @@ class DnsExtensionTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertEqual(expected_res_status, port_res.status_int)
return port_res
def _test_list_resources(self, resource, items, neutron_context=None,
query_params=None):
def _test_list_resources(self, resource, items, query_params=None):
res = self._list('%ss' % resource,
neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertCountEqual([i['id'] for i in res['%ss' % resource]],


@ -112,7 +112,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as extnet, self.network() as innet:
with self.network(as_admin=True, **kwargs) as extnet, \
self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'), \
self.subnet(network=innet, cidr='10.0.0.0/24') as insub, \
self.router() as router:
@ -148,7 +149,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as extnet, self.network() as innet:
with self.network(as_admin=True, **kwargs) as extnet,\
self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
self.router() as router:
@ -241,7 +243,8 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as extnet, self.network() as innet:
with self.network(as_admin=True, **kwargs) as extnet,\
self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
self.subnet(network=innet, cidr='10.0.8.0/24') as insub2,\
@ -317,10 +320,11 @@ class TestExtendFipPortForwardingExtension(
ctx = context.get_admin_context()
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as extnet, self.network() as innet:
with self.network(as_admin=True, **kwargs) as extnet,\
self.network() as innet:
with self.subnet(network=extnet, cidr='200.0.0.0/22'),\
self.subnet(network=innet, cidr='10.0.0.0/24') as insub,\
self.router(distributed=True) as router:
self.router(distributed=True, as_admin=True) as router:
fip = self._make_floatingip(self.fmt, extnet['network']['id'])
# check the floatingip response contains port_forwarding field
self.assertIn(apidef.COLLECTION_NAME, fip['floatingip'])

View File

@ -65,7 +65,8 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _set_net_external(self, net_id):
self._update('networks', net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
def test_list_nets_external(self):
with self.network() as n1:
@ -111,13 +112,14 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'network': {'router:external': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
req.environ['neutron.context'] = context.Context('', 'noadmin')
network['network']['id'],
tenant_id='noadmin')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
def test_update_network_external_net_with_ports_set_not_shared(self):
with self.network(router__external=True, shared=True) as ext_net,\
with self.network(router__external=True, shared=True,
as_admin=True) as ext_net,\
self.subnet(network=ext_net) as ext_subnet, \
self.port(subnet=ext_subnet,
tenant_id='',
@ -125,7 +127,8 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
ext_net['network']['id'])
ext_net['network']['id'],
as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPOk.code, res.status_int)
ctx = context.Context(None, None, is_admin=True)
@ -158,18 +161,18 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt2))
def test_create_port_external_network_non_admin_fails(self):
with self.network(router__external=True) as ext_net:
with self.network(as_admin=True, router__external=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with testtools.ExpectedException(
exc.HTTPClientError) as ctx_manager:
with self.port(subnet=ext_subnet,
set_context='True',
is_admin=False,
tenant_id='noadmin'):
pass
self.assertEqual(403, ctx_manager.exception.code)
def test_create_port_external_network_admin_succeeds(self):
with self.network(router__external=True) as ext_net:
with self.network(router__external=True, as_admin=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with self.port(subnet=ext_subnet) as port:
self.assertEqual(port['port']['network_id'],
@ -178,13 +181,13 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def test_create_external_network_non_admin_fails(self):
with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager:
with self.network(router__external=True,
set_context='True',
as_admin=False,
tenant_id='noadmin'):
pass
self.assertEqual(403, ctx_manager.exception.code)
def test_create_external_network_admin_succeeds(self):
with self.network(router__external=True) as ext_net:
with self.network(router__external=True, as_admin=True) as ext_net:
self.assertTrue(ext_net['network'][extnet_apidef.EXTERNAL])
def test_delete_network_check_disassociated_floatingips(self):


@ -17,7 +17,6 @@ from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import extraroute as xroute_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_utils import uuidutils
@ -62,14 +61,15 @@ class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
class ExtraRouteDBTestCaseBase(object):
def _routes_update_prepare(
self, router_id, subnet_id,
port_id, routes, skip_add=False, tenant_id=None):
port_id, routes, skip_add=False, tenant_id=None, as_admin=False):
if not skip_add:
self._router_interface_action(
'add', router_id, subnet_id, port_id, tenant_id=None)
ctxt = context.Context('', tenant_id) if tenant_id else None
'add', router_id, subnet_id, port_id, tenant_id=tenant_id,
as_admin=as_admin)
tenant_id = tenant_id or self._tenant_id
self._update('routers', router_id, {'router': {'routes': routes}},
neutron_context=ctxt)
return self._show('routers', router_id)
request_tenant_id=tenant_id, as_admin=as_admin)
return self._show('routers', router_id, tenant_id=tenant_id)
def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
self._update('routers', router_id, {'router': {'routes': routes}})
@ -91,7 +91,8 @@ class ExtraRouteDBTestCaseBase(object):
def test_route_update_with_external_route(self):
my_tenant = 'tenant1'
with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as ext_subnet,\
self.port(subnet=ext_subnet) as nexthop_port:
self.port(subnet=ext_subnet,
tenant_id='notme') as nexthop_port:
nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': nexthop_ip}]
@ -107,14 +108,14 @@ class ExtraRouteDBTestCaseBase(object):
def test_route_update_with_route_via_another_tenant_subnet(self):
my_tenant = 'tenant1'
with self.subnet(cidr='10.0.1.0/24', tenant_id='notme') as subnet,\
self.port(subnet=subnet) as nexthop_port:
self.port(subnet=subnet, tenant_id='notme') as nexthop_port:
nexthop_ip = nexthop_port['port']['fixed_ips'][0]['ip_address']
routes = [{'destination': '135.207.0.0/16',
'nexthop': nexthop_ip}]
with self.router(tenant_id=my_tenant) as r:
body = self._routes_update_prepare(
r['router']['id'], subnet['subnet']['id'], None, routes,
tenant_id=my_tenant)
tenant_id=my_tenant, as_admin=True)
self.assertEqual(routes, body['router']['routes'])
def test_route_clear_routes_with_None(self):


@ -198,7 +198,9 @@ class FlavorExtensionTestCase(extension.ExtensionTestCase):
'service_profiles': ['profile-1']}}
instance = self.plugin.return_value
instance.get_flavor.return_value = expected['flavor']
res = self.api.get(_get_path('flavors', id=flavor_id, fmt=self.fmt))
res = self.api.get(
_get_path('flavors', id=flavor_id, fmt=self.fmt),
extra_environ=test_base._get_neutron_env(as_admin=True))
instance.get_flavor.assert_called_with(mock.ANY,
flavor_id,
fields=mock.ANY)
@ -218,7 +220,9 @@ class FlavorExtensionTestCase(extension.ExtensionTestCase):
'service_profiles': ['profile-2', 'profile-1']}]}
instance = self.plugin.return_value
instance.get_flavors.return_value = data['flavors']
res = self.api.get(_get_path('flavors', fmt=self.fmt))
res = self.api.get(
_get_path('flavors', fmt=self.fmt),
extra_environ=test_base._get_neutron_env(as_admin=True))
instance.get_flavors.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)


@ -14,7 +14,6 @@
from unittest import mock
from neutron_lib import context
from oslo_utils import uuidutils
from webob import exc
@ -50,8 +49,9 @@ class FloatingIPPorForwardingTestCase(test_l3.L3BaseForIntTests,
tenant_id=None,
description=None,
external_port_range=None,
internal_port_range=None):
tenant_id = tenant_id or _uuid()
internal_port_range=None,
as_admin=False):
tenant_id = tenant_id or self._tenant_id
data = {'port_forwarding': {
"protocol": protocol,
"internal_ip_address": internal_ip_address,
@ -69,28 +69,29 @@ class FloatingIPPorForwardingTestCase(test_l3.L3BaseForIntTests,
if description:
data['port_forwarding']['description'] = description
fip_pf_req = self._req(
'POST', 'floatingips', data,
fmt or self.fmt, id=floating_ip_id,
subresource='port_forwardings')
fip_pf_req.environ['neutron.context'] = context.Context(
'', tenant_id, is_admin=True)
fip_pf_req = self.new_create_request(
'floatingips', data, fmt or self.fmt, floating_ip_id,
subresource='port_forwardings',
tenant_id=tenant_id, as_admin=as_admin)
return fip_pf_req.get_response(self.ext_api)
def _update_fip_port_forwarding(self, fmt, floating_ip_id,
port_forwarding_id, **kwargs):
port_forwarding_id,
req_tenant_id=None, as_admin=False,
**kwargs):
req_tenant_id = req_tenant_id or self._tenant_id
port_forwarding = {}
for k, v in kwargs.items():
port_forwarding[k] = v
data = {'port_forwarding': port_forwarding}
fip_pf_req = self._req(
'PUT', 'floatingips', data,
fmt or self.fmt, id=floating_ip_id,
fip_pf_req = self.new_update_request(
'floatingips', data, floating_ip_id, fmt or self.fmt,
sub_id=port_forwarding_id,
subresource='port_forwardings')
subresource='port_forwardings',
tenant_id=req_tenant_id,
as_admin=as_admin)
return fip_pf_req.get_response(self.ext_api)


@ -377,10 +377,10 @@ class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
class L3NatTestCaseMixin(object):
def _create_router(self, fmt, tenant_id, name=None,
admin_state_up=None, set_context=False,
arg_list=None, **kwargs):
tenant_id = tenant_id or _uuid()
def _create_router(self, fmt, tenant_id=None, name=None,
admin_state_up=None, arg_list=None,
as_admin=False, **kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'router': {'tenant_id': tenant_id}}
if name:
data['router']['name'] = name
@ -400,29 +400,27 @@ class L3NatTestCaseMixin(object):
if 'enable_ndp_proxy' in kwargs:
data['router']['enable_ndp_proxy'] = \
bool(kwargs['enable_ndp_proxy'])
router_req = self.new_create_request('routers', data, fmt)
if set_context and tenant_id:
# create a specific auth context for this request
router_req.environ['neutron.context'] = context.Context(
'', tenant_id)
router_req = self.new_create_request('routers', data, fmt,
tenant_id=tenant_id,
as_admin=as_admin)
return router_req.get_response(self.ext_api)
def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None,
external_gateway_info=None, set_context=False,
arg_list=None, **kwargs):
def _make_router(self, fmt, tenant_id=None, name=None, admin_state_up=None,
external_gateway_info=None,
arg_list=None, as_admin=False, **kwargs):
if external_gateway_info:
arg_list = ('external_gateway_info', ) + (arg_list or ())
res = self._create_router(fmt, tenant_id, name,
admin_state_up, set_context,
admin_state_up,
arg_list=arg_list,
external_gateway_info=external_gateway_info,
**kwargs)
as_admin=as_admin, **kwargs)
return self.deserialize(fmt, res)
def _add_external_gateway_to_router(self, router_id, network_id,
expected_code=exc.HTTPOk.code,
neutron_context=None, ext_ips=None,
ext_ips=None, as_admin=False,
**kwargs):
ext_ips = ext_ips or []
body = {'router':
@ -435,7 +433,7 @@ class L3NatTestCaseMixin(object):
'qos_policy_id'] = kwargs.get('policy_id')
return self._update('routers', router_id, body,
expected_code=expected_code,
neutron_context=neutron_context)
as_admin=as_admin)
def _remove_external_gateway_from_router(self, router_id, network_id,
expected_code=exc.HTTPOk.code,
@ -449,7 +447,8 @@ class L3NatTestCaseMixin(object):
expected_code=exc.HTTPOk.code,
expected_body=None,
tenant_id=None,
msg=None):
msg=None,
as_admin=False):
interface_data = {}
if subnet_id is not None:
interface_data.update({'subnet_id': subnet_id})
@ -457,11 +456,8 @@ class L3NatTestCaseMixin(object):
interface_data.update({'port_id': port_id})
req = self.new_action_request('routers', interface_data, router_id,
"%s_router_interface" % action)
# if tenant_id was specified, create a tenant context for this request
if tenant_id:
req.environ['neutron.context'] = context.Context(
'', tenant_id)
"%s_router_interface" % action,
tenant_id=tenant_id, as_admin=as_admin)
res = req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int, msg)
response = self.deserialize(self.fmt, res)
@ -472,23 +468,23 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def router(self, name='router1', admin_state_up=True,
fmt=None, project_id=None,
external_gateway_info=None, set_context=False,
external_gateway_info=None, as_admin=False,
**kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
set_context, **kwargs)
as_admin=as_admin, **kwargs)
yield router
def _set_net_external(self, net_id):
self._update('networks', net_id,
{'network': {extnet_apidef.EXTERNAL: True}})
{'network': {extnet_apidef.EXTERNAL: True}},
as_admin=True)
def _create_floatingip(self, fmt, network_id, port_id=None,
fixed_ip=None, set_context=False,
floating_ip=None, subnet_id=None,
tenant_id=None, **kwargs):
fixed_ip=None, floating_ip=None, subnet_id=None,
tenant_id=None, as_admin=False, **kwargs):
tenant_id = tenant_id or self._tenant_id
data = {'floatingip': {'floating_network_id': network_id,
'tenant_id': tenant_id}}
@ -505,20 +501,18 @@ class L3NatTestCaseMixin(object):
data['floatingip'].update(kwargs)
floatingip_req = self.new_create_request('floatingips', data, fmt)
if set_context and tenant_id:
# create a specific auth context for this request
floatingip_req.environ['neutron.context'] = context.Context(
'', tenant_id)
floatingip_req = self.new_create_request(
'floatingips', data, fmt, tenant_id=tenant_id, as_admin=as_admin)
return floatingip_req.get_response(self.ext_api)
def _make_floatingip(self, fmt, network_id, port_id=None,
fixed_ip=None, set_context=False, tenant_id=None,
fixed_ip=None, tenant_id=None,
floating_ip=None, http_status=exc.HTTPCreated.code,
**kwargs):
as_admin=False, **kwargs):
res = self._create_floatingip(fmt, network_id, port_id,
fixed_ip, set_context, floating_ip,
tenant_id=tenant_id, **kwargs)
fixed_ip, floating_ip,
tenant_id=tenant_id, as_admin=as_admin,
**kwargs)
self.assertEqual(http_status, res.status_int)
return self.deserialize(fmt, res)
@ -534,16 +528,15 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
public_cidr='11.0.0.0/24', set_context=False,
project_id=None, flavor_id=None, **kwargs):
public_cidr='11.0.0.0/24', project_id=None,
flavor_id=None, as_admin=False, **kwargs):
tenant_id = project_id if project_id else kwargs.pop(
'tenant_id', None)
with self.subnet(cidr=public_cidr,
set_context=set_context,
tenant_id=tenant_id) as public_sub:
tenant_id=tenant_id,
as_admin=as_admin) as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
args_list = {'set_context': set_context,
'tenant_id': tenant_id}
args_list = {'tenant_id': tenant_id}
if flavor_id:
args_list['flavor_id'] = flavor_id
private_port = None
@ -551,8 +544,8 @@ class L3NatTestCaseMixin(object):
private_port = self._show('ports', port_id)
with test_db_base_plugin_v2.optional_ctx(
private_port, self.port,
set_context=set_context,
tenant_id=tenant_id) as private_port:
tenant_id=tenant_id,
is_admin=as_admin) as private_port:
with self.router(**args_list) as r:
sid = private_port['port']['fixed_ips'][0]['subnet_id']
private_sub = {'subnet': {'id': sid}}
@ -571,7 +564,7 @@ class L3NatTestCaseMixin(object):
port_id=private_port['port']['id'],
fixed_ip=fixed_ip,
tenant_id=tenant_id,
set_context=set_context,
as_admin=as_admin,
**kwargs)
yield floatingip
@ -581,8 +574,8 @@ class L3NatTestCaseMixin(object):
@contextlib.contextmanager
def floatingip_no_assoc_with_public_sub(self, private_sub, fmt=None,
set_context=False, public_sub=None,
flavor_id=None, **kwargs):
public_sub=None, flavor_id=None,
as_admin=False, **kwargs):
if 'project_id' in kwargs:
kwargs['tenant_id'] = kwargs['project_id']
self._set_net_external(public_sub['subnet']['network_id'])
@ -606,7 +599,7 @@ class L3NatTestCaseMixin(object):
floatingip = self._make_floatingip(
fmt or self.fmt,
public_sub['subnet']['network_id'],
set_context=set_context,
as_admin=as_admin,
**kwargs)
yield floatingip, r
@ -615,14 +608,14 @@ class L3NatTestCaseMixin(object):
floatingip['floatingip']['id'])
@contextlib.contextmanager
def floatingip_no_assoc(self, private_sub, fmt=None,
set_context=False, flavor_id=None, **kwargs):
def floatingip_no_assoc(self, private_sub, fmt=None, flavor_id=None,
as_admin=False, **kwargs):
if 'project_id' in kwargs:
kwargs['tenant_id'] = kwargs['project_id']
with self.subnet(cidr='12.0.0.0/24') as public_sub:
with self.floatingip_no_assoc_with_public_sub(
private_sub, fmt, set_context, public_sub,
flavor_id, **kwargs) as (f, r):
private_sub, fmt, public_sub, flavor_id,
as_admin=as_admin, **kwargs) as (f, r):
# Yield only the floating ip object
yield f
@ -707,10 +700,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_create_with_gwinfo(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
data = {'router': {'tenant_id': _uuid()}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s['subnet']['network_id']}
data = {'router': {
'name': 'router1',
'external_gateway_info': {
'network_id': s['subnet']['network_id']}}}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
@ -726,8 +719,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
}
res = self._create_router(
self.fmt, _uuid(), arg_list=('external_gateway_info',),
external_gateway_info=ext_info
self.fmt, arg_list=('external_gateway_info',),
external_gateway_info=ext_info,
as_admin=True
)
router = self.deserialize(self.fmt, res)
self.assertEqual(
@ -749,8 +743,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'subnet_id': s['subnet']['id']}]
}
res = self._create_router(
self.fmt, _uuid(), arg_list=('external_gateway_info',),
external_gateway_info=ext_info
self.fmt,
arg_list=('external_gateway_info',),
external_gateway_info=ext_info,
as_admin=True
)
router = self.deserialize(self.fmt, res)
ext_ips = router['router']['external_gateway_info'][
@ -768,8 +764,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'external_fixed_ips': [{'ip_address': '10.0.0.99'}]
}
res = self._create_router(
self.fmt, _uuid(), arg_list=('external_gateway_info',),
set_context=True, external_gateway_info=ext_info
self.fmt, arg_list=('external_gateway_info',),
external_gateway_info=ext_info
)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
@ -873,7 +869,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s['subnet']['network_id'],
ext_ips=[{'ip_address': s['subnet']['gateway_ip']}],
expected_code=exc.HTTPBadRequest.code)
expected_code=exc.HTTPBadRequest.code,
as_admin=True)
def test_router_update_gateway_with_invalid_external_ip(self):
with self.router() as r:
@ -883,7 +880,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s['subnet']['network_id'],
ext_ips=[{'ip_address': '99.99.99.99'}],
expected_code=exc.HTTPBadRequest.code)
expected_code=exc.HTTPBadRequest.code,
as_admin=True)
def test_router_update_gateway_with_invalid_external_subnet(self):
with self.subnet() as s1,\
@ -895,7 +893,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
s1['subnet']['network_id'],
# this subnet is not on the same network so this should fail
ext_ips=[{'subnet_id': s2['subnet']['id']}],
expected_code=exc.HTTPBadRequest.code)
expected_code=exc.HTTPBadRequest.code,
as_admin=True)
def test_router_update_gateway_with_different_external_subnet(self):
with self.network() as n:
@ -906,11 +905,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
ext_ips=[{'subnet_id': s1['subnet']['id']}])
ext_ips=[{'subnet_id': s1['subnet']['id']}],
as_admin=True)
res2 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
ext_ips=[{'subnet_id': s2['subnet']['id']}])
ext_ips=[{'subnet_id': s2['subnet']['id']}],
as_admin=True)
fip1 = res1['router']['external_gateway_info']['external_fixed_ips'][0]
fip2 = res2['router']['external_gateway_info']['external_fixed_ips'][0]
self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
@ -944,7 +945,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
ext_ips=[{'subnet_id': s1['subnet']['id']}])
ext_ips=[{'subnet_id': s1['subnet']['id']}],
as_admin=True)
fip1 = (res1['router']['external_gateway_info']
['external_fixed_ips'][0])
self.assertEqual(s1['subnet']['id'], fip1['subnet_id'])
@ -953,7 +955,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
n['network']['id'],
ext_ips=[{'ip_address': fip1['ip_address'],
'subnet_id': s1['subnet']['id']},
{'subnet_id': s2['subnet']['id']}])
{'subnet_id': s2['subnet']['id']}],
as_admin=True)
self.assertEqual(fip1, res2['router']['external_gateway_info']
['external_fixed_ips'][0])
fip2 = (res2['router']['external_gateway_info']
@ -971,7 +974,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
ext_ips=[{'subnet_id': s1['subnet']['id']}])
ext_ips=[{'subnet_id': s1['subnet']['id']}],
as_admin=True)
plugin = directory.get_plugin(plugin_constants.L3)
mock.patch.object(
plugin, 'update_router',
@ -990,7 +994,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res1 = self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'],
ext_ips=[{'subnet_id': s1['subnet']['id']}])
ext_ips=[{'subnet_id': s1['subnet']['id']}],
as_admin=True)
fip1 = (res1['router']['external_gateway_info']
['external_fixed_ips'][0])
sres = self._create_subnet(self.fmt, net_id=n['network']['id'],
@ -1028,7 +1033,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
n['network']['id'],
ext_ips=[{'subnet_id': s1['subnet']['id']},
{'subnet_id': s2['subnet']['id']}],
expected_code=exc.HTTPOk.code)
expected_code=exc.HTTPOk.code,
as_admin=True)
res1 = self._show('routers', r['router']['id'])
original_fips = (res1['router']['external_gateway_info']
['external_fixed_ips'])
@ -1309,9 +1315,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
tenant_id = _uuid()
with self.router(tenant_id=tenant_id, set_context=True) as r:
with self.network(tenant_id=tenant_id, set_context=True) as n:
with self.subnet(network=n, set_context=True) as s:
with self.router(tenant_id=tenant_id) as r:
with self.network(tenant_id=tenant_id) as n:
with self.subnet(network=n, tenant_id=tenant_id) as s:
err_code = exc.HTTPNotFound.code
self._router_interface_action('add',
r['router']['id'],
@ -1322,7 +1328,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
body = self._router_interface_action('add',
r['router']['id'],
s['subnet']['id'],
None)
None,
tenant_id=tenant_id)
self.assertIn('port_id', body)
self._router_interface_action('remove',
r['router']['id'],
@ -1334,8 +1341,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_by_subnet_other_tenant_subnet_returns_400(
self):
router_tenant_id = _uuid()
with self.router(tenant_id=router_tenant_id, set_context=True) as r:
with self.network(shared=True) as n:
with self.router(tenant_id=router_tenant_id) as r:
with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s:
err_code = exc.HTTPBadRequest.code
self._router_interface_action('add',
@ -1350,10 +1357,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
):
router_tenant_id = _uuid()
with mock.patch.object(network_obj.NetworkRBAC, "get_projects") as g:
with self.router(
tenant_id=router_tenant_id, set_context=True
) as r:
with self.network(shared=True) as n:
with self.router(tenant_id=router_tenant_id) as r:
with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s:
g.return_value = [router_tenant_id]
self._router_interface_action(
@ -1369,8 +1374,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self, out_of_pool=False, router_action_as_admin=False,
expected_code=exc.HTTPOk.code):
router_tenant_id = _uuid()
with self.router(tenant_id=router_tenant_id, set_context=True) as r:
with self.network(shared=True) as n:
with self.router(tenant_id=router_tenant_id) as r:
with self.network(as_admin=True, shared=True) as n:
with self.subnet(network=n) as s1, (
self.subnet(network=n, cidr='fd00::/64',
ip_version=lib_constants.IP_VERSION_6)
@ -1386,13 +1391,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'ip_address':
s2['subnet']['gateway_ip']}
with self.port(subnet=s1, fixed_ips=fixed_ips,
tenant_id=router_tenant_id) as p:
kwargs = {'expected_code': expected_code}
if not router_action_as_admin:
kwargs['tenant_id'] = router_tenant_id
tenant_id=router_tenant_id,
is_admin=True) as p:
self._router_interface_action(
'add', r['router']['id'], None, p['port']['id'],
**kwargs)
expected_code=expected_code,
tenant_id=router_tenant_id,
as_admin=router_action_as_admin)
def test_router_add_interface_by_port_other_tenant_address_in_pool(
self):
@ -1414,13 +1419,17 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
with self.router(tenant_id=tenant_id) as r,\
self.network(tenant_id=tenant_id) as n1,\
self.network(tenant_id=other_tenant_id) as n2:
with self.subnet(network=n1, cidr='10.0.0.0/24') as s1,\
self.subnet(network=n2, cidr='10.1.0.0/24') as s2:
with self.subnet(network=n1, cidr='10.0.0.0/24',
tenant_id=tenant_id) as s1,\
self.subnet(network=n2, cidr='10.1.0.0/24',
tenant_id=other_tenant_id) as s2:
body = self._router_interface_action(
'add',
r['router']['id'],
s2['subnet']['id'],
None)
None,
tenant_id=other_tenant_id,
as_admin=True)
self.assertIn('port_id', body)
self._router_interface_action(
'add',
@ -1472,7 +1481,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'ip_address': '1.1.1.1'},
{'ip_address': '2.2.2.2'}]}}
self._update('ports', p['port']['id'], data,
neutron_context=context.get_admin_context(),
as_admin=True,
expected_code=exc.HTTPBadRequest.code)
self._router_interface_action('remove',
@ -1666,12 +1675,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_add_interface_port_bad_tenant_returns_404(self):
tenant_id = _uuid()
with self.router(tenant_id=tenant_id, set_context=True) as r:
with self.network(tenant_id=tenant_id, set_context=True) as n:
with self.subnet(tenant_id=tenant_id, network=n,
set_context=True) as s:
with self.port(tenant_id=tenant_id, subnet=s,
set_context=True) as p:
with self.router(tenant_id=tenant_id) as r:
with self.network(tenant_id=tenant_id) as n:
with self.subnet(tenant_id=tenant_id, network=n) as s:
with self.port(tenant_id=tenant_id, subnet=s) as p:
err_code = exc.HTTPNotFound.code
self._router_interface_action('add',
r['router']['id'],
@ -1837,7 +1844,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._add_external_gateway_to_router(
r['router']['id'], ext_net_id,
ext_ips=[{'subnet_id': s1['subnet']['id']}],
expected_code=exc.HTTPBadRequest.code)
expected_code=exc.HTTPBadRequest.code,
as_admin=True)
expected_msg = (
"Bad router request: Cidr 10.0.2.0/24 of subnet "
"%(external_subnet_id)s overlaps with cidr 10.0.2.0/24 of "
@ -1967,15 +1975,12 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.assertIsNone(gw_info)
def test_router_add_and_remove_gateway_tenant_ctx(self):
with self.router(tenant_id='noadmin',
set_context=True) as r:
with self.router() as r:
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
ctx = context.Context('', 'noadmin')
self._add_external_gateway_to_router(
r['router']['id'],
s['subnet']['network_id'],
neutron_context=ctx)
s['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = body['router']['external_gateway_info']['network_id']
self.assertEqual(net_id, s['subnet']['network_id'])
@ -1988,8 +1993,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_create_router_port_with_device_id_of_other_tenants_router(self):
with self.router() as admin_router:
with self.network(tenant_id='tenant_a',
set_context=True) as n:
with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n):
for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
self._create_port(
@ -1997,7 +2001,6 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
tenant_id='tenant_a',
device_id=admin_router['router']['id'],
device_owner=device_owner,
set_context=True,
expected_res_status=exc.HTTPConflict.code)
def test_create_non_router_port_device_id_of_other_tenants_router_update(
@ -2006,38 +2009,32 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
# port that matches the device_id of another tenants router and then
# we change the device_owner to be network:router_interface.
with self.router() as admin_router:
with self.network(tenant_id='tenant_a',
set_context=True) as n:
with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n):
for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
port_res = self._create_port(
self.fmt, n['network']['id'],
tenant_id='tenant_a',
device_id=admin_router['router']['id'],
set_context=True)
device_id=admin_router['router']['id'])
port = self.deserialize(self.fmt, port_res)
neutron_context = context.Context('', 'tenant_a')
data = {'port': {'device_owner': device_owner}}
self._update('ports', port['port']['id'], data,
neutron_context=neutron_context,
expected_code=exc.HTTPConflict.code)
expected_code=exc.HTTPConflict.code,
request_tenant_id='tenant_a')
def test_update_port_device_id_to_different_tenants_router(self):
with self.router() as admin_router:
with self.router(tenant_id='tenant_a',
set_context=True) as tenant_router:
with self.network(tenant_id='tenant_a',
set_context=True) as n:
with self.router(tenant_id='tenant_a') as tenant_router:
with self.network(tenant_id='tenant_a') as n:
with self.subnet(network=n) as s:
port = self._router_interface_action(
'add', tenant_router['router']['id'],
s['subnet']['id'], None, tenant_id='tenant_a')
neutron_context = context.Context('', 'tenant_a')
data = {'port':
{'device_id': admin_router['router']['id']}}
self._update('ports', port['port_id'], data,
neutron_context=neutron_context,
expected_code=exc.HTTPConflict.code)
expected_code=exc.HTTPConflict.code,
request_tenant_id='tenant_a')
def test_router_add_gateway_invalid_network_returns_400(self):
with self.router() as r:
@ -2122,7 +2119,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._add_external_gateway_to_router(
r['router']['id'], n['network']['id'],
ext_ips=[{'subnet_id': s['subnet']['id'],
'ip_address': '10.0.0.4'}])
'ip_address': '10.0.0.4'}],
as_admin=True)
gw_info = res['router']['external_gateway_info']
ext_ips = gw_info['external_fixed_ips'][0]
expected_gw_ips = [ext_ips['ip_address']]
@ -2314,7 +2312,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_delete_with_port_existed_returns_409(self):
with self.subnet() as subnet:
res = self._create_router(self.fmt, _uuid())
res = self._create_router(self.fmt)
router = self.deserialize(self.fmt, res)
self._router_interface_action('add',
router['router']['id'],
@ -2329,7 +2327,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
p['port']['fixed_ips'][0]['subnet_id']}}
with self.subnet(cidr='12.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
res = self._create_router(self.fmt, _uuid())
res = self._create_router(self.fmt)
r = self.deserialize(self.fmt, res)
self._add_external_gateway_to_router(
r['router']['id'],
@ -2346,12 +2344,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_show(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
expected_value = [('name', name), ('tenant_id', self._tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
with self.router(name='router1', admin_state_up=True,
tenant_id=tenant_id) as router:
with self.router(name='router1', admin_state_up=True) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
@ -2365,7 +2361,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
s1['subnet']['network_id'])
self._update('networks', s1['subnet']['network_id'],
{'network': {extnet_apidef.EXTERNAL: False}},
expected_code=exc.HTTPConflict.code)
expected_code=exc.HTTPConflict.code,
as_admin=True)
def test_network_update_external(self):
with self.router() as r:
@ -2377,7 +2374,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
r['router']['id'],
s1['subnet']['network_id'])
self._update('networks', testnet['network']['id'],
{'network': {extnet_apidef.EXTERNAL: False}})
{'network': {extnet_apidef.EXTERNAL: False}},
as_admin=True)
def test_floatingip_crd_ops(self):
with self.floatingip_with_assoc() as fip:
@ -2457,8 +2455,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._make_floatingip(
self.fmt,
public_sub['subnet']['network_id'],
port_id=private_port['port']['id'],
set_context=False)
port_id=private_port['port']['id'])
self.assertTrue(agent_notification.called)
def test_floating_port_status_not_applicable(self):
@ -2903,23 +2900,23 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
with self.subnet(cidr='11.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
with self.port() as private_port:
with self.router(tenant_id='router-owner',
set_context=True) as r:
with self.router(tenant_id='router-owner') as r:
sid = private_port['port']['fixed_ips'][0]['subnet_id']
private_sub = {'subnet': {'id': sid}}
self._add_external_gateway_to_router(
r['router']['id'],
public_sub['subnet']['network_id'])
public_sub['subnet']['network_id'],
as_admin=True)
self._router_interface_action(
'add', r['router']['id'],
private_sub['subnet']['id'], None)
private_sub['subnet']['id'], None,
as_admin=True)
self._make_floatingip(self.fmt,
public_sub['subnet']['network_id'],
port_id=private_port['port']['id'],
fixed_ip=None,
set_context=True)
fixed_ip=None)
def test_floatingip_update_different_router(self):
# Create subnet with different CIDRs to account for plugins which
@ -2983,10 +2980,12 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_floatingip_update_different_port_owner_as_admin(self):
with self.subnet() as private_sub:
with self.floatingip_no_assoc(private_sub) as fip:
with self.port(subnet=private_sub, tenant_id='other') as p:
with self.port(subnet=private_sub, tenant_id='other',
is_admin=True) as p:
body = self._update('floatingips', fip['floatingip']['id'],
{'floatingip':
{'port_id': p['port']['id']}})
{'port_id': p['port']['id']}},
as_admin=True)
self.assertEqual(p['port']['id'],
body['floatingip']['port_id'])
@ -3032,7 +3031,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
p['port']['fixed_ips'][0]['subnet_id']}}
with self.subnet(cidr='12.0.0.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
res = self._create_router(self.fmt, _uuid())
res = self._create_router(self.fmt)
r = self.deserialize(self.fmt, res)
self._add_external_gateway_to_router(
r['router']['id'],
@ -3060,8 +3059,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
res = self._create_floatingip(
self.fmt,
public_sub['subnet']['network_id'],
subnet_id=public_sub['subnet']['id'],
set_context=True)
subnet_id=public_sub['subnet']['id'])
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def test_create_floatingip_with_subnet_id_and_fip_address(self):
@ -3073,7 +3071,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.fmt,
ext_net['network']['id'],
subnet_id=ext_subnet['subnet']['id'],
floating_ip='10.10.10.100')
floating_ip='10.10.10.100',
as_admin=True)
fip = self.deserialize(self.fmt, res)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
self.assertEqual('10.10.10.100',
@ -3088,7 +3087,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self.fmt,
ext_net['network']['id'],
subnet_id=ext_subnet['subnet']['id'],
floating_ip='20.20.20.200')
floating_ip='20.20.20.200',
as_admin=True)
data = self.deserialize(self.fmt, res)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = str(n_exc.InvalidIpForSubnet(ip_address='20.20.20.200'))
@ -3472,7 +3472,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
fp = self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.0.10')
floating_ip='10.0.0.10',
as_admin=True)
self.assertEqual('10.0.0.10',
fp['floatingip']['floating_ip_address'])
@ -3484,18 +3485,17 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
fp = self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.0.30')
floating_ip='10.0.0.30',
as_admin=True)
self.assertEqual('10.0.0.30',
fp['floatingip']['floating_ip_address'])
def test_create_floatingip_with_specific_ip_non_admin(self):
ctx = context.Context('user_id', 'tenant_id')
with self.subnet(cidr='10.0.0.0/24') as s:
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
set_context=ctx,
tenant_id='tenant_id',
floating_ip='10.0.0.10',
http_status=exc.HTTPForbidden.code)
@ -3506,7 +3506,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.1.10',
http_status=exc.HTTPBadRequest.code)
http_status=exc.HTTPBadRequest.code,
as_admin=True)
def test_create_floatingip_with_duplicated_specific_ip(self):
@ -3514,11 +3515,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
network_id = s['subnet']['network_id']
self._set_net_external(network_id)
self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.0.10')
floating_ip='10.0.0.10',
as_admin=True)
self._make_floatingip(self.fmt, network_id,
floating_ip='10.0.0.10',
http_status=exc.HTTPConflict.code)
http_status=exc.HTTPConflict.code,
as_admin=True)
def test_create_floatingips_native_quotas(self):
quota = 1
@ -3711,7 +3714,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
{'port_id': port['port']['id']})
# fetch port and confirm device_id and device_owner
body = self._show('ports', port['port']['id'])
body = self._show('ports', port['port']['id'],
tenant_id=tenant_id)
self.assertEqual('', body['port']['device_owner'])
self.assertEqual('', body['port']['device_id'])
@ -3756,7 +3760,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
data = {'port': {'fixed_ips': [
{'ip_address': gw_ip}]}}
req = self.new_update_request('ports', data,
gw_port_id)
gw_port_id,
as_admin=True)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(gw_ip_len, len(res['port']['fixed_ips']))
@ -3833,9 +3838,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
'network_id': network['network']['id'],
'subnetpool_id': subnetpool_id,
'prefixlen': 24,
'ip_version': lib_constants.IP_VERSION_4,
'tenant_id': tenant_id}}
req = self.new_create_request('subnets', data)
'ip_version': lib_constants.IP_VERSION_4}}
req = self.new_create_request('subnets', data, tenant_id=tenant_id)
subnet = self.deserialize(self.fmt, req.get_response(self.api))
admin_ctx = context.get_admin_context()
@ -3881,7 +3885,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
# simulate a failed update by just setting the device_id of
# the fip port back to PENDING
data = {'port': {'device_id': 'PENDING'}}
self._update('ports', fip_port['id'], data)
self._update('ports', fip_port['id'], data, as_admin=True)
plugin._clean_garbage()
# first call just marks as candidate, so it shouldn't be changed
port = self._show('ports', fip_port['id'])
@ -3925,7 +3929,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
events.BEFORE_DELETE)
with self.subnet():
res = self._create_router(self.fmt, _uuid())
res = self._create_router(self.fmt)
router = self.deserialize(self.fmt, res)
self._delete('routers', router['router']['id'],
exc.HTTPForbidden.code)
@ -4151,8 +4155,7 @@ class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
f = self._make_floatingip(self.fmt,
public_sub['subnet']['network_id'],
port_id=None,
fixed_ip=None,
set_context=True)
fixed_ip=None)
self._delete('floatingips', f['floatingip']['id'])
fake_method.assert_called_once_with(
resources.FLOATING_IP, events.AFTER_DELETE, mock.ANY,
@ -4194,7 +4197,7 @@ class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
# converted into its API equivalent of 404
e404 = mock.Mock(side_effect=l3_exc.RouterNotFound(router_id='1'))
registry.subscribe(e404, resources.ROUTER, events.PRECOMMIT_CREATE)
res = self._create_router(self.fmt, 'tenid')
res = self._create_router(self.fmt)
self.assertEqual(exc.HTTPNotFound.code, res.status_int)
# make sure nothing committed
body = self._list('routers')
@ -4521,7 +4524,7 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
self.mock_admin_client.reset_mock()
def _create_network(self, fmt, name, admin_state_up,
arg_list=None, set_context=False, tenant_id=None,
arg_list=None, tenant_id=None, as_admin=False,
**kwargs):
new_arg_list = ('dns_domain',)
if arg_list is not None:
@ -4529,12 +4532,12 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
return super(L3NatDBFloatingIpTestCaseWithDNS,
self)._create_network(fmt, name, admin_state_up,
arg_list=new_arg_list,
set_context=set_context,
tenant_id=tenant_id,
as_admin=as_admin,
**kwargs)
def _create_port(self, fmt, name, admin_state_up,
arg_list=None, set_context=False, tenant_id=None,
arg_list=None, tenant_id=None, is_admin=False,
**kwargs):
new_arg_list = ('dns_name',)
if arg_list is not None:
@ -4542,8 +4545,8 @@ class L3NatDBFloatingIpTestCaseWithDNS(L3BaseForSepTests, L3NatTestCaseMixin):
return super(L3NatDBFloatingIpTestCaseWithDNS,
self)._create_port(fmt, name, admin_state_up,
arg_list=new_arg_list,
set_context=set_context,
tenant_id=tenant_id,
is_admin=is_admin,
**kwargs)
def _create_net_sub_port(self, dns_domain='', dns_name=''):


@ -19,7 +19,6 @@ from webob import exc
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_conntrack_helper as l3_ct
from neutron_lib import context
from oslo_utils import uuidutils
from neutron.extensions import l3
@ -67,19 +66,16 @@ class L3NConntrackHelperTestCase(test_l3.L3BaseForIntTests,
def _create_router_conntrack_helper(self, fmt, router_id,
protocol, port, helper):
tenant_id = self.tenant_id or _uuid()
data = {'conntrack_helper': {
"protocol": protocol,
"port": port,
"helper": helper}
}
router_ct_req = self._req(
'POST', 'routers', data,
router_ct_req = self.new_create_request(
'routers', data,
fmt or self.fmt, id=router_id,
subresource='conntrack_helpers')
router_ct_req.environ['neutron.context'] = context.Context(
'', tenant_id, is_admin=True)
subresource='conntrack_helpers',
as_admin=True)
return router_ct_req.get_response(self.ext_api)
@ -90,11 +86,10 @@ class L3NConntrackHelperTestCase(test_l3.L3BaseForIntTests,
conntrack_helper[k] = v
data = {'conntrack_helper': conntrack_helper}
router_ct_req = self._req(
'PUT', 'routers', data,
fmt or self.fmt, id=router_id,
sub_id=conntrack_helper_id,
subresource='conntrack_helpers')
router_ct_req = self.new_update_request(
'routers', data, router_id,
fmt or self.fmt, sub_id=conntrack_helper_id,
subresource='conntrack_helpers', as_admin=True)
return router_ct_req.get_response(self.ext_api)
def test_create_ct_with_duplicate_entry(self):


@ -380,7 +380,7 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
def _set_router_external_gateway(self, router_id, network_id,
snat_enabled=None,
expected_code=exc.HTTPOk.code,
neutron_context=None):
tenant_id=None, as_admin=False):
ext_gw_info = {'network_id': network_id}
# Need to set enable_snat also if snat_enabled == False
if snat_enabled is not None:
@ -389,7 +389,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
{'router': {'external_gateway_info':
ext_gw_info}},
expected_code=expected_code,
neutron_context=neutron_context)
request_tenant_id=tenant_id,
as_admin=as_admin)
def test_router_gateway_set_fail_after_port_create(self):
with self.router() as r, self.subnet() as s:
@ -444,7 +445,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
('external_gateway_info', None)]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id) as router:
res = self._show('routers', router['router']['id'])
res = self._show('routers', router['router']['id'],
tenant_id=tenant_id)
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
@ -468,8 +470,10 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'subnet_id': s['subnet']['id']}]})]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id,
external_gateway_info=input_value) as router:
res = self._show('routers', router['router']['id'])
external_gateway_info=input_value,
as_admin=True) as router:
res = self._show('routers', router['router']['id'],
tenant_id=tenant_id)
for k, v in expected_value:
self.assertEqual(v, res['router'][k])
@ -493,7 +497,8 @@ class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
self._set_router_external_gateway(
r['router']['id'], ext_net_id,
snat_enabled=snat_input_value,
expected_code=expected_http_code)
expected_code=expected_http_code,
as_admin=True)
if expected_http_code != exc.HTTPOk.code:
return
body = self._show('routers', r['router']['id'])


@ -22,7 +22,6 @@ from neutron_lib.api.definitions import external_net as enet_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_ext_gw_mode
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import fixture
from oslo_config import cfg
from oslo_utils import uuidutils
@ -66,7 +65,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
test_l3.L3BaseForIntTests,
test_l3.L3NatTestCaseMixin):
fmt = 'json'
tenant_id = _uuid()
_tenant_id = _uuid()
def setUp(self):
mock.patch('neutron.api.rpc.handlers.resources_rpc.'
@ -81,11 +80,11 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
self.address_scope_id = self._make_address_scope(
self.fmt, constants.IP_VERSION_6,
**{'tenant_id': self.tenant_id})['address_scope']['id']
**{'tenant_id': self._tenant_id})['address_scope']['id']
self.subnetpool_id = self._make_subnetpool(
self.fmt, ['2001::0/96'],
**{'address_scope_id': self.address_scope_id,
'default_prefixlen': 112, 'tenant_id': self.tenant_id,
'default_prefixlen': 112,
'name': "test-ipv6-pool"})['subnetpool']['id']
self.ext_net = self._make_network(
self.fmt, 'ext-net', True)
@ -103,7 +102,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
self._ext_subnet_v6_id = self._ext_subnet_v6['subnet']['id']
self.router1 = self._make_router(self.fmt, self.tenant_id)
self.router1 = self._make_router(self.fmt, self._tenant_id)
self.router1_id = self.router1['router']['id']
self.private_net = self._make_network(self.fmt, 'private-net', True)
self.private_subnet = self._make_subnet(
@ -125,7 +124,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
description=None, fmt=None, tenant_id=None,
expected_code=exc.HTTPCreated.code,
expected_message=None):
tenant_id = tenant_id or self.tenant_id
tenant_id = tenant_id or self._tenant_id
data = {'ndp_proxy': {
"port_id": port_id,
"router_id": router_id}
@ -135,11 +134,9 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
if description:
data['ndp_proxy']['description'] = description
req_res = self._req(
'POST', 'ndp-proxies', data,
fmt or self.fmt)
req_res.environ['neutron.context'] = context.Context(
'', tenant_id, is_admin=True)
req_res = self.new_create_request(
'ndp-proxies', data, fmt or self.fmt,
tenant_id=tenant_id, as_admin=True)
res = req_res.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
@ -152,15 +149,14 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
tenant_id=None, fmt=None,
expected_code=exc.HTTPOk.code,
expected_message=None, **kwargs):
tenant_id = tenant_id or self.tenant_id
tenant_id = tenant_id or self._tenant_id
data = {}
for k, v in kwargs.items():
data[k] = v
req_res = self._req(
'PUT', 'ndp-proxies', {'ndp_proxy': data},
fmt or self.fmt, id=ndp_proxy_id)
req_res.environ['neutron.context'] = context.Context(
'', tenant_id, is_admin=True)
req_res = self.new_update_request(
'ndp-proxies', {'ndp_proxy': data},
ndp_proxy_id, fmt or self.fmt,
tenant_id=tenant_id, as_admin=True)
res = req_res.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
@ -208,13 +204,12 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def _update_router(self, router_id, update_date, tenant_id=None,
fmt=None, expected_code=exc.HTTPOk.code,
expected_message=None):
tenant_id = tenant_id or self.tenant_id
tenant_id = tenant_id or self._tenant_id
data = {'router': update_date}
router_req = self.new_update_request(
'routers', id=router_id, data=data,
fmt=(fmt or self.fmt))
router_req.environ['neutron.context'] = context.Context(
'', tenant_id, is_admin=True)
fmt=(fmt or self.fmt),
tenant_id=tenant_id, as_admin=True)
res = router_req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:
@ -275,7 +270,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
ipv6_address_mode=constants.DHCPV6_STATEFUL):
self._set_net_external(ext_net['network']['id'])
res = self._make_router(
self.fmt, self.tenant_id,
self.fmt, self._tenant_id,
external_gateway_info={'network_id': ext_net['network']['id']},
**{'enable_ndp_proxy': True})
expected_msg = (
@ -284,7 +279,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
"scope.") % ext_net['network']['id']
self.assertTrue(expected_msg in res['NeutronError']['message'])
router = self._make_router(
self.fmt, self.tenant_id,
self.fmt, self._tenant_id,
external_gateway_info={'network_id': ext_net['network']['id']})
expected_msg = (
"Can not enable ndp proxy on router %s, The router has no "
@ -473,18 +468,18 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def test_create_ndp_proxy_with_different_address_scope(self):
with self.address_scope(
ip_version=constants.IP_VERSION_6,
tenant_id=self.tenant_id) as addr_scope, \
tenant_id=self._tenant_id) as addr_scope, \
self.subnetpool(['2001::100:0:0/100'],
**{'address_scope_id': addr_scope['address_scope']['id'],
'default_prefixlen': 112, 'name': 'test1',
'tenant_id': self.tenant_id}) as subnetpool, \
'tenant_id': self._tenant_id}) as subnetpool, \
self.subnet(
cidr='2001::100:1:0/112',
ip_version=constants.IP_VERSION_6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL,
subnetpool_id=subnetpool['subnetpool']['id'],
tenant_id=self.tenant_id) as subnet, \
tenant_id=self._tenant_id) as subnet, \
self.port(subnet) as port:
subnet_id = subnet['subnet']['id']
port_id = port['port']['id']
@ -503,9 +498,7 @@ class L3NDPProxyTestCase(test_address_scope.AddressScopeTestCase,
def _create_router(self, data, expected_code=exc.HTTPCreated.code,
expected_message=None):
router_req = self.new_create_request(
'routers', data, self.fmt)
router_req.environ['neutron.context'] = context.Context(
'', self.tenant_id, is_admin=True)
'routers', data, self.fmt, as_admin=True)
res = router_req.get_response(self.ext_api)
self.assertEqual(expected_code, res.status_int)
if expected_message:


@ -19,7 +19,6 @@ from unittest import mock
import netaddr
from neutron_lib.api.definitions import local_ip as apidef
from neutron_lib import constants
from neutron_lib import context
import webob.exc
from neutron.extensions import local_ip as lip_ext
@ -46,10 +45,8 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
for k, v in kwargs.items():
local_ip['local_ip'][k] = v
req = self.new_create_request('local-ips', local_ip)
neutron_context = context.Context(
'', kwargs.get('project_id', self._tenant_id), is_admin=True)
req.environ['neutron.context'] = neutron_context
req = self.new_create_request('local-ips', local_ip,
tenant_id=self._tenant_id, as_admin=True)
res = req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@ -57,9 +54,7 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _update_local_ip(self, lip_id, data):
update_req = self.new_update_request(
'local-ips', data, lip_id)
update_req.environ['neutron.context'] = context.Context(
'', self._tenant_id)
'local-ips', data, lip_id, tenant_id=self._tenant_id)
res = update_req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@ -73,9 +68,8 @@ class LocalIPTestBase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
req = self.new_create_request('local_ips',
data=local_ip_assoc,
id=local_ip_id,
subresource='port_associations')
neutron_context = context.Context('', self._tenant_id)
req.environ['neutron.context'] = neutron_context
subresource='port_associations',
tenant_id=self._tenant_id)
res = req.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)


@ -65,7 +65,9 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# list by query fields: total_ips
params = 'fields=total_ips'
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE,
params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -83,7 +85,8 @@ class TestNetworkIPAvailabilityAPI(
params = ['total_ips']
request = self.new_show_request(API_RESOURCE,
network['id'],
fields=params)
fields=params,
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@ -103,7 +106,9 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
network = net['network']
# Get ALL
request = self.new_list_request(API_RESOURCE, self.fmt)
request = self.new_list_request(API_RESOURCE,
self.fmt,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -112,7 +117,8 @@ class TestNetworkIPAvailabilityAPI(
net, 0)
# Get single via id
request = self.new_show_request(API_RESOURCE, network['id'])
request = self.new_show_request(API_RESOURCE, network['id'],
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@ -134,7 +140,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=subnet3_1):
# Test get ALL
request = self.new_list_request(API_RESOURCE)
request = self.new_list_request(API_RESOURCE,
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -148,7 +155,8 @@ class TestNetworkIPAvailabilityAPI(
# Test get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
network['id'])
network['id'],
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@ -165,7 +173,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=subnet1_2),\
self.port(subnet=subnet1_2):
# Get ALL
request = self.new_list_request(API_RESOURCE)
request = self.new_list_request(API_RESOURCE,
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -176,7 +185,8 @@ class TestNetworkIPAvailabilityAPI(
# Get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
network['id'])
network['id'],
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
@ -186,7 +196,8 @@ class TestNetworkIPAvailabilityAPI(
def test_usages_port_consumed_v4(self):
with self.network() as net:
with self.subnet(network=net) as subnet:
request = self.new_list_request(API_RESOURCE)
request = self.new_list_request(API_RESOURCE,
as_admin=True)
# Consume 2 ports
with self.port(subnet=subnet), self.port(subnet=subnet):
response = self.deserialize(self.fmt,
@ -200,7 +211,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get IPv4
params = 'ip_version=%s' % constants.IP_VERSION_4
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -210,7 +222,8 @@ class TestNetworkIPAvailabilityAPI(
# Get IPv6 should return empty array
params = 'ip_version=%s' % constants.IP_VERSION_6
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@ -225,7 +238,8 @@ class TestNetworkIPAvailabilityAPI(
ipv6_address_mode=constants.DHCPV6_STATELESS):
# Get IPv6
params = 'ip_version=%s' % constants.IP_VERSION_6
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
@ -234,7 +248,8 @@ class TestNetworkIPAvailabilityAPI(
# Get IPv4 should return empty array
params = 'ip_version=%s' % constants.IP_VERSION_4
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@ -247,7 +262,8 @@ class TestNetworkIPAvailabilityAPI(
network=net, cidr=cidr_ipv6,
ip_version=constants.IP_VERSION_6,
ipv6_address_mode=constants.DHCPV6_STATELESS) as subnet:
request = self.new_list_request(API_RESOURCE)
request = self.new_list_request(API_RESOURCE,
as_admin=True)
# Consume 3 ports
with self.port(subnet=subnet),\
self.port(subnet=subnet), \
@ -266,7 +282,8 @@ class TestNetworkIPAvailabilityAPI(
test_id = network['id']
# Get by query param: network_id
params = 'network_id=%s' % test_id
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -276,7 +293,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: network_id
params = 'network_id=clearlywontmatch'
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@ -287,7 +305,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: network_name
params = 'network_name=%s' % test_name
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -297,7 +316,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: network_name
params = 'network_name=clearly-wont-match'
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@ -308,7 +328,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: tenant_id
params = 'tenant_id=%s' % test_tenant_id
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -320,7 +341,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: tenant_id
params = 'tenant_id=clearly-wont-match'
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@ -331,7 +353,8 @@ class TestNetworkIPAvailabilityAPI(
with self.subnet(network=net):
# Get by query param: project_id
params = 'project_id=%s' % test_project_id
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
@ -343,7 +366,8 @@ class TestNetworkIPAvailabilityAPI(
# Get by NON-matching query param: project_id
params = 'project_id=clearly-wont-match'
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
@ -369,7 +393,8 @@ class TestNetworkIPAvailabilityAPI(
self.port(subnet=s42), self.port(subnet=s42):
# Verify consumption across all
request = self.new_list_request(API_RESOURCE)
request = self.new_list_request(API_RESOURCE,
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
avails_list = response[IP_AVAILS_KEY]
@ -387,7 +412,8 @@ class TestNetworkIPAvailabilityAPI(
constants.IP_VERSION_6]:
params = 'ip_version=%i' % ip_ver
request = self.new_list_request(API_RESOURCE,
params=params)
params=params,
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
for net_avail in response[IP_AVAILS_KEY]:
@ -399,7 +425,8 @@ class TestNetworkIPAvailabilityAPI(
API_RESOURCE,
params='network_id=%s&network_id=%s'
% (net_v4_2['network']['id'],
net_v6_2['network']['id']))
net_v6_2['network']['id']),
as_admin=True)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
avails_list = response[IP_AVAILS_KEY]
@ -414,7 +441,8 @@ class TestNetworkIPAvailabilityAPI(
networks = (net1, net2, net3, net4)
for idx in range(1, len(networks) + 1):
params = 'limit=%s' % idx
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(idx, len(response[IP_AVAILS_KEY]))
@ -426,14 +454,16 @@ class TestNetworkIPAvailabilityAPI(
network_ids = sorted([net['network']['id'] for net in networks])
params = 'sort_key=network_id;sort_dir=asc'
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
res = [net['network_id'] for net in response[IP_AVAILS_KEY]]
self.assertEqual(network_ids, res)
params = 'sort_key=network_id;sort_dir=desc'
request = self.new_list_request(API_RESOURCE, params=params)
request = self.new_list_request(API_RESOURCE, params=params,
as_admin=True)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
res = [net['network_id'] for net in response[IP_AVAILS_KEY]]


@ -56,7 +56,8 @@ class NetworkSegmentRangeTestBase(test_db_base_plugin_v2.
network_segment_range['network_segment_range'][k] = str(v)
network_segment_range_req = self.new_create_request(
'network-segment-ranges', network_segment_range, fmt)
'network-segment-ranges', network_segment_range, fmt,
as_admin=True)
network_segment_range_res = network_segment_range_req.get_response(
self.ext_api)
@ -84,7 +85,7 @@ class NetworkSegmentRangeTestBase(test_db_base_plugin_v2.
def _test_update_network_segment_range(self, range_id,
data, expected=None):
update_req = self.new_update_request(
'network-segment-ranges', data, range_id)
'network-segment-ranges', data, range_id, as_admin=True)
update_res = update_req.get_response(self.ext_api)
if expected:
@ -264,7 +265,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'name': 'foo-name'}},
expected_code=webob.exc.HTTPOk.code)
expected_code=webob.exc.HTTPOk.code,
as_admin=True)
self.assertEqual('foo-name',
result['network_segment_range']['name'])
@ -277,7 +279,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'name': ''}},
expected_code=webob.exc.HTTPOk.code)
expected_code=webob.exc.HTTPOk.code,
as_admin=True)
self.assertEqual('', result['network_segment_range']['name'])
def test_update_network_segment_range_min_max(self):
@ -288,7 +291,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
'network-segment-ranges',
network_segment_range['network_segment_range']['id'],
{'network_segment_range': {'minimum': 1200, 'maximum': 1300}},
expected_code=webob.exc.HTTPOk.code)
expected_code=webob.exc.HTTPOk.code,
as_admin=True)
self.assertEqual(1200, result['network_segment_range']['minimum'])
self.assertEqual(1300, result['network_segment_range']['maximum'])
@ -296,7 +300,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
network_segment_range = self._test_create_network_segment_range()
req = self.new_show_request(
'network-segment-ranges',
network_segment_range['network_segment_range']['id'])
network_segment_range['network_segment_range']['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(
network_segment_range['network_segment_range']['id'],
@ -306,7 +311,7 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_create_network_segment_range(name='foo-range1')
self._test_create_network_segment_range(
name='foo-range2', minimum=400, maximum=500)
res = self._list('network-segment-ranges')
res = self._list('network-segment-ranges', as_admin=True)
self.assertEqual(2, len(res['network_segment_ranges']))
def test_list_network_segment_ranges_with_sort(self):
@ -316,7 +321,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
name='foo-range2', physical_network='phys_net2')
self._test_list_with_sort('network-segment-range',
(range2, range1),
[('name', 'desc')])
[('name', 'desc')],
as_admin=True)
def test_list_network_segment_ranges_with_pagination(self):
range1 = self._test_create_network_segment_range(
@ -328,7 +334,8 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_list_with_pagination(
'network-segment-range',
(range1, range2, range3),
('name', 'asc'), 2, 2)
('name', 'asc'), 2, 2,
as_admin=True)
def test_list_network_segment_ranges_with_pagination_reverse(self):
range1 = self._test_create_network_segment_range(
@ -340,14 +347,17 @@ class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
self._test_list_with_pagination_reverse(
'network-segment-range',
(range1, range2, range3),
('name', 'asc'), 2, 2)
('name', 'asc'), 2, 2,
as_admin=True)
def test_delete_network_segment_range(self):
network_segment_range = self._test_create_network_segment_range()
with mock.patch.object(segments_db, 'network_segments_exist_in_range',
return_value=False):
self._delete('network-segment-ranges',
network_segment_range['network_segment_range']['id'])
network_segment_range['network_segment_range']['id'],
as_admin=True)
self._show('network-segment-ranges',
network_segment_range['network_segment_range']['id'],
expected_code=webob.exc.HTTPNotFound.code)
expected_code=webob.exc.HTTPNotFound.code,
as_admin=True)


@ -18,7 +18,6 @@ from unittest import mock
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api import validators
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib.exceptions import port_security as psec_exc
@ -311,11 +310,11 @@ class TestPortSecurity(PortSecurityDBTestCase):
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
set_context=True,
tenant_id='admin_tenant',
port_security_enabled=False)
net = self.deserialize('json', res)
self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
self._create_subnet('json', net['network']['id'], '10.0.0.0/24',
tenant_id='admin_tenant')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf',
tenant_id='other_tenant'))
@ -323,7 +322,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
set_context=True,
is_admin=True,
tenant_id='admin_tenant',
port_security_enabled=True,
@ -331,19 +329,18 @@ class TestPortSecurity(PortSecurityDBTestCase):
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
self.assertEqual(port['port']['security_groups'], [security_group_id])
self._delete('ports', port['port']['id'])
self._delete('ports', port['port']['id'], tenant_id='admin_tenant')
def test_create_port_with_no_admin_use_other_tenant_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
set_context=True,
tenant_id='demo_tenant',
port_security_enabled=False)
net = self.deserialize('json', res)
self._create_subnet('json', net['network']['id'], '10.0.0.0/24',
set_context=True, tenant_id='demo_tenant')
tenant_id='demo_tenant')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf',
tenant_id='other_tenant'))
@ -351,7 +348,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
set_context=True,
tenant_id='demo_tenant',
port_security_enabled=True,
security_groups=[security_group_id])
@ -396,7 +392,7 @@ class TestPortSecurity(PortSecurityDBTestCase):
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
set_context=True, is_admin=True,
is_admin=True,
tenant_id='admin_tenant',)
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
@ -408,7 +404,9 @@ class TestPortSecurity(PortSecurityDBTestCase):
update_port = {'port':
{'security_groups': [security_group_id]}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port['port']['id'],
tenant_id='admin_tenant',
as_admin=True)
port = self.deserialize('json', req.get_response(self.api))
security_groups = port['port']['security_groups']
self.assertIn(security_group_id, security_groups)
@ -420,7 +418,6 @@ class TestPortSecurity(PortSecurityDBTestCase):
with self.network(tenant_id='demo_tenant') as net:
with self.subnet(network=net, tenant_id='demo_tenant'):
res = self._create_port('json', net['network']['id'],
set_context=True,
tenant_id='demo_tenant',)
port = self.deserialize('json', res)
self.assertTrue(port['port'][psec.PORTSECURITY])
@ -432,9 +429,8 @@ class TestPortSecurity(PortSecurityDBTestCase):
update_port = {'port':
{'security_groups': [security_group_id]}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
req.environ['neutron.context'] = context.Context(
'', 'other_tenant')
port['port']['id'],
tenant_id='other_tenant')
res = req.get_response(self.api)
self.assertEqual(404, res.status_int)
@ -490,29 +486,26 @@ class TestPortSecurity(PortSecurityDBTestCase):
self._delete('ports', port['port']['id'])
def test_create_port_security_off_shared_network(self):
with self.network(shared=True) as net:
with self.network(as_admin=True, shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=False,
tenant_id='not_network_owner',
set_context=True)
tenant_id='not_network_owner')
self.deserialize('json', res)
self.assertEqual(403, res.status_int)
def test_update_port_security_off_shared_network(self):
with self.network(shared=True) as net:
with self.network(as_admin=True, shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
tenant_id='not_network_owner',
set_context=True)
tenant_id='not_network_owner')
port = self.deserialize('json', res)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
req.environ['neutron.context'] = context.Context(
'', 'not_network_owner')
port['port']['id'],
tenant_id='not_network_owner')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
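
The shared-network fixtures above now open the network with as_admin=True because, once the new policy defaults are enforced, marking a network as shared is an admin-level operation. A minimal sketch of how such a default is expressed with oslo.policy, assuming an illustrative 'role:admin' check string (the authoritative defaults live in neutron/conf/policies and may be phrased differently):

    # Illustrative only: the real default for this rule is defined in
    # neutron/conf/policies/network.py and may use a different check string.
    from oslo_policy import policy

    SHARED_NETWORK_ADMIN_RULE = policy.DocumentedRuleDefault(
        name='create_network:shared',
        check_str='role:admin',
        description='Make a network shared across projects',
        operations=[{'method': 'POST', 'path': '/networks'}],
    )

Only the fixture that marks the network shared needs the admin request; the port operations under test still run as the non-owner project.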


@ -92,21 +92,28 @@ class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
def _put_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
ctx.roles = ['member', 'reader']
if ctx.is_admin:
ctx.roles.append('admin')
env = {'neutron.context': ctx}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': ctx.tenant_id,
instance.get_network.return_value = {'project_id': ctx.tenant_id,
'shared': False}
net_id = uuidutils.generate_uuid()
res = self.api.put(test_base._get_path('networks',
id=net_id,
fmt=self.fmt),
self.serialize({'network': data}),
content_type='application/' + self.fmt,
extra_environ=env,
expect_errors=expect_errors)
return res, data, net_id
def _post_network_with_provider_attrs(self, ctx, expect_errors=False):
data = self._prepare_net_data()
ctx.roles = ['member', 'reader']
if ctx.is_admin:
ctx.roles.append('admin')
env = {'neutron.context': ctx}
res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
self.serialize({'network': data}),
@ -119,6 +126,9 @@ class ProvidernetExtensionTestCase(testlib_api.WebTestCase):
expect_errors=False):
data = self._prepare_net_data()
data.update(bad_data)
ctx.roles = ['member', 'reader']
if ctx.is_admin:
ctx.roles.append('admin')
env = {'neutron.context': ctx}
res = self.api.post(test_base._get_path('networks', fmt=self.fmt),
self.serialize({'network': data}),
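
The three request helpers in this test case now repeat the same role bootstrap (ctx.roles = ['member', 'reader'], plus 'admin' for admin contexts). A minimal consolidation sketch, reusing only the neutron_lib.context.Context calls already present in these tests; the helper name _request_environ is hypothetical, not part of this patch:

    from neutron_lib import context


    def _request_environ(tenant_id, is_admin=False):
        # Build the WSGI environ these tests hand to webtest, carrying the
        # roles the new policy defaults expect on every request.
        ctx = context.Context('', tenant_id, is_admin=is_admin)
        ctx.roles = ['member', 'reader']
        if is_admin:
            ctx.roles.append('admin')
        return {'neutron.context': ctx}

Each _put/_post_network_with_provider_attrs variant could then pass extra_environ=_request_environ(ctx.tenant_id, ctx.is_admin) instead of mutating the incoming context inline.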


@ -69,7 +69,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
project_id='tenant', name='pol1',
project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@ -88,7 +88,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
project_id='tenant', name='pol1',
project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@ -115,7 +115,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
project_id='tenant', name='pol1',
project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\
@ -153,7 +153,7 @@ class GatewayIPQoSDBTestCaseBase(object):
ctx = context.get_admin_context()
policy_obj = policy.QosPolicy(ctx,
id=uuidutils.generate_uuid(),
project_id='tenant', name='pol1',
project_id=self._tenant_id, name='pol1',
rules=[])
policy_obj.create()
with self.subnet(cidr='11.0.0.0/24') as public_sub,\


@ -120,8 +120,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_with_admin(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=True)}
env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@ -137,8 +136,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_with_owner_project(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=False)}
env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@ -154,8 +152,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_default_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=False)}
env = test_base._get_neutron_env(project_id + '2', as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id,
action=DEFAULT_QUOTAS_ACTION,
fmt=self.fmt),
@ -164,8 +161,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_quotas_with_admin(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=True)}
env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@ -179,16 +175,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_show_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=False)}
env = test_base._get_neutron_env(project_id + '2', as_admin=False)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_show_quotas_with_owner_project(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=False)}
env = test_base._get_neutron_env(project_id, as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@ -202,8 +196,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_list_quotas_with_admin(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@ -212,16 +205,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_list_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=False)}
env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.get(_get_path('quotas', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_update_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=False)}
env = test_base._get_neutron_env(project_id, as_admin=False)
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -230,8 +221,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_non_integer_returns_400(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 'abc'}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -240,8 +230,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_negative_integer_returns_400(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': -2}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -250,8 +239,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_out_of_range_integer_returns_400(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -260,8 +248,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_to_unlimited(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': -1}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -270,8 +257,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_exceeding_current_limit(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 120}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -280,8 +266,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_non_support_resource_returns_400(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'abc': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -290,8 +275,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_quotas_with_admin(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=True)}
env = test_base._get_neutron_env(project_id + '2', as_admin=True)
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
@ -306,8 +290,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_update_attributes(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=True)}
env = test_base._get_neutron_env(project_id + '2', as_admin=True)
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
@ -321,8 +304,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
@mock.patch.object(driver_nolock.DbQuotaNoLockDriver, 'get_resource_usage')
def test_update_quotas_check_limit(self, mock_get_resource_usage):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
env = test_base._get_neutron_env(tenant_id, as_admin=True)
quotas = {'quota': {'network': 100, 'check_limit': False}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
@ -338,8 +320,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_delete_quotas_with_admin(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=True)}
env = test_base._get_neutron_env(project_id + '2', as_admin=True)
# Create a quota to ensure we have something to delete
quotas = {'quota': {'network': 100}}
self.api.put(_get_path('quotas', id=project_id, fmt=self.fmt),
@ -350,16 +331,14 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_delete_quotas_without_admin_forbidden_returns_403(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=False)}
env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
def test_delete_quota_with_unknown_project_returns_404(self):
project_id = 'idnotexist'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=True)}
env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(exc.HTTPNotFound.code, res.status_int)
@ -373,8 +352,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
def test_quotas_limit_check(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=True)}
env = test_base._get_neutron_env(project_id, as_admin=True)
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=project_id,
fmt=self.fmt),
@ -465,8 +443,7 @@ class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
def test_show_quotas_with_admin(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id + '2',
is_admin=True)}
env = test_base._get_neutron_env(project_id + '2', as_admin=True)
res = self.api.get(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
@ -489,8 +466,7 @@ class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
def test_delete_quotas_forbidden(self):
project_id = 'project_id1'
env = {'neutron.context': context.Context('', project_id,
is_admin=False)}
env = test_base._get_neutron_env(project_id, as_admin=False)
res = self.api.delete(_get_path('quotas', id=project_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)


@ -92,41 +92,39 @@ class SecurityGroupTestExtensionManager(object):
class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _build_security_group(self, name, description, **kwargs):
def _build_security_group(self, name, description):
data = {
'security_group': {
'name': name,
'tenant_id': kwargs.get(
'tenant_id', test_db_base_plugin_v2.TEST_TENANT_ID),
'description': description}}
return data
def _create_security_group_response(self, fmt, data, **kwargs):
security_group_req = self.new_create_request('security-groups', data,
fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
def _create_security_group_response(self, fmt, data, tenant_id=None,
as_admin=False, **kwargs):
security_group_req = self.new_create_request(
'security-groups', data, fmt, tenant_id=tenant_id,
as_admin=as_admin)
return security_group_req.get_response(self.ext_api)
def _create_security_group(self, fmt, name, description, **kwargs):
data = self._build_security_group(name, description, **kwargs)
return self._create_security_group_response(fmt, data, **kwargs)
def _create_security_group(self, fmt, name, description, tenant_id=None,
as_admin=False, **kwargs):
data = self._build_security_group(name, description)
return self._create_security_group_response(
fmt, data, tenant_id=tenant_id, as_admin=as_admin, **kwargs)
def _build_security_group_rule(
self, security_group_id, direction, proto,
port_range_min=None, port_range_max=None,
remote_ip_prefix=None, remote_group_id=None,
remote_address_group_id=None,
tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID,
ethertype=const.IPv4):
tenant_id=None,
ethertype=const.IPv4,
as_admin=False):
data = {'security_group_rule': {'security_group_id': security_group_id,
'direction': direction,
'protocol': proto,
'ethertype': ethertype,
'tenant_id': tenant_id}}
'ethertype': ethertype}}
if port_range_min:
data['security_group_rule']['port_range_min'] = port_range_min
@ -145,19 +143,13 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
return data
def _create_security_group_rule(self, fmt, rules, **kwargs):
def _create_security_group_rule(self, fmt, rules, tenant_id=None,
as_admin=False, **kwargs):
security_group_rule_req = self.new_create_request(
'security-group-rules', rules, fmt)
'security-group-rules', rules, fmt, tenant_id=tenant_id,
as_admin=as_admin)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
security_group_rule_req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
elif kwargs.get('admin_context'):
security_group_rule_req.environ['neutron.context'] = (
context.Context(user_id='admin', tenant_id='admin-tenant',
is_admin=True))
return security_group_rule_req.get_response(self.ext_api)
def _make_security_group(self, fmt, name, description, **kwargs):
@ -166,8 +158,10 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_security_group_rule(self, fmt, rules, **kwargs):
res = self._create_security_group_rule(self.fmt, rules)
def _make_security_group_rule(self, fmt, rules, tenant_id=None,
as_admin=False, **kwargs):
res = self._create_security_group_rule(
self.fmt, rules, tenant_id=tenant_id, as_admin=as_admin)
if res.status_int >= webob.exc.HTTPBadRequest.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@ -819,9 +813,10 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
sg['security_group']['id'], "ingress", const.PROTO_NAME_TCP,
port_range_min=22, port_range_max=22,
remote_ip_prefix="10.0.2.0/24",
ethertype=const.IPv4,
tenant_id='admin-tenant')
self._make_security_group_rule(self.fmt, rule, admin_context=True)
ethertype=const.IPv4)
self._make_security_group_rule(self.fmt, rule,
tenant_id='admin-tenant',
as_admin=True)
# Now, let's make sure all the rules are there, with their odd
# tenant_id behavior.
@ -878,23 +873,20 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPNoContent.code)
webob.exc.HTTPNoContent.code, as_admin=True)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
neutron_context = context.Context(
'', test_db_base_plugin_v2.TEST_TENANT_ID)
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
neutron_context=neutron_context)
tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID)
def test_security_group_list_creates_default_security_group(self):
neutron_context = context.Context(
'', test_db_base_plugin_v2.TEST_TENANT_ID)
sg = self._list('security-groups',
neutron_context=neutron_context).get('security_groups')
tenant_id=test_db_base_plugin_v2.TEST_TENANT_ID).get(
'security_groups')
self.assertEqual(1, len(sg))
def test_security_group_port_create_creates_default_security_group(self):
@ -2112,11 +2104,13 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
with self.security_group() as sg:
rule = self._build_security_group_rule(
sg['security_group']['id'], 'ingress', const.PROTO_NUM_TCP)
rule['security_group_rule'].update({'id': specified_id,
rule['security_group_rule'].update({
'id': specified_id,
'port_range_min': None,
'port_range_max': None,
'remote_ip_prefix': None,
'remote_group_id': None,
'tenant_id': test_db_base_plugin_v2.TEST_TENANT_ID,
'remote_address_group_id':
None})
result = self.plugin.create_security_group_rule(


@ -114,7 +114,7 @@ class SegmentTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
segment['segment'][k] = None if v is None else str(v)
segment_req = self.new_create_request(
'segments', segment, fmt)
'segments', segment, fmt, as_admin=True)
segment_res = segment_req.get_response(self.ext_api)
if expected_res_status:
@ -189,7 +189,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'name': 'Segment name'}},
expected_code=webob.exc.HTTPOk.code)
expected_code=webob.exc.HTTPOk.code,
as_admin=True)
self.assertEqual('Segment name', result['segment']['name'])
def test_update_segment_set_description(self):
@ -197,7 +198,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'description': 'Segment desc'}},
expected_code=webob.exc.HTTPOk.code)
expected_code=webob.exc.HTTPOk.code,
as_admin=True)
self.assertEqual('Segment desc', result['segment']['description'])
def test_update_segment_set_name_to_none(self):
@ -206,7 +208,8 @@ class TestSegmentNameDescription(SegmentTestCase):
result = self._update('segments',
segment['segment']['id'],
{'segment': {'name': None}},
expected_code=webob.exc.HTTPOk.code)
expected_code=webob.exc.HTTPOk.code,
as_admin=True)
self.assertIsNone(result['segment']['name'])
def test_update_segment_set_description_to_none(self):
@ -273,7 +276,8 @@ class TestSegment(SegmentTestCase):
with self.network() as network:
network = network['network']
local_segment = self._list('segments')['segments'][0]
local_segment = self._list('segments',
as_admin=True)['segments'][0]
with mock.patch.object(registry, 'publish') as publish:
publish.side_effect = exceptions.CallbackFailure(errors=Exception)
self.assertRaises(webob.exc.HTTPClientError,
@ -312,7 +316,7 @@ class TestSegment(SegmentTestCase):
physical_network='physnet0')
segment = self.segment(network_id=network['id'], segmentation_id=201,
physical_network='physnet1')
self._delete('segments', segment['segment']['id'])
self._delete('segments', segment['segment']['id'], as_admin=True)
self._show('segments', segment['segment']['id'],
expected_code=webob.exc.HTTPNotFound.code)
@ -326,8 +330,10 @@ class TestSegment(SegmentTestCase):
segment_id = segment['segment']['id']
with self.subnet(network=network, segment_id=segment_id):
self._delete('segments', segment_id,
expected_code=webob.exc.HTTPConflict.code)
exist_segment = self._show('segments', segment_id)
expected_code=webob.exc.HTTPConflict.code,
as_admin=True)
exist_segment = self._show('segments', segment_id,
as_admin=True)
self.assertEqual(segment_id, exist_segment['segment']['id'])
def test_get_segment(self):
@ -336,7 +342,8 @@ class TestSegment(SegmentTestCase):
segment = self._test_create_segment(network_id=network['id'],
physical_network='physnet',
segmentation_id=200)
req = self.new_show_request('segments', segment['segment']['id'])
req = self.new_show_request('segments', segment['segment']['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(segment['segment']['id'], res['segment']['id'])
@ -349,14 +356,15 @@ class TestSegment(SegmentTestCase):
self._test_create_segment(network_id=network['id'],
physical_network='physnet2',
segmentation_id=201)
res = self._list('segments')
res = self._list('segments', as_admin=True)
self.assertEqual(3, len(res['segments']))
def test_list_segments_with_sort(self):
with self.network() as network:
network = network['network']
local_segment = {'segment': self._list('segments')['segments'][0]}
local_segment = {'segment': self._list('segments',
as_admin=True)['segments'][0]}
s1 = self._test_create_segment(network_id=network['id'],
physical_network='physnet1',
segmentation_id=200)
@ -366,13 +374,15 @@ class TestSegment(SegmentTestCase):
self._test_list_with_sort('segment',
(s2, s1, local_segment),
[('physical_network', 'desc')],
query_params='network_id=%s' % network['id'])
query_params='network_id=%s' % network['id'],
as_admin=True)
def test_list_segments_with_pagination(self):
with self.network() as network:
network = network['network']
local_segment = {'segment': self._list('segments')['segments'][0]}
local_segment = {'segment': self._list('segments',
as_admin=True)['segments'][0]}
s1 = self._test_create_segment(network_id=network['id'],
physical_network='physnet0',
segmentation_id=200)
@ -386,7 +396,8 @@ class TestSegment(SegmentTestCase):
'segment',
(local_segment, s1, s2, s3),
('physical_network', 'asc'), 3, 2,
query_params='network_id=%s' % network['id'])
query_params='network_id=%s' % network['id'],
as_admin=True)
def test_list_segments_with_pagination_reverse(self):
with self.network() as network:
@ -405,7 +416,8 @@ class TestSegment(SegmentTestCase):
'segment',
(s1, s2, s3),
('physical_network', 'asc'), 2, 2,
query_params='network_id=%s' % network['id'])
query_params='network_id=%s' % network['id'],
as_admin=True)
def test_update_segments(self):
with self.network() as network:
@ -456,7 +468,7 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.subnet(network=network, segment_id=segment_id) as subnet:
subnet = subnet['subnet']
request = self.new_show_request('subnets', subnet['id'])
request = self.new_show_request('subnets', subnet['id'], as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
self.assertEqual(segment_id,
@ -556,12 +568,14 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.network() as network:
pass
segment_id = self._list('segments')['segments'][0]['id']
segment_id = self._list('segments',
as_admin=True)['segments'][0]['id']
with self.subnet(network=network, segment_id=None) as subnet:
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment_id}}
request = self.new_update_request('subnets', data, subnet['id'])
request = self.new_update_request('subnets', data, subnet['id'],
as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
@ -582,7 +596,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
request = self.new_update_request('subnets', data, subnet['id'])
request = self.new_update_request('subnets', data, subnet['id'],
as_admin=True)
response = request.get_response(self.api)
res = self.deserialize(self.fmt, response)
@ -604,7 +619,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet = subnet['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
request = self.new_update_request('subnets', data, subnet['id'])
request = self.new_update_request('subnets', data, subnet['id'],
as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@ -627,7 +643,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
subnet2 = subnet2['subnet']
data = {'subnet': {'segment_id': segment1['id']}}
request = self.new_update_request('subnets', data, subnet1['id'])
request = self.new_update_request('subnets', data, subnet1['id'],
as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@ -636,7 +653,7 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
with self.network() as network:
net = network['network']
segment_id = self._list('segments')['segments'][0]['id']
segment_id = self._list('segments', as_admin=True)['segments'][0]['id']
with self.subnet(network=network, segment_id=segment_id) as subnet:
subnet = subnet['subnet']
@ -645,7 +662,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase):
segmentation_id=202)['segment']
data = {'subnet': {'segment_id': segment2['id']}}
request = self.new_update_request('subnets', data, subnet['id'])
request = self.new_update_request('subnets', data, subnet['id'],
as_admin=True)
response = request.get_response(self.api)
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
@ -855,7 +873,7 @@ class TestMl2HostSegmentMappingOVS(HostSegmentMappingTestCase):
def test_segment_deletion_removes_host_mapping(self):
host = 'host1'
segment = self._test_one_segment_one_host(host)
self._delete('segments', segment['id'])
self._delete('segments', segment['id'], as_admin=True)
segments_host_db = self._get_segments_for_host(host)
self.assertFalse(segments_host_db)
@ -1021,7 +1039,8 @@ class SegmentAwareIpamTestCase(SegmentTestCase):
segment_id=segment['segment']['id'],
ip_version=ip_version,
cidr=cidr,
allocation_pools=allocation_pools) as subnet:
allocation_pools=allocation_pools,
as_admin=True) as subnet:
self._validate_l2_adjacency(network['network']['id'],
is_adjacent=False)
return subnet
@ -1098,6 +1117,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
fixed_ips=[
{'subnet_id': subnet['subnet']['id']}
])
@ -1125,6 +1145,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@ -1147,6 +1168,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@ -1173,6 +1195,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
@ -1188,6 +1211,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@ -1201,6 +1225,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
res = self.deserialize(self.fmt, response)
@ -1220,6 +1245,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self.deserialize(self.fmt, response)
@ -1282,6 +1308,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
**kwargs)
port = self.deserialize(self.fmt, response)
request = self.new_show_request('ports', port['port']['id'])
@ -1326,6 +1353,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
port = self.deserialize(self.fmt, response)
@ -1362,6 +1390,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
port = self.deserialize(self.fmt, response)
@ -1403,7 +1432,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
def _create_deferred_ip_port(self, network):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'])
tenant_id=network['network']['tenant_id'],
is_admin=True)
port = self.deserialize(self.fmt, response)
ips = port['port']['fixed_ips']
self.assertEqual(0, len(ips))
@ -1423,7 +1453,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
# Port update succeeds and allocates a new IP address.
@ -1441,7 +1472,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
with self.subnet(network=network):
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@ -1457,7 +1489,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
fixed_ips=[])
fixed_ips=[],
is_admin=True)
port = self.deserialize(self.fmt, response)
ips = port['port']['fixed_ips']
self.assertEqual(0, len(ips))
@ -1465,7 +1498,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Create the subnet and try to update the port to get an IP
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@ -1485,7 +1519,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
portbindings.HOST_ID: 'fakehost',
'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@ -1510,7 +1545,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
portbindings.HOST_ID: 'fakehost',
'fixed_ips': []}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
@ -1528,7 +1564,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
@ -1551,7 +1588,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the only subnet is on a segment)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
self.deserialize(self.fmt, response)
@ -1599,7 +1637,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Try requesting an IP (but the subnet ran out of ips)
data = {'port': {portbindings.HOST_ID: 'fakehost'}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
res = self.deserialize(self.fmt, response)
@ -1619,6 +1658,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
@ -1626,7 +1666,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Now, try to update binding to a host on the other segment
data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
port_req = self.new_update_request('ports', data, port['port']['id'])
port_req = self.new_update_request('ports', data, port['port']['id'],
as_admin=True)
response = port_req.get_response(self.api)
# It fails since the IP address isn't compatible with the new segment
@ -1644,6 +1685,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
@ -1651,7 +1693,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
# Now, try to update binding to another host in same segment
data = {'port': {portbindings.HOST_ID: 'fakehost1'}}
port_req = self.new_update_request('ports', data, port['port']['id'])
port_req = self.new_update_request('ports', data, port['port']['id'],
as_admin=True)
response = port_req.get_response(self.api)
# Since the new host is in the same segment, it succeeds.
@ -1671,7 +1714,8 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
data = {'port': {portbindings.HOST_ID: 'fakehost',
port_apidef.PORT_MAC_ADDRESS: '00:00:00:00:00:01'}}
port_id = port['port']['id']
port_req = self.new_update_request('ports', data, port_id)
port_req = self.new_update_request('ports', data, port_id,
as_admin=True)
response = port_req.get_response(self.api)
# Port update succeeds and allocates a new IP address.
@ -1722,6 +1766,7 @@ class TestSegmentAwareIpam(SegmentAwareIpamTestCase):
response = self._create_port(self.fmt,
net_id=network['network']['id'],
tenant_id=network['network']['tenant_id'],
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost_a'})
res = self.deserialize(self.fmt, response)
@ -1849,7 +1894,8 @@ class TestSegmentAwareIpamML2(TestSegmentAwareIpam):
network, segment, subnet = self._create_test_segment_with_subnet()
self.assertTrue(self.VLAN_MIN <=
segment['segment']['segmentation_id'] <= self.VLAN_MAX)
retrieved_segment = self._show('segments', segment['segment']['id'])
retrieved_segment = self._show('segments', segment['segment']['id'],
as_admin=True)
self.assertEqual(segment['segment']['segmentation_id'],
retrieved_segment['segment']['segmentation_id'])
@ -1975,7 +2021,8 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
def test_update_subnet_association_with_segment(self, cidr='10.0.0.0/24',
allocation_pools=None):
with self.network() as network:
segment_id = self._list('segments')['segments'][0]['id']
segment_id = self._list('segments',
as_admin=True)['segments'][0]['id']
network_id = network['network']['id']
self._setup_host_mappings([(segment_id, 'fakehost')])
@ -1993,9 +2040,11 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
segment_id=None) as subnet:
self._validate_l2_adjacency(network_id, is_adjacent=True)
data = {'subnet': {'segment_id': segment_id}}
self.new_update_request('subnets', data, subnet['subnet']['id'])
self.new_update_request('subnets', data, subnet['subnet']['id'],
as_admin=True)
self.new_update_request(
'subnets', data, subnet['subnet']['id']).get_response(self.api)
'subnets', data, subnet['subnet']['id'],
as_admin=True).get_response(self.api)
self._validate_l2_adjacency(network_id, is_adjacent=False)
self._assert_inventory_creation(segment_id, aggregate, subnet)
@ -2287,7 +2336,8 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
def _create_test_port(self, network_id, tenant_id, subnet, **kwargs):
port = self._make_port(self.fmt, network_id, tenant_id=tenant_id,
arg_list=(portbindings.HOST_ID,), **kwargs)
as_admin=True, arg_list=(portbindings.HOST_ID,),
**kwargs)
self.batch_notifier._notify()
return port
@ -2403,7 +2453,7 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
if compute_owned:
port_data['port']['device_owner'] = (
constants.DEVICE_OWNER_COMPUTE_PREFIX)
self._update('ports', port['port']['id'], port_data)
self._update('ports', port['port']['id'], port_data, as_admin=True)
self.batch_notifier._notify()
self._assert_inventory_update_port(
first_subnet['subnet']['segment_id'], original_inventory,


@ -251,7 +251,8 @@ class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
super(ServiceTypeManagerExtTestCase, self).setUp()
def _list_service_providers(self):
return self.api.get(_get_path('service-providers', fmt=self.fmt))
return self.api.get(_get_path('service-providers', fmt=self.fmt),
extra_environ=test_base._get_neutron_env())
def test_list_service_providers(self):
res = self._list_service_providers()


@ -37,7 +37,7 @@ class SubnetOnboardTestsBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
tenant_id = _uuid()
tenant_id = self._tenant_id
scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version,
'shared': shared, 'name': name + '-scope'}
@ -53,7 +53,7 @@ class SubnetOnboardTestsBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
tenant_id = _uuid()
tenant_id = self._tenant_id
pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name,
'address_scope_id': address_scope_id,
'prefixes': prefixes, 'is_default': is_default_pool}


@ -344,13 +344,14 @@ class SubnetServiceTypesExtensionTestCase(
tenant_id=network['tenant_id'],
device_owner=service_type,
arg_list=(portbindings.HOST_ID,),
**{portbindings.HOST_ID: 'fakehost'})
**{portbindings.HOST_ID: 'fakehost'},
is_admin=True)
port = self.deserialize('json', port)['port']
# Update the port's host binding.
data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
# self._update will fail with a MismatchError if the update cannot be
# applied
port = self._update('ports', port['id'], data)
port = self._update('ports', port['id'], data, as_admin=True)
class SubnetServiceTypesExtensionTestCasev6(


@ -36,7 +36,7 @@ class SubnetpoolPrefixOpsTestBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
tenant_id = _uuid()
tenant_id = self._tenant_id
scope_data = {'tenant_id': tenant_id, 'ip_version': ip_version,
'shared': shared, 'name': name + '-scope'}
@ -52,7 +52,7 @@ class SubnetpoolPrefixOpsTestBase(object):
tenant_id = project_id if project_id else kwargs.get(
'tenant_id', None)
if not tenant_id:
tenant_id = _uuid()
tenant_id = self._tenant_id
pool_data = {'tenant_id': tenant_id, 'shared': shared, 'name': name,
'address_scope_id': address_scope_id,
'prefixes': prefixes, 'is_default': is_default_pool}


@ -78,6 +78,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
self._network = self._make_network(self.fmt, 'net1', True,
as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@ -86,6 +87,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '2'}
self._network2 = self._make_network(self.fmt, 'net2', True,
as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@ -94,6 +96,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
net_arg = {pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'noagent'}
self._network3 = self._make_network(self.fmt, 'net3', True,
as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,),
**net_arg)
@ -299,6 +302,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -329,6 +333,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True}
with self.port(subnet=snet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -357,6 +362,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network,
enable_dhcp=False) as snet:
with self.port(
is_admin=True,
subnet=snet,
project_id=self.tenant,
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)\
@ -365,8 +371,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
plugin.update_distributed_port_binding(self.adminContext,
port_id, {'port': {portbindings.HOST_ID: HOST_4,
'device_id': router['id']}})
port = self._show('ports', port_id,
neutron_context=self.adminContext)
port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.callbacks.update_device_up(self.adminContext,
@ -388,6 +393,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -423,6 +429,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -478,10 +485,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@ -512,9 +521,11 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_3'}
with self.port(subnet=subnet,
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@ -535,10 +546,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network2) as subnet:
host_arg = {portbindings.HOST_ID: host}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
@ -569,11 +582,13 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
@ -610,12 +625,14 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST,
'admin_state_up': True}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2',
'admin_state_up': True}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,
'admin_state_up',),
@ -669,16 +686,19 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.subnet(cidr='10.1.0.0/24') as subnet2:
with self.port(subnet=subnet2,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
@ -742,6 +762,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
ipv6_address_mode=constants.IPV6_SLAAC) as subnet2:
with self.port(
subnet,
is_admin=True,
fixed_ips=[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}],
device_owner=DEVICE_OWNER_COMPUTE,
@ -783,10 +804,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST}
# 2 ports on host 1
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@ -794,6 +817,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
# agent on host 1
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
@ -833,10 +857,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@ -877,10 +903,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@ -919,6 +947,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST_4, 'admin_state_up': True}
with self.port(subnet=snet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -954,6 +983,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@ -966,6 +996,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
device=device)
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
@ -995,10 +1026,12 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@ -1029,6 +1062,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_5'}
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -1043,7 +1077,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
new_mac = ':'.join(mac)
data = {'port': {'mac_address': new_mac,
portbindings.HOST_ID: HOST}}
req = self.new_update_request('ports', data, p1['id'])
req = self.new_update_request('ports', data, p1['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertIn('port', res)
self.assertEqual(new_mac, res['port']['mac_address'])
@ -1080,6 +1115,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, cidr='10.0.0.0/24',
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
fixed_ips=fixed_ips,
@ -1094,7 +1130,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
self.new_update_request('ports', data, p['id'])
self.new_update_request('ports', data, p['id'],
as_admin=True)
l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
l2pop_mech.L2PopulationAgentNotify = mock.Mock()
l2notify = l2pop_mech.L2PopulationAgentNotify
@ -1109,6 +1146,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
with self.port(subnet=subnet, cidr='10.0.0.0/24',
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
fixed_ips=fixed_ips,
@ -1125,7 +1163,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
req = self.new_update_request('ports', data, p1['id'])
req = self.new_update_request('ports', data, p1['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
@ -1143,7 +1182,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.16'}]}}
req = self.new_update_request('ports', data, p1['id'])
req = self.new_update_request('ports', data, p1['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
@ -1162,7 +1202,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}}
req = self.new_update_request('ports', data, p1['id'])
req = self.new_update_request('ports', data, p1['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
@ -1182,6 +1223,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -1204,6 +1246,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
self._register_ml2_agents()
host_arg = {portbindings.HOST_ID: HOST}
with self.port(arg_list=(portbindings.HOST_ID,),
is_admin=True,
**host_arg) as port:
port_id = port['port']['id']
# ensure various formats all result in correct port_id
@ -1217,7 +1260,8 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
def _update_and_check_portbinding(self, port_id, host_id):
data = {'port': {portbindings.HOST_ID: host_id}}
req = self.new_update_request('ports', data, port_id)
req = self.new_update_request('ports', data, port_id,
as_admin=True)
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(host_id, res['port'][portbindings.HOST_ID])
@ -1227,6 +1271,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
@ -1326,6 +1371,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
with self.subnet(network=self._network, enable_dhcp=False) as snet:
host_arg = {portbindings.HOST_ID: HOST, 'admin_state_up': True}
with self.port(subnet=snet,
is_admin=True,
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT,
arg_list=(portbindings.HOST_ID,),
**host_arg) as p:


@ -449,11 +449,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
{'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1',
'tag': 1024, 'parent_name': 'fakename'},
]
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
# succeed without binding:profile
with self.port(subnet=subnet1,
set_context=True, tenant_id='test'):
with self.port(subnet=subnet1):
pass
# fail with invalid binding profiles
for invalid_profile in invalid_binding_profiles:
@ -465,7 +464,6 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
expected_res_status=403,
arg_list=(
ovn_const.OVN_PORT_BINDING_PROFILE,),
set_context=True, tenant_id='test',
**kwargs):
pass
except exc.HTTPClientError:
@ -534,10 +532,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
'opt_value': 'apple'},
{'ip_version': 6, 'opt_name': 'grape',
'opt_value': 'grape'}]}}
with self.network(set_context=True, tenant_id='test') as net:
with self.network() as net:
with self.subnet(network=net) as subnet:
with self.port(subnet=subnet,
set_context=True, tenant_id='test') as port:
with self.port(subnet=subnet) as port:
port_id = port['port']['id']
self._update('ports', port_id, data)
@ -548,11 +545,12 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
mock_log.assert_has_calls([expected_call])
def test_create_and_update_ignored_fip_port(self):
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
device_owner=const.DEVICE_OWNER_FLOATINGIP,
set_context=True, tenant_id='test') as port:
with self.port(
subnet=subnet1,
is_admin=True,
device_owner=const.DEVICE_OWNER_FLOATINGIP) as port:
self.nb_ovn.create_lswitch_port.assert_not_called()
data = {'port': {'name': 'new'}}
req = self.new_update_request('ports', data,
@ -562,15 +560,17 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.nb_ovn.set_lswitch_port.assert_not_called()
def test_update_ignored_port_from_fip_device_owner(self):
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
device_owner=const.DEVICE_OWNER_FLOATINGIP,
set_context=True, tenant_id='test') as port:
with self.port(
subnet=subnet1,
is_admin=True,
device_owner=const.DEVICE_OWNER_FLOATINGIP) as port:
self.nb_ovn.create_lswitch_port.assert_not_called()
data = {'port': {'device_owner': 'test'}}
req = self.new_update_request('ports', data,
port['port']['id'])
port['port']['id'],
as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = jsonutils.loads(res.body)['NeutronError']['message']
@ -581,17 +581,18 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.nb_ovn.set_lswitch_port.assert_not_called()
def test_update_ignored_port_to_fip_device_owner(self):
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
device_owner='test',
set_context=True, tenant_id='test') as port:
is_admin=True,
device_owner='test') as port:
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
data = {'port': {'device_owner':
const.DEVICE_OWNER_FLOATINGIP}}
req = self.new_update_request('ports', data,
port['port']['id'])
port['port']['id'],
as_admin=True)
res = req.get_response(self.api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
msg = jsonutils.loads(res.body)['NeutronError']['message']
@ -605,11 +606,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
kwargs = {'mac_address': '00:00:00:00:00:01',
'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.4'}]}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
is_admin=True,
arg_list=('mac_address', 'fixed_ips'),
set_context=True, tenant_id='test',
**kwargs) as port:
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@ -621,7 +622,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:02'}}
req = self.new_update_request(
'ports',
data, port['port']['id'])
data, port['port']['id'],
as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@ -635,11 +637,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
# be treated as VIP.
kwargs = {'port_security_enabled': False,
'device_owner': 'compute:nova'}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('port_security_enabled',),
set_context=True, tenant_id='test',
**kwargs) as port:
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@ -653,7 +654,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:01'}}
req = self.new_update_request(
'ports',
data, port['port']['id'])
data, port['port']['id'],
as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@ -687,11 +689,11 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
{"ip_address": "2.2.2.2",
"mac_address": "22:22:22:22:22:22"}],
'device_owner': 'compute:nova'}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
is_admin=True,
arg_list=('allowed_address_pairs',),
set_context=True, tenant_id='test',
**kwargs) as port:
port_ip = port['port'].get('fixed_ips')[0]['ip_address']
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
@ -718,7 +720,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
data = {'port': {'mac_address': '00:00:00:00:00:01'}}
req = self.new_update_request(
'ports',
data, port['port']['id'])
data, port['port']['id'],
as_admin=True)
req.get_response(self.api)
self.assertTrue(self.nb_ovn.set_lswitch_port.called)
called_args_dict = (
@ -737,10 +740,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
called_args_dict.get('addresses'))
def test_create_port_ovn_octavia_vip(self):
with (self.network(set_context=True, tenant_id='test')) as net1, (
self.subnet(network=net1)) as subnet1, (
with self.network() as net1,\
self.subnet(network=net1) as subnet1,\
self.port(name=ovn_const.LB_VIP_PORT_PREFIX + 'foo',
subnet=subnet1, set_context=True, tenant_id='test')):
subnet=subnet1):
self.assertTrue(self.nb_ovn.create_lswitch_port.called)
called_args_dict = (
@ -865,6 +868,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '2'}
net = self._make_network(self.fmt, 'net1', True,
as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@ -884,11 +888,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_create_port_without_security_groups(self):
kwargs = {'security_groups': []}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',),
set_context=True, tenant_id='test',
**kwargs):
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
@ -896,22 +899,20 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_create_port_without_security_groups_no_ps(self):
kwargs = {'security_groups': [], 'port_security_enabled': False}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',
'port_security_enabled'),
set_context=True, tenant_id='test',
**kwargs):
self.assertEqual(
1, self.nb_ovn.create_lswitch_port.call_count)
self.nb_ovn.add_acl.assert_not_called()
def test_update_port_changed_security_groups(self):
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
set_context=True, tenant_id='test') as port1:
with self.port(subnet=subnet1) as port1:
sg_id = port1['port']['security_groups'][0]
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
@ -938,10 +939,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
self.assertTrue(self.nb_ovn.pg_add_ports.called)
def test_update_port_unchanged_security_groups(self):
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
set_context=True, tenant_id='test') as port1:
with self.port(subnet=subnet1) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
port1['port']))
@ -967,11 +967,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_update_port_vip(self, is_vip=True):
kwargs = {}
with (
self.network(set_context=True, tenant_id='test')) as net1, (
self.subnet(network=net1)) as subnet1, (
self.port(subnet=subnet1, set_context=True,
tenant_id='test', **kwargs)) as port1:
with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
self.port(subnet=subnet1, **kwargs) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
port1['port']))
@ -1001,11 +999,10 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_without_security_groups(self):
kwargs = {'security_groups': []}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1) as subnet1:
with self.port(subnet=subnet1,
arg_list=('security_groups',),
set_context=True, tenant_id='test',
**kwargs) as port1:
fake_lsp = (
fakes.FakeOVNPort.from_neutron_port(
@ -1022,10 +1019,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_exception_delete_revision(self, mock_del_port,
mock_del_rev):
mock_del_port.side_effect = Exception('BoOoOoOoOmmmmm!!!')
with self.network(set_context=True, tenant_id='test') as net:
with self.network() as net:
with self.subnet(network=net) as subnet:
with self.port(subnet=subnet,
set_context=True, tenant_id='test') as port:
with self.port(subnet=subnet) as port:
self._delete('ports', port['port']['id'])
# Assert that delete_revision wasn't invoked
mock_del_rev.assert_not_called()
@ -1035,10 +1031,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_delete_port_not_exist_in_ovn(self, mock_del_port,
mock_del_rev):
mock_del_port.side_effect = idlutils.RowNotFound
with self.network(set_context=True, tenant_id='test') as net:
with self.network() as net:
with self.subnet(network=net) as subnet:
with self.port(subnet=subnet,
set_context=True, tenant_id='test') as port:
with self.port(subnet=subnet) as port:
self._delete('ports', port['port']['id'])
# Assert that delete_revision wasn't invoked
mock_del_rev.assert_not_called()
@ -1050,10 +1045,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
created_at = timeutils.utcnow() - datetime.timedelta(
seconds=ovn_const.DB_CONSISTENCY_CHECK_INTERVAL * 2)
mock_del_port.side_effect = idlutils.RowNotFound
with self.network(set_context=True, tenant_id='test') as net:
with self.network() as net:
with self.subnet(network=net) as subnet:
with self.port(subnet=subnet,
set_context=True, tenant_id='test') as port, \
with self.port(subnet=subnet) as port, \
mock.patch.object(ovn_revision_numbers_db,
'get_revision_row',
return_value=OvnRevNumberRow(
@ -1067,10 +1061,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_set_port_status_up(self, is_compute_port=False):
port_device_owner = 'compute:nova' if is_compute_port else ''
self.mech_driver._plugin.nova_notifier = mock.Mock()
with self.network(set_context=True, tenant_id='test') as net1, \
with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
self.port(subnet=subnet1, set_context=True,
tenant_id='test',
self.port(subnet=subnet1, is_admin=True,
device_owner=port_device_owner) as port1, \
mock.patch.object(provisioning_blocks,
'provisioning_complete') as pc, \
@ -1106,10 +1099,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_set_port_status_down(self, is_compute_port=False):
port_device_owner = 'compute:nova' if is_compute_port else ''
self.mech_driver._plugin.nova_notifier = mock.Mock()
with self.network(set_context=True, tenant_id='test') as net1, \
with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
self.port(subnet=subnet1, set_context=True,
tenant_id='test',
self.port(subnet=subnet1, is_admin=True,
device_owner=port_device_owner) as port1, \
mock.patch.object(provisioning_blocks,
'add_provisioning_component') as apc, \
@ -1158,10 +1150,9 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def test_set_port_status_concurrent_delete(self):
exc = os_db_exc.DBReferenceError('', '', '', '')
with self.network(set_context=True, tenant_id='test') as net1, \
with self.network() as net1, \
self.subnet(network=net1) as subnet1, \
self.port(subnet=subnet1, set_context=True,
tenant_id='test') as port1, \
self.port(subnet=subnet1) as port1, \
mock.patch.object(provisioning_blocks,
'add_provisioning_component',
side_effect=exc) as apc, \
@ -2411,7 +2402,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
def _test_update_network_fragmentation(self, new_mtu, expected_opts, grps):
network_attrs = {external_net.EXTERNAL: True}
network = self._make_network(
self.fmt, 'net1', True, arg_list=(external_net.EXTERNAL,),
self.fmt, 'net1', True, as_admin=True,
arg_list=(external_net.EXTERNAL,),
**network_attrs)
with self.subnet(network=network) as subnet:
@ -2712,6 +2704,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: '1'}
net = self._make_network(self.fmt, 'net1', True,
as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID,),
@ -2724,7 +2717,8 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
# Issue an update to the network changing the segmentation_id
data = {'network': {pnet.SEGMENTATION_ID: new_vlan_tag}}
req = self.new_update_request('networks', data, net['id'])
req = self.new_update_request('networks', data, net['id'],
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(new_vlan_tag, res['network'][pnet.SEGMENTATION_ID])
@ -2854,6 +2848,7 @@ class TestOVNMechanismDriverSubnetsV2(test_plugin.TestMl2SubnetsV2,
net_arg = {pnet.NETWORK_TYPE: 'geneve',
pnet.SEGMENTATION_ID: '1'}
network = self._make_network(self.fmt, 'net1', True,
as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@ -3022,7 +3017,7 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
segment = self._test_create_segment(
network_id=net['id'], physical_network='physnet1',
segmentation_id=200, network_type='vlan')['segment']
self._delete('segments', segment['id'])
self._delete('segments', segment['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(segment['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
@ -3050,12 +3045,12 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
'options': {'network_name': 'physnet2'},
'tag': 300,
'name': ovn_utils.ovn_provnet_port_name(seg_2['id'])})]
self._delete('segments', seg_1['id'])
self._delete('segments', seg_1['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(net['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
ovn_nb_api.delete_lswitch_port.reset_mock()
self._delete('segments', seg_2['id'])
self._delete('segments', seg_2['id'], as_admin=True)
ovn_nb_api.delete_lswitch_port.assert_called_once_with(
lport_name=ovn_utils.ovn_provnet_port_name(seg_2['id']),
lswitch_name=ovn_utils.ovn_name(net['id']))
@ -3159,8 +3154,8 @@ class TestOVNMechanismDriverSegment(MechDriverSetupBase,
ovn_nb_api.delete_lswitch_port.assert_not_called()
# Delete both segments
self._delete('segments', self.seg_2['id'])
self._delete('segments', self.seg_1['id'])
self._delete('segments', self.seg_2['id'], as_admin=True)
self._delete('segments', self.seg_1['id'], as_admin=True)
# Make sure that the metadata port wasn't deleted.
deleted_ports = [
@ -4096,7 +4091,7 @@ class TestOVNMechanismDriverSecurityGroup(MechDriverSetupBase,
1, self.mech_driver.nb_ovn.pg_acl_del.call_count)
def test_delete_port_with_security_groups_port_doesnt_remove_pg(self):
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1):
sg = self._create_sg('sg')
port = self._make_port(
@ -4174,7 +4169,7 @@ class TestOVNMechanismDriverMetadataPort(MechDriverSetupBase,
"""
self.mech_driver.nb_ovn.get_subnet_dhcp_options.return_value = {
'subnet': {}, 'ports': {}}
with self.network(set_context=True, tenant_id='test') as net1:
with self.network() as net1:
with self.subnet(network=net1, cidr='10.0.0.0/24') as subnet1:
with self.subnet(network=net1,
cidr='20.0.0.0/24') as subnet2:
@ -4215,6 +4210,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
self._create_port(
self.fmt, n['network']['id'],
expected_res_status=404,
is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
@ -4226,6 +4222,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
with self.port(s) as p:
binding[OVN_PROFILE]['parent_name'] = p['port']['id']
res = self._create_port(self.fmt, n['network']['id'],
is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
port = self.deserialize(self.fmt, res)
@ -4240,6 +4237,7 @@ class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase):
with self.port(s) as p:
binding[OVN_PROFILE]['parent_name'] = p['port']['id']
self._create_port(self.fmt, n['network']['id'],
is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@ -4253,6 +4251,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
res = self._create_port(self.fmt, n['network']['id'],
is_admin=True,
arg_list=(OVN_PROFILE,),
**binding)
port = self.deserialize(self.fmt, res)
@ -4264,6 +4263,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@ -4273,6 +4273,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@ -4283,6 +4284,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=400,
**binding)
@ -4294,6 +4296,7 @@ class TestOVNVtepPortBinding(OVNMechanismDriverTestCase):
with self.network() as n:
with self.subnet(n):
self._create_port(self.fmt, n['network']['id'],
is_admin=True,
arg_list=(OVN_PROFILE,),
expected_res_status=404,
**binding)


@ -39,10 +39,12 @@ class TestMigrateNeutronDatabaseToOvn(
for sid in range(1, 6):
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: sid}
network_id = self._make_network(self.fmt, 'net%d' % sid, True,
network_id = self._make_network(
self.fmt, 'net%d' % sid, True, as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)['network']['id']
**net_arg
)['network']['id']
for vif_details in vif_details_list:
port = self._make_port(self.fmt, network_id)['port']


@ -50,7 +50,7 @@ class DNSDomainKeywordsTestCase(
net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)
net_kwargs['shared'] = True
res = self._create_network(self.fmt, 'test_network', True,
**net_kwargs)
as_admin=True, **net_kwargs)
network = self.deserialize(self.fmt, res)
if ipv4:
cidr = '10.0.0.0/24'
@ -108,8 +108,8 @@ class DNSDomainKeywordsTestCase(
# NOTE(slaweq): Admin context is required here to be able to update
# fixed_ips of the port as by default it is not possible for non-admin
# users
ctx = context.Context(project_id=PROJECT_ID, is_admin=True)
req = self.new_update_request('ports', data, port['id'], context=ctx)
req = self.new_update_request('ports', data, port['id'],
tenant_id=PROJECT_ID, as_admin=True)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
port = self.deserialize(self.fmt, res)['port']


@ -80,7 +80,7 @@ class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase):
net_kwargs['arg_list'] = \
net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)
res = self._create_network(self.fmt, 'test_network', True,
**net_kwargs)
as_admin=True, **net_kwargs)
network = self.deserialize(self.fmt, res)
if ipv4:
cidr = '10.0.0.0/24'


@ -50,12 +50,10 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_create_ports_bulk_with_tags(self):
num_ports = 3
tenant_id = 'some_tenant'
with self.network(tenant_id=tenant_id) as network_to_use:
with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'network_id': net_id,
'admin_state_up': True,
'tenant_id': tenant_id}}
'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
ports_tags_map = {}
for port, tags in zip(ports, TAGS):
@ -73,13 +71,11 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_create_ports_bulk_no_tags(self):
num_ports = 2
tenant_id = 'some_tenant'
with self.network(tenant_id=tenant_id) as network_to_use:
with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'name': 'port',
'network_id': net_id,
'admin_state_up': True,
'tenant_id': tenant_id}}
'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
req_body = {'ports': ports}
ports_req = self.new_create_request('ports', req_body)
@ -90,13 +86,11 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertFalse(port['tags'])
def test_create_port_with_tags(self):
tenant_id = 'some_tenant'
with self.network(tenant_id=tenant_id) as network_to_use:
with self.network() as network_to_use:
net_id = network_to_use['network']['id']
req_body = {'port': {'name': 'port',
'network_id': net_id,
'admin_state_up': True,
'tenant_id': tenant_id,
'tags': TAGS[0]}}
port_req = self.new_create_request('ports', req_body)
res = port_req.get_response(self.api)
@ -106,16 +100,14 @@ class TagPortsDuringBulkCreationTestCase(test_plugin.Ml2PluginV2TestCase):
def test_type_args_passed_to_extension(self):
num_ports = 2
tenant_id = 'some_tenant'
extension = tag_ports_during_bulk_creation
with mock.patch.object(
extension.TagPortsDuringBulkCreationExtensionDriver,
'process_create_port') as patched_method:
with self.network(tenant_id=tenant_id) as network_to_use:
with self.network() as network_to_use:
net_id = network_to_use['network']['id']
port = {'port': {'network_id': net_id,
'admin_state_up': True,
'tenant_id': tenant_id}}
'admin_state_up': True}}
ports = [copy.deepcopy(port) for x in range(num_ports)]
ports[0]['port']['tags'] = TAGS[0]
ports[1]['port']['tags'] = TAGS[1]


@ -16,7 +16,6 @@ from neutron_lib import constants
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.tests.unit.plugins.ml2.drivers import ext_test
from neutron.tests.unit.plugins.ml2 import test_plugin
@ -35,9 +34,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
self._ctxt = context.get_admin_context()
def _verify_network_create(self, code, exc_reason):
tenant_id = uuidutils.generate_uuid()
data = {'network': {'name': 'net1',
'tenant_id': tenant_id}}
data = {'network': {'name': 'net1'}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(code, res.status_int)
@ -47,7 +44,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
self.assertEqual(exc_reason,
network['NeutronError']['type'])
return (network, tenant_id)
return network
def _verify_network_update(self, network, code, exc_reason):
net_id = network['network']['id']
@ -64,10 +61,9 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_create_network',
side_effect=TypeError):
net, tenant_id = self._verify_network_create(500,
'HTTPInternalServerError')
self._verify_network_create(500, 'HTTPInternalServerError')
# Verify the operation is rolled back
query_params = "tenant_id=%s" % tenant_id
query_params = "tenant_id=%s" % self._tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
@ -75,7 +71,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_update_network',
side_effect=TypeError):
network, tid = self._verify_network_create(201, None)
network = self._verify_network_create(201, None)
self._verify_network_update(network, 500,
'HTTPInternalServerError')
@ -83,7 +79,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
with mock.patch.object(ext_test.TestExtensionDriver,
'extend_network_dict',
side_effect=[None, None, TypeError]):
network, tid = self._verify_network_create(201, None)
network = self._verify_network_create(201, None)
self._verify_network_update(network, 400, 'ExtensionDriverError')
def test_network_attr(self):


@ -381,7 +381,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
for net_idx, net in enumerate(networks):
# create
req = self.new_create_request('networks',
{'network': net})
{'network': net},
as_admin=True)
# verify
network = self.deserialize(self.fmt,
req.get_response(self.api))['network']
@ -399,7 +400,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
def _lookup_network_by_segmentation_id(self, seg_id, num_expected_nets):
params_str = "%s=%s" % (pnet.SEGMENTATION_ID, seg_id)
net_req = self.new_list_request('networks', None,
params=params_str)
params=params_str,
as_admin=True)
networks = self.deserialize(self.fmt, net_req.get_response(self.api))
if num_expected_nets:
self.assertIsNotNone(networks)
@ -446,9 +448,9 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
plugin.type_manager, 'create_network_segments',
side_effect=db_exc.RetryRequest(ValueError())
) as f:
data = {'network': {'tenant_id': 'sometenant', 'name': 'dummy',
data = {'network': {'name': 'dummy',
'admin_state_up': True, 'shared': False}}
req = self.new_create_request('networks', data)
req = self.new_create_request('networks', data, as_admin=True)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
# 1 + retry count
@ -459,7 +461,7 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
plugin = directory.get_plugin()
kwargs = {'arg_list': (pnet.NETWORK_TYPE, ),
pnet.NETWORK_TYPE: 'vlan'}
with self.network(**kwargs) as net:
with self.network(as_admin=True, **kwargs) as net:
for attribute in set(pnet.ATTRIBUTES) - {pnet.SEGMENTATION_ID}:
net_data = {attribute: net['network'][attribute]}
self.assertIsNone(
@ -491,7 +493,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 2}]
with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
with self.network(as_admin=True,
**{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net:
self.assertRaises(
exc.InvalidInput, plugin._update_segmentation_id, self.context,
@ -518,7 +521,8 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2,
mock.patch.object(type(mech_driver), 'agent_type',
new_callable=mock.PropertyMock(return_value=None)).start()
with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
with self.network(as_admin=True,
**{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net, \
mock.patch.object(
port_obj.Port, 'check_network_ports_by_binding_types',
@ -598,7 +602,8 @@ class TestMl2NetworksV2AgentMechDrivers(Ml2PluginV2TestCase):
segments = [{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}]
with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
with self.network(as_admin=True,
**{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: segments}) as net, \
mock.patch.object(
port_obj.Port, 'check_network_ports_by_binding_types',
@ -623,9 +628,8 @@ class TestExternalNetwork(Ml2PluginV2TestCase):
def _create_external_network(self):
data = {'network': {'name': 'net1',
'router:external': 'True',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
'router:external': 'True'}}
network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
return network
@ -669,7 +673,6 @@ class TestMl2NetworksWithVlanTransparencyBase(TestMl2NetworksV2):
mpnet_apidef.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
'tenant_id': 'tenant_one',
'vlan_transparent': 'True'}}
def setUp(self, plugin=None):
@ -685,7 +688,8 @@ class TestMl2NetworksWithVlanTransparency(
with mock.patch.object(mech_test.TestMechanismDriver,
'check_vlan_transparency',
return_value=False):
network_req = self.new_create_request('networks', self.data)
network_req = self.new_create_request(
'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(500, res.status_int)
error_result = self.deserialize(self.fmt, res)['NeutronError']
@ -696,7 +700,8 @@ class TestMl2NetworksWithVlanTransparency(
with mock.patch.object(mech_test.TestMechanismDriver,
'check_vlan_transparency',
return_value=True):
network_req = self.new_create_request('networks', self.data)
network_req = self.new_create_request(
'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
network = self.deserialize(self.fmt, res)['network']
@ -713,7 +718,8 @@ class TestMl2NetworksWithVlanTransparencyAndMTU(
return_value=True):
cfg.CONF.set_override('path_mtu', 1000, group='ml2')
cfg.CONF.set_override('global_physnet_mtu', 1000)
network_req = self.new_create_request('networks', self.data)
network_req = self.new_create_request(
'networks', self.data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
network = self.deserialize(self.fmt, res)['network']
@ -727,8 +733,7 @@ class TestMl2NetworksWithAvailabilityZone(TestMl2NetworksV2):
def test_create_network_availability_zone(self):
az_hints = ['az1', 'az2']
data = {'network': {'name': 'net1',
az_def.AZ_HINTS: az_hints,
'tenant_id': 'tenant_one'}}
az_def.AZ_HINTS: az_hints}}
with mock.patch.object(agents_db.AgentAvailabilityZoneMixin,
'validate_availability_zones'):
network_req = self.new_create_request('networks', data)
@ -879,6 +884,7 @@ class TestMl2SubnetsV2(test_plugin.TestSubnetsV2,
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
network = self._make_network(self.fmt, 'net1', True,
as_admin=True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
@ -1280,7 +1286,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
def test_update_port_with_empty_data(self):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
with self.port() as port:
with self.port(is_admin=True) as port:
port_id = port['port']['id']
new_port = plugin.update_port(ctx, port_id, {"port": {}})
new_port.pop('standard_attr_id')
@ -1422,7 +1428,8 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
data = {'port': {'mac_address': None}}
with self.port() as port:
current_mac = port['port']['mac_address']
req = self.new_update_request('ports', data, port['port']['id'])
req = self.new_update_request(
'ports', data, port['port']['id'], as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
new_mac = plugin.get_port(ctx, port['port']['id'])['mac_address']
self.assertNotEqual(current_mac, new_mac)
@ -1458,7 +1465,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
l3plugin = directory.get_plugin(plugin_constants.L3)
kwargs = {'arg_list': (extnet_apidef.EXTERNAL,),
extnet_apidef.EXTERNAL: True}
with self.network(**kwargs) as n:
with self.network(as_admin=True, **kwargs) as n:
with self.subnet(network=n, cidr='200.0.0.0/22'):
l3plugin.create_floatingip(
context.get_admin_context(),
@ -1488,24 +1495,23 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
res, 'ports', webob.exc.HTTPServerError.code)
def test_create_ports_bulk_with_sec_grp(self):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
with self.network() as net,\
mock.patch.object(plugin.notifier,
'security_groups_member_updated') as m_upd:
res = self._create_port_bulk(self.fmt, 3, net['network']['id'],
'test', True, context=ctx)
'test', True)
ports = self.deserialize(self.fmt, res)
if 'ports' in ports:
used_sg = ports['ports'][0]['security_groups']
m_upd.assert_has_calls(
[mock.call(ctx, [sg]) for sg in used_sg], any_order=True)
[mock.call(mock.ANY, [sg]) for sg in used_sg],
any_order=True)
else:
self.assertTrue('ports' in ports)
def test_create_ports_bulk_with_portbinding_attrs(self):
ctx = context.get_admin_context()
with self.network() as net:
overrides = {0: {portbindings.HOST_ID: 'host1',
portbindings.VNIC_TYPE: 'direct',
@ -1514,7 +1520,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
portbindings.VNIC_TYPE: 'macvtap',
portbindings.PROFILE: {'bar': 'bar'}}}
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx,
'test', True, as_admin=True,
override=overrides)
ports = self.deserialize(self.fmt, res)['ports']
self.assertCountEqual(['direct', 'macvtap'],
@ -1525,7 +1531,6 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
[p[portbindings.HOST_ID] for p in ports])
def test_create_ports_bulk_with_sec_grp_member_provider_update(self):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
bulk_mock_name = "security_groups_member_updated"
with self.network() as net,\
@ -1534,28 +1539,25 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
net_id = net['network']['id']
data = [{
'network_id': net_id,
'tenant_id': self._tenant_id
},
{
'network_id': net_id,
'tenant_id': self._tenant_id,
'device_owner': constants.DEVICE_OWNER_DHCP
}
]
res = self._create_bulk_from_list(self.fmt, 'port',
data, context=ctx)
res = self._create_bulk_from_list(self.fmt, 'port', data,
as_admin=True)
ports = self.deserialize(self.fmt, res)
used_sg = ports['ports'][0]['security_groups']
m_upd.assert_called_with(ctx, used_sg)
m_upd.assert_called_with(mock.ANY, used_sg)
m_upd.reset_mock()
data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP
self._create_bulk_from_list(self.fmt, 'port',
data, context=ctx)
data, as_admin=True)
self.assertFalse(m_upd.called)
def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
fake_prefix = '2001:db8::/64'
fake_gateway = 'fe80::1'
@ -1571,13 +1573,12 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
net_id = net['network']['id']
data = [{
'network_id': net_id,
'tenant_id': self._tenant_id,
'fixed_ips': [{'subnet_id': snet_v6['subnet']['id']}],
'device_owner': constants.DEVICE_OWNER_ROUTER_INTF
}
]
self._create_bulk_from_list(self.fmt, 'port',
data, context=ctx)
data, as_admin=True)
self.assertFalse(m_upd.called)
def test_create_ports_bulk_ip_allocation_reverted_in_case_of_error(self):
@ -1842,7 +1843,8 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
port_kwargs = {portbindings.HOST_ID: 'host1',
'subnet': subnet,
'device_id': 'deadlocktest'}
with self.port(arg_list=(portbindings.HOST_ID,),
with self.port(is_admin=True,
arg_list=(portbindings.HOST_ID,),
**port_kwargs) as port:
self.assertTrue(port['port']['id'])
self.assertTrue(get_port_mock.called)
@ -2037,7 +2039,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
host_arg = {portbindings.HOST_ID: HOST}
with self.port(arg_list=(portbindings.HOST_ID,),
with self.port(is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
port = plugin.get_port(ctx, port['port']['id'])
updated_ports = []
@ -2066,7 +2069,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
registry.subscribe(creceiver, resources.PORT,
events.AFTER_CREATE)
host_arg = {portbindings.HOST_ID: HOST}
with self.port(arg_list=(portbindings.HOST_ID,),
with self.port(is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg):
self.assertGreater(updated_ports[0]['revision_number'],
created_ports[0]['revision_number'])
@ -2079,7 +2083,8 @@ class TestMl2PortsV2WithRevisionPlugin(Ml2PluginV2TestCase):
registry.subscribe(p_update_receiver, resources.PORT,
events.AFTER_UPDATE)
host_arg = {portbindings.HOST_ID: HOST}
with self.port(device_owner=constants.DEVICE_OWNER_DVR_INTERFACE,
with self.port(is_admin=True,
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE,
device_id=TEST_ROUTER_ID,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@ -2116,7 +2121,8 @@ class TestMl2PortsV2WithL3(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST}
with mock.patch.object(l3plugin.l3_rpc_notifier,
'routers_updated_on_host') as mock_updated:
with self.port(device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
with self.port(is_admin=True,
device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
device_id=TEST_ROUTER_ID,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
@ -2399,7 +2405,7 @@ class TestMl2DvrPortsV2(TestMl2PortsV2):
if floating_ip:
router_ids.add(ns_to_delete['router_id'])
with self.port() as port, \
with self.port(is_admin=True) as port, \
mock.patch.object(registry, 'publish') as publish, \
mock.patch.object(self.l3plugin,
'disassociate_floatingips',
@ -2442,7 +2448,8 @@ class TestMl2DvrPortsV2(TestMl2PortsV2):
def test_delete_port_with_floatingip_create_precommit_event(self):
fake_method = mock.Mock()
with self.port(device_owner='network:floatingip') as port:
with self.port(is_admin=True,
device_owner='network:floatingip') as port:
try:
registry.subscribe(fake_method, resources.FLOATING_IP,
events.PRECOMMIT_DELETE)
@ -2534,6 +2541,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
profile_arg = {portbindings.PROFILE: {'d': s}}
try:
with self.port(expected_res_status=400,
is_admin=True,
arg_list=(portbindings.PROFILE,),
**profile_arg):
pass
@ -2543,15 +2551,17 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_remove_port_binding_profile(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
with self.port(is_admin=True,
arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
profile_arg = {portbindings.PROFILE: None}
port = self._update('ports', port_id,
{'port': profile_arg})['port']
{'port': profile_arg},
as_admin=True)['port']
self._check_port_binding_profile(port)
port = self._show('ports', port_id)['port']
port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port)
def test_return_on_concurrent_delete_and_binding(self):
@ -2744,15 +2754,17 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
def test_port_binding_profile_not_changed(self):
profile = {'e': 5}
profile_arg = {portbindings.PROFILE: profile}
with self.port(arg_list=(portbindings.PROFILE,),
with self.port(is_admin=True,
arg_list=(portbindings.PROFILE,),
**profile_arg) as port:
self._check_port_binding_profile(port['port'], profile)
port_id = port['port']['id']
state_arg = {'admin_state_up': True}
port = self._update('ports', port_id,
{'port': state_arg})['port']
{'port': state_arg},
as_admin=True)['port']
self._check_port_binding_profile(port, profile)
port = self._show('ports', port_id)['port']
port = self._show('ports', port_id, as_admin=True)['port']
self._check_port_binding_profile(port, profile)
def test_update_port_binding_host_id_none(self):
@ -2885,8 +2897,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
super(TestMultiSegmentNetworks, self).setUp()
def test_allocate_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@ -2914,8 +2925,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertEqual(dynamic_segment[driver_api.SEGMENTATION_ID], 1234)
def test_allocate_dynamic_segment_multiple_physnets(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@ -2950,8 +2960,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
segment = {driver_api.NETWORK_TYPE: 'vlan',
driver_api.PHYSICAL_NETWORK: physnet_name}
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@ -3000,8 +3009,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertEqual(1, len(allocs))
def test_allocate_release_dynamic_segment(self):
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
@ -3026,9 +3034,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
pnet.SEGMENTATION_ID: 1}}
network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
@ -3039,9 +3046,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def test_fail_update_network_provider_attr(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'physnet1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
pnet.PHYSICAL_NETWORK: 'physnet1'}}
network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
@ -3051,7 +3057,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'update_physnet1'}}
network_req = self.new_update_request('networks', data,
network['network']['id'])
network['network']['id'],
as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertIn('NeutronError', network)
@ -3063,9 +3070,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
def test_update_network_provider_attr_no_change(self):
data = {'network': {'name': 'net1',
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'physnet1',
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
pnet.PHYSICAL_NETWORK: 'physnet1'}}
network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
@ -3075,7 +3081,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'physnet1'}}
network_req = self.new_update_request('networks', data,
network['network']['id'])
network['network']['id'],
as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual('updated-net1', network['network']['name'])
@ -3085,9 +3092,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
mpnet_apidef.SEGMENTS:
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
net_req = self.new_create_request('networks', data)
pnet.SEGMENTATION_ID: 1}]}}
net_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
@ -3095,7 +3101,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
self.assertNotIn(mpnet_apidef.SEGMENTS, network['network'])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
net_req = self.new_show_request('networks', network['network']['id'],
as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
self.assertEqual('vlan', network['network'][pnet.NETWORK_TYPE])
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
@ -3110,9 +3117,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet2',
pnet.SEGMENTATION_ID: 2}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
pnet.SEGMENTATION_ID: 2}]}}
network_req = self.new_create_request('networks', data, as_admin=True)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
segments = network['network'][mpnet_apidef.SEGMENTS]
@ -3124,7 +3130,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
segments[segment_index][field])
# Tests get_network()
net_req = self.new_show_request('networks', network['network']['id'])
net_req = self.new_show_request('networks', network['network']['id'],
as_admin=True)
network = self.deserialize(self.fmt, net_req.get_response(self.api))
segments = network['network'][mpnet_apidef.SEGMENTS]
for segment_index, segment in enumerate(data['network']
@ -3157,9 +3164,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
pnet.SEGMENTATION_ID: 1},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1}],
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
pnet.SEGMENTATION_ID: 1}]}}
network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(400, res.status_int)
@ -3169,11 +3175,10 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
[{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'},
{pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1'}],
'tenant_id': 'tenant_one'}}
pnet.PHYSICAL_NETWORK: 'physnet1'}]}}
retry_fixture = fixture.DBRetryErrorsFixture(max_retries=2)
retry_fixture.setUp()
network_req = self.new_create_request('networks', data)
network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
self.assertEqual(201, res.status_int)
@ -3183,9 +3188,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
pnet.SEGMENTATION_ID: 1}}
network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
@ -3217,9 +3221,8 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase):
'shared': False,
pnet.NETWORK_TYPE: 'vlan',
pnet.PHYSICAL_NETWORK: 'physnet1',
pnet.SEGMENTATION_ID: 1,
'tenant_id': 'tenant_one'}}
network_req = self.new_create_request('networks', data)
pnet.SEGMENTATION_ID: 1}}
network_req = self.new_create_request('networks', data, as_admin=True)
res = network_req.get_response(self.api)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
@ -3394,9 +3397,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'create_network_postcommit',
side_effect=(exc.InvalidInput(
error_message=err_msg))):
tenant_id = uuidutils.generate_uuid()
data = {'network': {'name': 'net1',
'tenant_id': tenant_id}}
data = {'network': {'name': 'net1'}}
req = self.new_create_request('networks', data)
res = req.get_response(self.api)
self.assertEqual(400, res.status_int)
@ -3405,7 +3406,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
error['NeutronError']['type'])
# Check the client can see the root cause of error.
self.assertIn(err_msg, error['NeutronError']['message'])
query_params = "tenant_id=%s" % tenant_id
query_params = "tenant_id=%s" % self._tenant_id
nets = self._list('networks', query_params=query_params)
self.assertFalse(nets['networks'])
@ -3417,8 +3418,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'delete_network_postcommit') as dnp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
@ -3442,8 +3442,7 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with mock.patch.object(mech_logger.LoggerMechanismDriver,
'update_network_postcommit') as unp:
data = {'network': {'name': 'net1',
'tenant_id': 'tenant_one'}}
data = {'network': {'name': 'net1'}}
network_req = self.new_create_request('networks', data)
network_res = network_req.get_response(self.api)
self.assertEqual(201, network_res.status_int)
@ -3481,8 +3480,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
@ -3510,8 +3507,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
@ -3543,8 +3538,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
'cidr': '10.0.20.0/24',
'ip_version': constants.IP_VERSION_4,
'name': 'subnet1',
'tenant_id':
network['network']['tenant_id'],
'gateway_ip': '10.0.20.1'}}
subnet_req = self.new_create_request('subnets', data)
subnet_res = subnet_req.get_response(self.api)
@ -3579,8 +3572,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with self.network() as network:
net_id = network['network']['id']
data = {'port': {'network_id': net_id,
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
@ -3606,8 +3597,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'admin_state_up': 1,
'fixed_ips': []}}
@ -3655,8 +3644,6 @@ class TestFaultyMechansimDriver(Ml2PluginV2FaultyDriverTestCase):
subnet_id = subnet['subnet']['id']
data = {'port': {
'network_id': network['network']['id'],
'tenant_id':
network['network']['tenant_id'],
'name': 'port1',
'device_owner':
constants.DEVICE_OWNER_DVR_INTERFACE,
@ -3691,7 +3678,7 @@ class TestML2PluggableIPAM(test_ipam.UseIpamMixin, TestMl2SubnetsV2):
request.subnet_cidr = netaddr.IPNetwork(cidr)
request.allocation_pools = []
request.gateway_ip = netaddr.IPAddress(gateway_ip)
request.tenant_id = uuidutils.generate_uuid()
request.tenant_id = self._tenant_id
ipam_subnet = mock.Mock()
ipam_subnet.get_details.return_value = request
@ -3910,7 +3897,8 @@ class TestML2Segments(Ml2PluginV2TestCase):
driver_api.PHYSICAL_NETWORK: physical_network,
driver_api.SEGMENTATION_ID: segmentation_id}
with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
with self.network(as_admin=True,
**{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: network_segments})\
as test_network:
multisegment_network = test_network['network']
@ -3942,7 +3930,8 @@ class TestML2Segments(Ml2PluginV2TestCase):
driver_api.PHYSICAL_NETWORK: physical_network,
driver_api.SEGMENTATION_ID: segmentation_id}
with self.network(**{'arg_list': (mpnet_apidef.SEGMENTS, ),
with self.network(as_admin=True,
**{'arg_list': (mpnet_apidef.SEGMENTS, ),
mpnet_apidef.SEGMENTS: network_segments})\
as test_network:
multisegment_network = test_network['network']
@ -3968,7 +3957,7 @@ class TestML2Segments(Ml2PluginV2TestCase):
pnet.PHYSICAL_NETWORK: physical_network,
pnet.SEGMENTATION_ID: segmentation_id}
with self.network() as test_network:
with self.network(as_admin=True) as test_network:
            # network() implicitly creates a single segment
single_segment_network = test_network['network']
observed_network = self.driver._build_original_network(


@ -78,7 +78,8 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
mac_address = 'aa:aa:aa:aa:aa:aa'
host_arg = {portbindings.HOST_ID: host,
'mac_address': mac_address}
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
with self.port(name='name', is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
self._check_response(port['port'], vif_type, has_port_filter,
bound, status)
@ -152,12 +153,12 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
update_body = {'name': 'test_update'}
if new_host is not None:
update_body[portbindings.HOST_ID] = new_host
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
with self.port(name='name', is_admin=True,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
neutron_context = context.get_admin_context()
updated_port = self._update('ports', port['port']['id'],
{'port': update_body},
neutron_context=neutron_context)
as_admin=True)
port_data = updated_port['port']
if new_host is not None:
self.assertEqual(new_host,
@ -190,7 +191,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
ctx = context.get_admin_context()
plugin = directory.get_plugin()
host_id = {portbindings.HOST_ID: 'host1'}
with self.port(**host_id) as port:
with self.port(is_admin=True, **host_id) as port:
# Since the port is DOWN at first
# It's necessary to make its status ACTIVE for this test
plugin.update_port_status(ctx, port['port']['id'],
@ -221,7 +222,8 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
def test_distributed_binding(self):
ctx = context.get_admin_context()
with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
with self.port(is_admin=True,
device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
port_id = port['port']['id']
# Verify port's VIF type and status.
@ -235,7 +237,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
'device_id': 'router1'}})
# Get port and verify VIF type and status unchanged.
port = self._show('ports', port_id)
port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('DOWN', port['port']['status'])
@ -247,7 +249,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
self.assertEqual('local', details['network_type'])
# Get port and verify VIF type and changed status.
port = self._show('ports', port_id)
port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('BUILD', port['port']['status'])
@ -258,7 +260,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
host='host-ovs-no_filter')
# Get port and verify VIF type and changed status.
port = self._show('ports', port_id)
port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('ACTIVE', port['port']['status'])
@ -269,7 +271,7 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
host='host-ovs-no_filter')
# Get port and verify VIF type and changed status.
port = self._show('ports', port_id)
port = self._show('ports', port_id, as_admin=True)
self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
port['port'][portbindings.VIF_TYPE])
self.assertEqual('DOWN', port['port']['status'])
@ -382,7 +384,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
data = {'binding': kwargs}
binding_req = self.new_update_request('ports', data, port_id, fmt,
subresource='bindings',
sub_id=host)
sub_id=host,
as_admin=True)
return binding_req.get_response(self.api)
def _do_update_port_binding(self, fmt, port_id, host, **kwargs):
@ -457,7 +460,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
def test_create_duplicate_port_binding(self):
device_owner = '%s%s' % (const.DEVICE_OWNER_COMPUTE_PREFIX, 'nova')
host_arg = {portbindings.HOST_ID: self.host}
with self.port(device_owner=device_owner,
with self.port(is_admin=True,
device_owner=device_owner,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
response = self._create_port_binding(self.fmt, port['port']['id'],
@ -540,7 +544,7 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
active_binding = self._activate_port_binding(
port['id'], self.host, raw_response=False)
self._assert_bound_port_binding(active_binding)
updated_port = self._show('ports', port['id'])['port']
updated_port = self._show('ports', port['id'], as_admin=True)['port']
updated_bound_drivers = updated_port[portbindings.VIF_DETAILS].pop(
portbindings.VIF_DETAILS_BOUND_DRIVERS)
self.assertEqual({'0': 'test'}, updated_bound_drivers)
@ -711,7 +715,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
with mock.patch.object(
mechanism_test.TestMechanismDriver, '_check_port_context'
):
req = self.new_update_request('ports', update_body, port_id)
req = self.new_update_request('ports', update_body, port_id,
as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
def test_bind_non_pf_port_with_mac_port_not_updated(self):
@ -851,7 +856,8 @@ class ExtendedPortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
with mock.patch.object(
mechanism_test.TestMechanismDriver, '_check_port_context'
):
req = self.new_update_request('ports', update_body, port['id'])
req = self.new_update_request('ports', update_body, port['id'],
as_admin=True)
self.assertEqual(200, req.get_response(self.api).status_int)
# Neutron expected to reset the MAC to a generated one so that the


@ -159,7 +159,8 @@ class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
self.assertFalse(self.was_active)
self._delete(
'security-groups',
self._list('security-groups')['security_groups'][0]['id'])
self._list('security-groups')['security_groups'][0]['id'],
as_admin=True)
with self.port(subnet=s):
self.assertFalse(self.was_active)


@ -233,9 +233,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_networks_clears_dirty(self):
self._test_init('network')
net = self._make_network('json', 'meh', True)['network']
self.ctx.project_id = net['project_id']
self._list('networks', neutron_context=self.ctx)
self._make_network('json', 'meh', True)['network']
self._list('networks', as_admin=True)
self._verify_dirty_bit('network', expected_value=False)
def test_create_delete_port_marks_dirty(self):
@ -252,9 +251,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_ports_clears_dirty(self):
self._test_init('port')
net = self._make_network('json', 'meh', True)['network']
port = self._make_port('json', net['id'])['port']
self.ctx.project_id = port['project_id']
self._list('ports', neutron_context=self.ctx)
self._make_port('json', net['id'])['port']
self._list('ports', as_admin=True)
self._verify_dirty_bit('port', expected_value=False)
def test_create_delete_subnet_marks_dirty(self):
@ -286,17 +284,14 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_subnets_clears_dirty(self):
self._test_init('subnet')
net = self._make_network('json', 'meh', True)
subnet = self._make_subnet('json', net, '10.0.0.1',
'10.0.0.0/24')['subnet']
self.ctx.project_id = subnet['project_id']
self._list('subnets', neutron_context=self.ctx)
self._make_subnet('json', net, '10.0.0.1', '10.0.0.0/24')['subnet']
self._list('subnets', as_admin=True)
self._verify_dirty_bit('subnet', expected_value=False)
def test_create_delete_subnetpool_marks_dirty(self):
self._test_init('subnetpool')
pool = self._make_subnetpool('json', ['10.0.0.0/8'],
name='meh',
tenant_id=self._project_id)['subnetpool']
name='meh')['subnetpool']
self._verify_dirty_bit('subnetpool')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
@ -306,17 +301,14 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_subnetpools_clears_dirty(self):
self._test_init('subnetpool')
pool = self._make_subnetpool('json', ['10.0.0.0/8'],
name='meh',
tenant_id=self._project_id)['subnetpool']
self.ctx.project_id = pool['project_id']
self._list('subnetpools', neutron_context=self.ctx)
self._make_subnetpool('json', ['10.0.0.0/8'], name='meh')['subnetpool']
self._list('subnetpools', as_admin=True)
self._verify_dirty_bit('subnetpool', expected_value=False)
def test_create_delete_securitygroup_marks_dirty(self):
self._test_init('security_group')
sec_group = self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
'json', 'meh', 'meh')['security_group']
self._verify_dirty_bit('security_group')
# Clear the dirty bit
quota_db_api.set_quota_usage_dirty(
@ -327,17 +319,16 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_securitygroups_clears_dirty(self):
self._test_init('security_group')
self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
self.ctx.project_id = self._project_id
self._list('security-groups', neutron_context=self.ctx)
'json', 'meh', 'meh',)['security_group']
self._list('security-groups', as_admin=True)
self._verify_dirty_bit('security_group', expected_value=False)
def test_create_delete_securitygrouprule_marks_dirty(self):
self._test_init('security_group_rule')
sec_group = self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
'json', 'meh', 'meh')['security_group']
rule_req = self._build_security_group_rule(
sec_group['id'], 'ingress', 'TCP', tenant_id=self._project_id)
sec_group['id'], 'ingress', 'TCP')
sec_group_rule = self._make_security_group_rule(
'json', rule_req)['security_group_rule']
self._verify_dirty_bit('security_group_rule')
@ -349,10 +340,8 @@ class TestTrackedResources(BaseTestTrackedResources):
def test_list_securitygrouprules_clears_dirty(self):
self._test_init('security_group_rule')
self._make_security_group(
'json', 'meh', 'meh', tenant_id=self._project_id)['security_group']
self._make_security_group('json', 'meh', 'meh')['security_group']
# As the security group create operation also creates 2 security group
# rules there is no need to explicitly create any rule
self.ctx.project_id = self._project_id
self._list('security-group-rules', neutron_context=self.ctx)
self._list('security-group-rules', as_admin=True)
self._verify_dirty_bit('security_group_rule', expected_value=False)


@ -209,13 +209,13 @@ class L3SchedulerBaseMixin(object):
@contextlib.contextmanager
def router_with_ext_gw(self, name='router1', admin_state_up=True,
fmt=None, tenant_id=uuidutils.generate_uuid(),
fmt=None, tenant_id=None,
external_gateway_info=None,
subnet=None, set_context=False,
**kwargs):
subnet=None, **kwargs):
tenant_id = tenant_id or self._tenant_id
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
set_context, **kwargs)
**kwargs)
self._add_external_gateway_to_router(
router['router']['id'],
subnet['subnet']['network_id'])
@ -1380,6 +1380,7 @@ class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
subnet_ids = []
subnet_ids.append(subnet['subnet']['id'])
with self.port(subnet=subnet,
is_admin=True,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=('admin_state_up',
portbindings.PROFILE,), **host_args):


@ -17,7 +17,6 @@ from unittest import mock
from neutron_lib.agent import topics
from neutron_lib.api.definitions import metering as metering_apidef
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.tests import tools
@ -60,20 +59,6 @@ class MeteringTestExtensionManager(object):
return []
# TODO(akamyshnikova):we need this temporary FakeContext class while Context
# checking for existence of session attribute.
class FakeContext(context.ContextBaseWithSession):
def __init__(self, *args, **kwargs):
super(FakeContext, self).__init__(*args, **kwargs)
self._session = None
@property
def session(self):
if self._session is None:
self._session = db_api.get_writer_session()
return self._session
class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin,
test_metering_db.MeteringPluginDbTestCaseMixin):
@ -97,11 +82,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
self.project_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = FakeContext('', self.project_id, is_admin=True)
self.context_patch = mock.patch('neutron_lib.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.ctx = context.Context('', self._tenant_id).elevated()
self.topic = topics.METERING_AGENT
@ -159,7 +140,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@ -171,11 +152,9 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
with self.router(name='router2', tenant_id=tenant_id_2,
set_context=True):
self.mock_uuid.return_value = self.uuid
with self.router(name='router1', tenant_id=self.project_id,
set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True):
self.mock_add.assert_called_with(self.ctx, expected)
with self.router(name='router1'):
with self.metering_label():
self.mock_add.assert_called_with(mock.ANY, expected)
def test_add_metering_label_shared_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
@ -184,7 +163,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@ -195,14 +174,11 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
with self.router(name='router1', tenant_id=self.project_id,
shared=True, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True):
with self.router(name='router1', shared=True):
with self.metering_label():
self.mock_uuid.return_value = second_uuid
with self.metering_label(tenant_id=tenant_id_2, shared=True,
set_context=True):
self.mock_add.assert_called_with(self.ctx, expected)
with self.metering_label(tenant_id=tenant_id_2, shared=True):
self.mock_add.assert_called_with(mock.ANY, expected)
def test_remove_metering_label_rpc_call(self):
expected = [{'status': 'ACTIVE',
@ -210,19 +186,19 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
self.mock_add.assert_called_with(mock.ANY, expected)
self._delete('metering-labels',
label['metering_label']['id'])
label['metering_label']['id'],
as_admin=True)
self.mock_remove.assert_called_with(mock.ANY, expected)
def test_remove_one_metering_label_rpc_call(self):
@ -232,7 +208,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid, 'shared': False,
@ -246,22 +222,21 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
'name': 'label'}],
'id': self.uuid}]
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True):
with self.router():
with self.metering_label():
self.mock_uuid.return_value = second_uuid
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.metering_label() as label:
self.mock_add.assert_called_with(mock.ANY, expected_add)
self._delete('metering-labels',
label['metering_label']['id'])
label['metering_label']['id'],
as_admin=True)
self.mock_remove.assert_called_with(mock.ANY, expected_remove)
def test_add_and_remove_metering_label_rule_rpc_call(self):
@ -271,7 +246,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
@ -291,7 +266,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'remote_ip_prefix':
@ -307,15 +282,15 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
remote_ip_prefix = {'remote_ip_prefix': '10.0.0.0/24'}
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'], **remote_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
@ -326,7 +301,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
@ -346,7 +321,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'source_ip_prefix':
@ -362,16 +337,16 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
source_ip_prefix = {'source_ip_prefix': '10.0.0.0/24'}
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
@ -382,7 +357,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@ -402,7 +377,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@ -418,16 +393,16 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'id': self.uuid}]
source_ip_prefix = {'destination_ip_prefix': '10.0.0.0/24'}
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**source_ip_prefix):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
@ -438,7 +413,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@ -459,7 +434,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rule': {
'destination_ip_prefix':
@ -477,23 +452,22 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
ip_prefixes = {'source_ip_prefix': '10.0.0.0/24',
'destination_ip_prefix': '0.0.0.0/0'}
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
la = label['metering_label']
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(la['id'],
**ip_prefixes):
self.mock_add_rule.assert_called_with(mock.ANY,
expected_add)
self._delete('metering-label-rules', second_uuid)
self._delete('metering-label-rules', second_uuid,
as_admin=True)
self.mock_remove_rule.assert_called_with(mock.ANY,
expected_del)
def test_add_and_remove_metering_label_rule_src_and_remote_ip(self):
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@ -514,9 +488,8 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_dest_and_remote_ip(self):
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@ -537,9 +510,8 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
"NeutronError"]["message"])
def test_add_and_remove_metering_label_rule_no_ip_prefix_entered(self):
with self.router(tenant_id=self.project_id, set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True) as label:
with self.router():
with self.metering_label() as label:
la = label['metering_label']
res = self._create_metering_label_rule(
@ -567,12 +539,15 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
# 1b9e9a6c2ccf7f9bc06429f53e5126f356ae3d4a/neutron/api/v2/base.py#L563
self.ctx.GUARD_TRANSACTION = False
with self.metering_label(tenant_id=tenant_id) as metering_label:
with self.router(tenant_id=tenant_id, set_context=True) as r:
router = self._show('routers', r['router']['id'])
with self.router(tenant_id=tenant_id) as r:
router = self._show('routers', r['router']['id'],
tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
router = self._show('routers', r['router']['id'])
self._delete('metering-labels', metering_label_id, 204,
as_admin=True)
router = self._show('routers', r['router']['id'],
tenant_id=tenant_id)
self.assertEqual(tenant_id, router['router']['tenant_id'])
@ -609,11 +584,7 @@ class TestMeteringPluginL3AgentScheduler(
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
self.project_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = FakeContext('', self.project_id, is_admin=True)
self.context_patch = mock.patch('neutron_lib.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.ctx = context.Context('', self._tenant_id).elevated()
self.l3routers_patch = mock.patch(scheduler +
'.get_l3_agents_hosting_routers')
@ -640,7 +611,7 @@ class TestMeteringPluginL3AgentScheduler(
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
@ -651,7 +622,7 @@ class TestMeteringPluginL3AgentScheduler(
'gw_port_id': None,
'admin_state_up': True,
'distributed': False,
'project_id': self.project_id,
'project_id': self._tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid, 'shared': False,
@ -670,15 +641,12 @@ class TestMeteringPluginL3AgentScheduler(
self.l3routers_mock.side_effect = side_effect
with self.router(name='router1', tenant_id=self.project_id,
set_context=True):
with self.router(name='router1'):
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=self.project_id,
set_context=True):
with self.metering_label(tenant_id=self.project_id,
set_context=True):
with self.router(name='router2'):
with self.metering_label():
self.mock_add.assert_called_with(
self.ctx, tools.UnorderedList(expected))
mock.ANY, tools.UnorderedList(expected))
class TestMeteringPluginL3AgentSchedulerServicePlugin(
@ -727,7 +695,6 @@ class TestMeteringPluginRpcFromL3Agent(
self.meter_plugin = directory.get_plugin(constants.METERING)
self.tenant_id = 'admin_tenant_id'
self.tenant_id_1 = 'tenant_id_1'
self.tenant_id_2 = 'tenant_id_2'
@ -759,8 +726,7 @@ class TestMeteringPluginRpcFromL3Agent(
def test_get_sync_data_metering_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
with self.metering_label(tenant_id=self.tenant_id,
shared=True):
with self.metering_label(shared=True):
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
@ -773,7 +739,7 @@ class TestMeteringPluginRpcFromL3Agent(
def test_get_sync_data_metering_not_shared(self):
with self.router(name='router1', tenant_id=self.tenant_id_1):
with self.router(name='router2', tenant_id=self.tenant_id_2):
with self.metering_label(tenant_id=self.tenant_id):
with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(self.adminContext)
@ -786,13 +752,11 @@ class TestMeteringPluginRpcFromL3Agent(
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
with self.router(
name='router1', tenant_id=self.tenant_id
) as router1:
with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
with self.router(name='router2', tenant_id=self.tenant_id):
with self.metering_label(tenant_id=self.tenant_id):
with self.router(name='router2'):
with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(
@ -807,18 +771,15 @@ class TestMeteringPluginRpcFromL3Agent(
with self.subnet() as subnet:
s = subnet['subnet']
self._set_net_external(s['network_id'])
with self.router(
name='router1', tenant_id=self.tenant_id
) as router1:
with self.router(name='router1') as router1:
self._add_external_gateway_to_router(
router1['router']['id'], s['network_id'])
with self.router(
name='router2', tenant_id=self.tenant_id,
admin_state_up=False
name='router2', admin_state_up=False
) as router2:
self._add_external_gateway_to_router(
router2['router']['id'], s['network_id'])
with self.metering_label(tenant_id=self.tenant_id):
with self.metering_label():
callbacks = metering_rpc.MeteringRpcCallbacks(
self.meter_plugin)
data = callbacks.get_sync_data_metering(


@ -1927,7 +1927,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
data = {'alias_%s_rule' % rule_type: kwargs}
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_update_request(resource, data, rule_id, self.fmt)
request = self.new_update_request(resource, data, rule_id, self.fmt,
as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@ -1936,7 +1937,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _show_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
request = self.new_show_request(resource, rule_id, self.fmt,
as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@ -1945,7 +1947,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _delete_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_delete_request(resource, rule_id, self.fmt)
request = self.new_delete_request(resource, rule_id, self.fmt,
as_admin=True)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@ -2014,7 +2017,8 @@ class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
return_value=None):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
request = self.new_show_request(resource, rule_id, self.fmt,
as_admin=True)
res = request.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)


@ -97,7 +97,7 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
# with the flush process that occurs with these two connected objects,
# creating two copies of the Network object in the Session and putting
# it into an invalid state.
with self.network(shared=True):
with self.network(shared=True, as_admin=True):
pass
def test_port_name_update_revises(self):
@ -279,7 +279,8 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'port': {'qos_policy_id': qos_obj['id']}}
response = self._update('ports', port['port']['id'], data)
response = self._update('ports', port['port']['id'], data,
as_admin=True)
new_rev = response['port']['revision_number']
self.assertGreater(new_rev, rev)
@ -292,7 +293,8 @@ class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
'project_id': uuidutils.generate_uuid()}}
qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
data = {'network': {'qos_policy_id': qos_obj['id']}}
response = self._update('networks', network['network']['id'], data)
response = self._update('networks', network['network']['id'], data,
as_admin=True)
new_rev = response['network']['revision_number']
self.assertGreater(new_rev, rev)


@ -0,0 +1,25 @@
---
upgrade:
  - |
    The Neutron service now enables the new API policy (RBAC) defaults and
    scope enforcement by default. The default values of the config options
    ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults``
    have been changed to ``True``.
    This means that if you are using a system-scope token to access the
    Neutron API, the request will fail with a 403 error code. The new policy
    defaults are also enforced by default. To learn about the new defaults of
    each policy rule, refer to the `Policy New Defaults`_. For more details
    about the Neutron API policy changes, refer to `Policy Concepts`_.
    If you want to disable them, set the following config options in the
    ``neutron.conf`` file::

      [oslo_policy]
      enforce_new_defaults=False
      enforce_scope=False

    .. _`Policy New Defaults`: https://docs.openstack.org/neutron/latest/configuration/policy.html
    .. _`Policy Concepts`: https://docs.openstack.org/neutron/latest/contributor/internals/policy.html
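
For operators who want to verify which values a deployment will actually pick
up after this change, a small standalone check can be run against the deployed
configuration. This is only an illustrative sketch and not part of the patch;
the ``/etc/neutron/neutron.conf`` path is an assumption, and the two
``[oslo_policy]`` options are registered by hand so the script does not rely
on any oslo.policy internals::

    from oslo_config import cfg

    # Register only the two [oslo_policy] options discussed in the release
    # note, using the same names and the new default values.
    _opts = [
        cfg.BoolOpt('enforce_scope', default=True),
        cfg.BoolOpt('enforce_new_defaults', default=True),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(_opts, group='oslo_policy')

    # Assumed path; point this at the neutron.conf actually in use.
    conf(['--config-file', '/etc/neutron/neutron.conf'])

    print('enforce_scope =', conf.oslo_policy.enforce_scope)
    print('enforce_new_defaults =', conf.oslo_policy.enforce_new_defaults)

On a deployment that leaves both options unset, this should print ``True`` for
both values after upgrading; setting them to ``False`` in ``neutron.conf``
restores the old behaviour described in the note above.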