Add Bobcat Support

Change-Id: I3b53ec07b635908b3d980b7c4d1e660daa815d4e
This commit is contained in:
Nisar Khan 2024-07-01 07:03:55 +00:00 committed by Nisar Khan
parent f1355f85d6
commit 53a942c2b8
15 changed files with 549 additions and 400 deletions

View File

@ -15,40 +15,40 @@
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py38:
nodeset: ubuntu-focal
# Ignore py38 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py39:
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py310:
nodeset: ubuntu-jammy
# Ignore py310 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py311:
nodeset: ubuntu-jammy
# Ignore py311 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py312:
nodeset: ubuntu-jammy
# Ignore py312 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- legacy-group-based-policy-dsvm-functional:
voting: false
- legacy-group-based-policy-dsvm-aim:
@ -61,37 +61,37 @@
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py38:
nodeset: ubuntu-focal
# Ignore py38 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py39:
nodeset: ubuntu-focal
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py310:
nodeset: ubuntu-jammy
# Ignore py310 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py311:
nodeset: ubuntu-jammy
# Ignore py311 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2
- openstack-tox-py312:
nodeset: ubuntu-jammy
# Ignore py312 results until the gate is fixed
voting: false
required-projects:
- name: openstack/requirements
override-checkout: stable/2023.1
override-checkout: stable/2023.2

View File

@ -43,11 +43,11 @@ if [[ $ENABLE_NFP = True ]]; then
# Make sure that your public interface is not attached to any bridge.
PUBLIC_INTERFACE=
enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/2023.1
enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/2023.1
enable_plugin neutron https://opendev.org/openstack/neutron.git stable/2023.1
enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/2023.1
enable_plugin octavia https://opendev.org/openstack/octavia.git stable/2023.1
enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/2023.2
enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/2023.2
enable_plugin neutron https://opendev.org/openstack/neutron.git stable/2023.2
enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/2023.2
enable_plugin octavia https://opendev.org/openstack/octavia.git stable/2023.2
fi
fi

View File

@ -3323,8 +3323,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
original_port = payload.states[0]
port = payload.states[1]
if payload.metadata:
orig_binding = payload.metadata['orig_binding']
new_binding = payload.metadata['new_binding']
orig_binding = payload.metadata.get('orig_binding')
new_binding = payload.metadata.get('new_binding')
if self._is_port_bound(original_port) and 'fixed_ips' in port:
# When a bound port is updated with a subnet, if the port

View File

@ -95,10 +95,9 @@ class ApiManagerMixin(object):
defaults = kwargs
data = {type: {'tenant_id': self._tenant_id}}
data[type].update(defaults)
req = self.new_create_request(plural, data, self.fmt)
req.environ['neutron.context'] = context.Context(
'', kwargs.get('tenant_id', self._tenant_id) if not
is_admin_context else self._tenant_id, is_admin_context)
req = self.new_create_request(plural, data, self.fmt,
tenant_id=kwargs.get('tenant_id', self._tenant_id),
as_admin=is_admin_context)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
@ -115,12 +114,9 @@ class ApiManagerMixin(object):
data = {type: kwargs}
tenant_id = kwargs.pop('tenant_id', self._tenant_id)
# Create PT with bound port
req = self.new_update_request(plural, data, id, self.fmt)
req.environ['neutron.context'] = context.Context(
'', tenant_id if not is_admin_context else self._tenant_id,
is_admin_context)
req = self.new_update_request(plural, data, id, self.fmt,
tenant_id=tenant_id, as_admin=is_admin_context)
res = req.get_response(api or self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
elif deserialize and res.status_int >= webob.exc.HTTPClientError.code:
@ -130,9 +126,9 @@ class ApiManagerMixin(object):
def _show_resource(self, id, plural, expected_res_status=None,
is_admin_context=False, tenant_id=None,
deserialize=True):
req = self.new_show_request(plural, id, fmt=self.fmt)
req.environ['neutron.context'] = context.Context(
'', tenant_id or self._tenant_id, is_admin_context)
req = self.new_show_request(plural, id, fmt=self.fmt,
tenant_id='' or self._tenant_id, as_admin=is_admin_context)
res = req.get_response(self.ext_api)
if expected_res_status:
@ -144,7 +140,8 @@ class ApiManagerMixin(object):
def _delete_resource(self, id, plural, is_admin_context=False,
expected_res_status=None, tenant_id=None,
deserialize=True):
req = self.new_delete_request(plural, id)
req = self.new_delete_request(plural, id,
as_admin=is_admin_context)
req.environ['neutron.context'] = context.Context(
'', tenant_id or self._tenant_id, is_admin_context)
res = req.get_response(self.ext_api)
@ -192,7 +189,7 @@ class ApiManagerMixin(object):
'device_id': 'b'}}
# Create EP with bound port
req = self.new_update_request('ports', data, port_id,
self.fmt)
self.fmt, as_admin=True)
return self.deserialize(self.fmt, req.get_response(self.api))
def _bind_subport(self, ctx, trunk, port):
@ -206,7 +203,7 @@ class ApiManagerMixin(object):
def _unbind_port(self, port_id):
data = {'port': {'binding:host_id': ''}}
req = self.new_update_request('ports', data, port_id,
self.fmt)
self.fmt, as_admin=True)
return self.deserialize(self.fmt, req.get_response(self.api))
@ -298,7 +295,6 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
resource_plural = self._get_resource_plural(resource)
res = self._list(resource_plural,
neutron_context=neutron_context,
query_params=query_params)
params = None
if query_params:

View File

@ -42,7 +42,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
data = {'subnetpool': {'prefixes': ['10.0.0.0/8'],
'name': 'sp1',
'tenant_id': tenant_id}}
req = self.new_create_request('subnetpools', data)
req = self.new_create_request('subnetpools', data, as_admin=True)
res = req.get_response(self.api)
self.assertEqual(code, res.status_int)
@ -57,7 +57,8 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
sp_id = subnetpool['subnetpool']['id']
new_name = 'a_brand_new_name'
data = {'subnetpool': {'name': new_name}}
req = self.new_update_request('subnetpools', data, sp_id)
req = self.new_update_request('subnetpools', data, sp_id,
as_admin=True)
res = req.get_response(self.api)
self.assertEqual(code, res.status_int)
error = self.deserialize(self.fmt, res)
@ -99,7 +100,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
self.assertIsNotNone(ent)
# Test list subnetpools
res = self._list('subnetpools')
res = self._list('subnetpools', as_admin=True)
val = res['subnetpools'][0].get('subnetpool_extension')
self.assertEqual('Test_SubnetPool_Extension_extend', val)
@ -108,7 +109,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
{'subnetpool_extension':
'Test_SubnetPool_Extension_Update'}}
res = self._update('subnetpools', subnetpool['subnetpool']['id'],
data)
data, as_admin=True)
val = res['subnetpool'].get('subnetpool_extension')
self.assertEqual('Test_SubnetPool_Extension_Update_update', val)
@ -132,7 +133,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
data = {'address_scope': {'ip_version': 4,
'name': 'as1',
'tenant_id': tenant_id}}
req = self.new_create_request('address-scopes', data)
req = self.new_create_request('address-scopes', data, as_admin=True)
res = req.get_response(self.ext_api)
self.assertEqual(code, res.status_int)
@ -147,7 +148,8 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
as_id = address_scope['address_scope']['id']
new_name = 'a_brand_new_name'
data = {'address_scope': {'name': new_name}}
req = self.new_update_request('address-scopes', data, as_id)
req = self.new_update_request('address-scopes', data,
as_id, as_admin=True)
res = req.get_response(self.ext_api)
self.assertEqual(code, res.status_int)
error = self.deserialize(self.fmt, res)
@ -190,7 +192,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
self.assertIsNotNone(ent)
# Test list address_scopes
res = self._list('address-scopes')
res = self._list('address-scopes', as_admin=True)
val = res['address_scopes'][0].get('address_scope_extension')
self.assertEqual('Test_AddressScope_Extension_extend', val)
@ -199,7 +201,8 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
{'address_scope_extension':
'Test_AddressScope_Extension_Update'}}
res = self._update('address-scopes',
address_scope['address_scope']['id'], data)
address_scope['address_scope']['id'], data,
as_admin=True)
val = res['address_scope'].get('address_scope_extension')
self.assertEqual('Test_AddressScope_Extension_Update_update', val)
@ -235,12 +238,12 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
sp_id = subnetpool['subnetpool']['id']
val = subnetpool['subnetpool']['subnetpool_extension']
self.assertEqual("", val)
res = self._show('subnetpools', sp_id)
res = self._show('subnetpools', sp_id, as_admin=True)
val = res['subnetpool']['subnetpool_extension']
self.assertEqual("", val)
# Test list.
res = self._list('subnetpools')
res = self._list('subnetpools', as_admin=True)
val = res['subnetpools'][0]['subnetpool_extension']
self.assertEqual("", val)
@ -250,22 +253,23 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
'name': 'sp2',
'tenant_id': 't1',
'subnetpool_extension': 'abc'}}
req = self.new_create_request('subnetpools', data, self.fmt)
req = self.new_create_request('subnetpools', data, self.fmt,
as_admin=True)
res = req.get_response(self.api)
subnetpool = self.deserialize(self.fmt, res)
subnetpool_id = subnetpool['subnetpool']['id']
val = subnetpool['subnetpool']['subnetpool_extension']
self.assertEqual("abc", val)
res = self._show('subnetpools', subnetpool_id)
res = self._show('subnetpools', subnetpool_id, as_admin=True)
val = res['subnetpool']['subnetpool_extension']
self.assertEqual("abc", val)
# Test update.
data = {'subnetpool': {'subnetpool_extension': "def"}}
res = self._update('subnetpools', subnetpool_id, data)
res = self._update('subnetpools', subnetpool_id, data, as_admin=True)
val = res['subnetpool']['subnetpool_extension']
self.assertEqual("def", val)
res = self._show('subnetpools', subnetpool_id)
res = self._show('subnetpools', subnetpool_id, as_admin=True)
val = res['subnetpool']['subnetpool_extension']
self.assertEqual("def", val)
@ -276,12 +280,12 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
as_id = address_scope['address_scope']['id']
val = address_scope['address_scope']['address_scope_extension']
self.assertEqual("", val)
res = self._show('address-scopes', as_id)
res = self._show('address-scopes', as_id, as_admin=True)
val = res['address_scope']['address_scope_extension']
self.assertEqual("", val)
# Test list.
res = self._list('address-scopes')
res = self._list('address-scopes', as_admin=True)
val = res['address_scopes'][0]['address_scope_extension']
self.assertEqual("", val)
@ -291,21 +295,23 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
'name': 'as2',
'tenant_id': 't1',
'address_scope_extension': 'abc'}}
req = self.new_create_request('address-scopes', data, self.fmt)
req = self.new_create_request('address-scopes', data, self.fmt,
as_admin=True)
res = req.get_response(self.ext_api)
address_scope = self.deserialize(self.fmt, res)
address_scope_id = address_scope['address_scope']['id']
val = address_scope['address_scope']['address_scope_extension']
self.assertEqual("abc", val)
res = self._show('address-scopes', address_scope_id)
res = self._show('address-scopes', address_scope_id, as_admin=True)
val = res['address_scope']['address_scope_extension']
self.assertEqual("abc", val)
# Test update.
data = {'address_scope': {'address_scope_extension': "def"}}
res = self._update('address-scopes', address_scope_id, data)
res = self._update('address-scopes', address_scope_id, data,
as_admin=True)
val = res['address_scope']['address_scope_extension']
self.assertEqual("def", val)
res = self._show('address-scopes', address_scope_id)
res = self._show('address-scopes', address_scope_id, as_admin=True)
val = res['address_scope']['address_scope_extension']
self.assertEqual("def", val)

View File

@ -149,7 +149,7 @@ class TestCiscoApicAimL3Plugin(test_aim_mapping_driver.AIMBaseTestCase):
# there will be four calls in total to the event handler
self._verify_event_handler_calls(floatingip,
expected_call_count=2)
self._delete('floatingips', floatingip['id'])
self._delete('floatingips', floatingip['id'], as_admin=True)
# Expecting 2 more calls - 1 for the port, 1 for the floatingip
self._verify_event_handler_calls(
[internal_port, floatingip], expected_call_count=4)

View File

@ -121,7 +121,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
'tenant_id': 't2'}},
{'network': {'name': 'n3',
'tenant_id': 't1'}}]
res = self._create_bulk_from_list(self.fmt, 'network', networks)
res = self._create_bulk_from_list(self.fmt, 'network', networks,
as_admin=True)
self.assertEqual(201, res.status_int)
et.assert_has_calls([mock.call(mock.ANY, 't1'),
mock.call(mock.ANY, 't2')],
@ -134,7 +135,7 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
'ensure_tenant') as et:
self._make_subnet(self.fmt, net, None, '10.0.0.0/24',
tenant_id='t1')
tenant_id='t1', as_admin=True)
et.assert_called_once_with(mock.ANY, 't1')
def test_subnet_bulk(self):
@ -158,7 +159,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
'ip_version': 4,
'cidr': '10.0.3.0/24',
'tenant_id': 't1'}}]
res = self._create_bulk_from_list(self.fmt, 'subnet', subnets)
res = self._create_bulk_from_list(self.fmt, 'subnet', subnets,
as_admin=True)
self.assertEqual(201, res.status_int)
et.assert_has_calls([mock.call(mock.ANY, 't1'),
mock.call(mock.ANY, 't2')],
@ -170,7 +172,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
'ensure_tenant') as et:
self._make_port(self.fmt, net['network']['id'], tenant_id='t1')
self._make_port(self.fmt, net['network']['id'], tenant_id='t1',
as_admin=True)
et.assert_has_calls([mock.call(mock.ANY, 't1')])
self.assertEqual(2, et.call_count)
@ -189,7 +192,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
{'port': {'name': 'n3',
'network_id': network_id,
'tenant_id': 't1'}}]
res = self._create_bulk_from_list(self.fmt, 'port', ports)
res = self._create_bulk_from_list(self.fmt, 'port', ports,
as_admin=True)
self.assertEqual(201, res.status_int)
et.assert_has_calls([mock.call(mock.ANY, 't1'),
mock.call(mock.ANY, 't2')],
@ -238,7 +242,7 @@ class TestSubnetPool(Ml2PlusPluginV2TestCase):
with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
'update_subnetpool_postcommit') as post:
res = self._update('subnetpools', subnetpool['id'],
data)['subnetpool']
data, as_admin=True)['subnetpool']
self.assertEqual('newnameforsubnetpool', res['name'])
self.assertEqual(1, pre.call_count)
@ -262,7 +266,7 @@ class TestSubnetPool(Ml2PlusPluginV2TestCase):
self.plugin.get_subnetpool)
with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
'delete_subnetpool_postcommit') as post:
self._delete('subnetpools', subnetpool['id'])
self._delete('subnetpools', subnetpool['id'], as_admin=True)
self.assertEqual(1, pre.call_count)
self.assertEqual('sp1',
@ -303,7 +307,7 @@ class TestAddressScope(Ml2PlusPluginV2TestCase):
with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
'update_address_scope_postcommit') as post:
res = self._update('address-scopes', address_scope['id'],
data)['address_scope']
data, as_admin=True)['address_scope']
self.assertEqual('newnameforaddress_scope', res['name'])
self.assertEqual(1, pre.call_count)
@ -326,7 +330,8 @@ class TestAddressScope(Ml2PlusPluginV2TestCase):
pre.side_effect = self.exist_checker(self.plugin.get_address_scope)
with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
'delete_address_scope_postcommit') as post:
self._delete('address-scopes', address_scope['id'])
self._delete('address-scopes', address_scope['id'],
as_admin=True)
self.assertEqual(1, pre.call_count)
self.assertEqual('as1',

View File

@ -225,7 +225,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
extn_attr = ('router:external', DN,
'apic:nat_type', 'apic:snat_host_pool')
net = self._make_network(self.fmt, name, True,
net = self._make_network(self.fmt, name, True, as_admin=True,
arg_list=extn_attr,
**kwargs)['network']
subnet = self._make_subnet(
@ -259,11 +259,9 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
attrs.update(kwargs)
req = self.new_create_request('address-scopes',
{'address_scope': attrs}, self.fmt)
if not admin:
neutron_context = nctx.Context('', kwargs.get('tenant_id',
self._tenant_id))
req.environ['neutron.context'] = neutron_context
{'address_scope': attrs}, self.fmt,
tenant_id=kwargs.get('tenant_id',
self._tenant_id), as_admin=admin)
res = req.get_response(self.ext_api)
if expected_status:
@ -323,7 +321,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
req.get_response(self.api))['subnet']
def _show_port(self, id):
req = self.new_show_request('ports', id, fmt=self.fmt)
req = self.new_show_request('ports', id, fmt=self.fmt, as_admin=True)
return self.deserialize(self.fmt, req.get_response(self.api))['port']
def _show_network(self, id):
@ -332,7 +330,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
req.get_response(self.api))['network']
def _show_subnetpool(self, id):
req = self.new_show_request('subnetpools', id, fmt=self.fmt)
req = self.new_show_request('subnetpools', id, as_admin=True)
return self.deserialize(self.fmt,
req.get_response(self.api))['subnetpool']
@ -593,7 +591,8 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
for version in subnetpools_versions:
sp_id = l3p[version][0]
subpool = self._show_subnetpool(sp_id)
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
subpool = res['subnetpool']
self.assertIn(subpool['prefixes'][0], l3p['ip_pool'])
@ -629,7 +628,8 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
for version in subnetpools_versions:
sp_id = l3p[version][0]
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
as_admin=True)
res = req.get_response(self.api)
if explicit_subnetpool or (
version == 'subnetpools_v4' and v4_default) or (
@ -681,7 +681,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
if tenant_id:
kwargs['tenant_id'] = tenant_id
net = self._make_network(self.fmt, network_name, True,
net = self._make_network(self.fmt, network_name, True, as_admin=True,
arg_list=self.extension_attributes,
**kwargs)['network']
gw = str(netaddr.IPAddress(netaddr.IPNetwork(cidr).first + 1))
@ -3353,7 +3353,7 @@ class TestPolicyTarget(AIMBaseTestCase,
kwargs[DN] = {EXTERNAL_NETWORK: dn}
extn_attr = ('router:external', DN)
net = self._make_network(self.fmt, name, True,
net = self._make_network(self.fmt, name, True, as_admin=True,
arg_list=extn_attr,
**kwargs)['network']
subnet = self._make_subnet(
@ -5688,29 +5688,34 @@ class TestNeutronPortOperation(AIMBaseTestCase):
device_owner='compute:',
fixed_ips=[{'subnet_id': t2sub1['id']},
{'subnet_id': t2sub2['id']}],
allowed_address_pairs=allow_addr_active_aap)
allowed_address_pairs=allow_addr_active_aap,
as_admin=True)
# create 2 ports configured with the same allowed-addresses
p1 = self._make_port(self.fmt, net['network']['id'],
arg_list=('allowed_address_pairs',),
device_owner='compute:',
fixed_ips=[{'subnet_id': sub1['id']}],
allowed_address_pairs=allow_addr)['port']
allowed_address_pairs=allow_addr,
as_admin=True)['port']
t2p1 = self._make_port(self.fmt, t2net['network']['id'],
arg_list=('allowed_address_pairs',),
device_owner='compute:',
fixed_ips=[{'subnet_id': t2sub1['id']}],
allowed_address_pairs=allow_addr)['port']
allowed_address_pairs=allow_addr,
as_admin=True)['port']
p2 = self._make_port(self.fmt, net['network']['id'],
arg_list=('allowed_address_pairs',),
device_owner='compute:',
fixed_ips=[{'subnet_id': sub1['id']}],
allowed_address_pairs=allow_addr)['port']
allowed_address_pairs=allow_addr,
as_admin=True)['port']
t2p2 = self._make_port(self.fmt, t2net['network']['id'],
arg_list=('allowed_address_pairs',),
device_owner='compute:',
fixed_ips=[{'subnet_id': t2sub1['id']}],
allowed_address_pairs=allow_addr)['port']
allowed_address_pairs=allow_addr,
as_admin=True)['port']
self._bind_port_to_host(p1['id'], 'h1')
self._bind_port_to_host(t2p1['id'], 'h1')
self._bind_port_to_host(p_active_aap['id'], 'h1')
@ -5721,8 +5726,8 @@ class TestNeutronPortOperation(AIMBaseTestCase):
# belong to a different active_active_aap mode.
self._update('ports', p_active_aap['id'],
{'port': {'allowed_address_pairs': allow_addr}},
neutron_context=self._neutron_admin_context,
expected_code=webob.exc.HTTPBadRequest.code)
expected_code=webob.exc.HTTPBadRequest.code,
as_admin=True)
# Call agent => plugin RPC to get the details for each port. The
# results should only have the configured AAPs, with none of them
@ -5824,19 +5829,23 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p3 = self._make_port(self.fmt, net['network']['id'],
device_owner='compute:',
fixed_ips=[{'subnet_id': sub2['id'],
'ip_address': '1.2.3.250'}])['port']
'ip_address': '1.2.3.250'}],
as_admin=True)['port']
t2p3 = self._make_port(self.fmt, t2net['network']['id'],
device_owner='compute:',
fixed_ips=[{'subnet_id': t2sub2['id'],
'ip_address': '1.2.3.250'}])['port']
'ip_address': '1.2.3.250'}],
as_admin=True)['port']
p4 = self._make_port(self.fmt, net['network']['id'],
device_owner='compute:',
fixed_ips=[{'subnet_id': sub2['id'],
'ip_address': '1.2.3.251'}])['port']
'ip_address': '1.2.3.251'}],
as_admin=True)['port']
t2p4 = self._make_port(self.fmt, t2net['network']['id'],
device_owner='compute:',
fixed_ips=[{'subnet_id': t2sub2['id'],
'ip_address': '1.2.3.251'}])['port']
'ip_address': '1.2.3.251'}],
as_admin=True)['port']
self.l3_plugin.add_router_interface(
self._neutron_admin_context, rtr['id'], {'subnet_id': sub1['id']})
self.l3_plugin.add_router_interface(
@ -5848,9 +5857,11 @@ class TestNeutronPortOperation(AIMBaseTestCase):
self._neutron_admin_context, t2rtr['id'],
{'subnet_id': t2sub2['id']})
fip1 = self._make_floatingip(self.fmt, t2net_ext['id'],
port_id=t2p3['id'])['floatingip']
port_id=t2p3['id'],
as_admin=True)['floatingip']
fip2 = self._make_floatingip(self.fmt, t2net_ext['id'],
port_id=t2p4['id'])['floatingip']
port_id=t2p4['id'],
as_admin=True)['floatingip']
details = self.mech_driver.get_gbp_details(
self._neutron_admin_context, device='tap%s' % t2p1['id'],
host='h1')
@ -5904,7 +5915,7 @@ class TestNeutronPortOperation(AIMBaseTestCase):
# from the old pair are removed from the mapping table
p1 = self._update('ports', p1['id'],
{'port': {'allowed_address_pairs': update_addr}},
neutron_context=self._neutron_admin_context)['port']
as_admin=True)['port']
ips = self.mech_driver.get_ha_ipaddresses_for_port(p1['id'])
self.assertEqual(ips, [])
# Request ownership of the new AAP
@ -5922,7 +5933,7 @@ class TestNeutronPortOperation(AIMBaseTestCase):
p2 = self._update('ports', p2['id'],
{'port': {'allowed_address_pairs': update_addr}},
neutron_context=self._neutron_admin_context)['port']
as_admin=True)['port']
ips = self.mech_driver.get_ha_ipaddresses_for_port(p2['id'])
self.assertEqual(ips, [])
# Request ownership of the new AAP

View File

@ -172,7 +172,7 @@ class TestNeutronMapping(AimValidationTestCase):
def _test_routed_subnet(self, subnet_id, gw_ip):
# Get the AIM Subnet.
subnet = self._show('subnets', subnet_id)['subnet']
subnet = self._show('subnets', subnet_id, as_admin=True)['subnet']
sn_dn = subnet['apic:distinguished_names'][gw_ip]
sn = aim_resource.Subnet.from_dn(sn_dn)
@ -181,7 +181,7 @@ class TestNeutronMapping(AimValidationTestCase):
def _test_unscoped_vrf(self, net_id):
# Get the network's AIM VRF.
net = self._show('networks', net_id)['network']
net = self._show('networks', net_id, as_admin=True)['network']
vrf_dn = net['apic:distinguished_names']['VRF']
vrf = aim_resource.VRF.from_dn(vrf_dn)
@ -283,7 +283,7 @@ class TestNeutronMapping(AimValidationTestCase):
# Test subnet.
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24',
tenant_id='subnet_proj')['subnet']
as_admin=True, tenant_id='subnet_proj')['subnet']
self._test_project_resources(subnet['project_id'])
# Test port. Since Neutron creates the default SG for the
@ -292,12 +292,12 @@ class TestNeutronMapping(AimValidationTestCase):
# resource owned by port_prog.
port = self._make_port(
self.fmt, net['id'], security_groups=[],
tenant_id='port_proj')['port']
as_admin=True, tenant_id='port_proj')['port']
sgs = self._list(
'security-groups',
query_params='project_id=port_proj')['security_groups']
query_params='project_id=port_proj',
as_admin=True)['security_groups']
self.assertEqual(1, len(sgs))
self._delete('security-groups', sgs[0]['id'])
self._test_project_resources(port['project_id'])
# Test security group.
@ -319,8 +319,8 @@ class TestNeutronMapping(AimValidationTestCase):
# Test floatingip.
kwargs = {'router:external': True}
ext_net_resp = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)
self.fmt, 'ext_net', True, as_admin=True,
arg_list=self.extension_attributes, **kwargs)
ext_net = ext_net_resp['network']
self._make_subnet(
self.fmt, ext_net_resp, '100.100.100.1', '100.100.100.0/24')
@ -542,8 +542,8 @@ class TestNeutronMapping(AimValidationTestCase):
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
net_resp = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)
self.fmt, 'ext_net', True, as_admin=True,
arg_list=self.extension_attributes, **kwargs)
net = net_resp['network']
self._validate()
@ -774,7 +774,8 @@ class TestNeutronMapping(AimValidationTestCase):
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
ext_net = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
self.fmt, 'ext_net', True, as_admin=True,
arg_list=self.extension_attributes,
**kwargs)['network']
# Create extra external network to test CloneL3Out record below.
@ -782,7 +783,7 @@ class TestNeutronMapping(AimValidationTestCase):
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l2/instP-n2'}}
self._make_network(
self.fmt, 'extra_ext_net', True,
self.fmt, 'extra_ext_net', True, as_admin=True,
arg_list=self.extension_attributes, **kwargs)
# Create router as tenant_2.
@ -860,7 +861,8 @@ class TestNeutronMapping(AimValidationTestCase):
def test_unscoped_routing(self):
# Create shared network and unscoped subnet as tenant_1.
net_resp = self._make_network(
self.fmt, 'net1', True, tenant_id='tenant_1', shared=True)
self.fmt, 'net1', True, tenant_id='tenant_1',
as_admin=True, shared=True)
net1_id = net_resp['network']['id']
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24',
@ -886,8 +888,8 @@ class TestNeutronMapping(AimValidationTestCase):
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
ext_net = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)['network']
self.fmt, 'ext_net', True, as_admin=True,
arg_list=self.extension_attributes, **kwargs)['network']
# Create router as tenant_2.
kwargs = {'apic:external_provided_contracts': ['p1', 'p2'],
@ -1181,7 +1183,6 @@ class TestNeutronMapping(AimValidationTestCase):
# delete BridgeDomain.
bd = aim_resource.BridgeDomain.from_dn(bd_dn)
self.aim_mgr.delete(self.aim_ctx, bd)
# delete EndpointGroup.
epg = aim_resource.EndpointGroup.from_dn(epg_dn)
self.aim_mgr.delete(self.aim_ctx, epg)
@ -1242,7 +1243,7 @@ class TestNeutronMapping(AimValidationTestCase):
sg['id'], 'ingress', 'tcp', '22', '23')
rules = {'security_group_rules': [rule1['security_group_rule']]}
sg_rule = self._make_security_group_rule(
self.fmt, rules)['security_group_rules'][0]
self.fmt, rules, as_admin=True)['security_group_rules'][0]
# Test the AIM SecurityGroup.
tenant_name = self.driver.aim_mech_driver.name_mapper.project(
@ -1384,8 +1385,8 @@ class TestGbpMapping(AimValidationTestCase):
'apic:distinguished_names':
{'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
net_resp = self._make_network(
self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
**kwargs)
self.fmt, 'ext_net', True, as_admin=True,
arg_list=self.extension_attributes, **kwargs)
subnet = self._make_subnet(
self.fmt, net_resp, '10.0.0.1', '10.0.0.0/24')['subnet']

View File

@ -1324,7 +1324,8 @@ class TestPolicyTargetGroup(ResourceMappingTestCase):
data = {'policy_target_group': {'l2_policy_id': l2p_id,
'tenant_id': 'admin'}}
req = self.new_create_request('policy_target_groups', data)
req = self.new_create_request('policy_target_groups',
data, as_admin=True)
data = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual('CrossTenantPolicyTargetGroupL2PolicyNotSupported',
data['NeutronError']['type'])
@ -1452,7 +1453,7 @@ class TestL2Policy(ResourceMappingTestCase):
def _test_explicit_network_lifecycle(self, shared=False):
# Create L2 policy with explicit network.
with self.network(shared=shared) as network:
with self.network(shared=shared, as_admin=True) as network:
network_id = network['network']['id']
l2p = self.create_l2_policy(name="l2p1", network_id=network_id,
shared=shared)
@ -1583,10 +1584,11 @@ class TestL3Policy(ResourceMappingTestCase,
self.assertEqual(router_id, routers[0])
# Verify deleting L3 policy does not cleanup router.
req = self.new_delete_request('l3_policies', l3p_id)
req = self.new_delete_request('l3_policies', l3p_id, as_admin=True)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
req = self.new_show_request('routers', router_id, fmt=self.fmt)
req = self.new_show_request('routers', router_id, fmt=self.fmt,
as_admin=True)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
@ -1660,8 +1662,8 @@ class TestL3Policy(ResourceMappingTestCase,
def test_create_l3p_es(self):
# Simple test to verify l3p created with 1-N ES
with self.network(router__external=True) as net1:
with self.network(router__external=True) as net2:
with self.network(router__external=True, as_admin=True) as net1:
with self.network(router__external=True, as_admin=True) as net2:
with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
with self.subnet(cidr='10.10.2.0/24',
network=net2) as sub2:
@ -1688,8 +1690,8 @@ class TestL3Policy(ResourceMappingTestCase,
def test_update_l3p_es(self):
# Simple test to verify l3p updated with 1-N ES
with self.network(router__external=True) as net1:
with self.network(router__external=True) as net2:
with self.network(router__external=True, as_admin=True) as net1:
with self.network(router__external=True, as_admin=True) as net2:
with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
with self.subnet(cidr='10.10.2.0/24',
network=net2) as sub2:
@ -1718,8 +1720,8 @@ class TestL3Policy(ResourceMappingTestCase,
res['NeutronError']['type'])
def test_es_router_plumbing(self):
with self.network(router__external=True) as net1:
with self.network(router__external=True) as net2:
with self.network(router__external=True, as_admin=True) as net1:
with self.network(router__external=True, as_admin=True) as net2:
with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
with self.subnet(cidr='10.10.2.0/24',
network=net2) as sub2:
@ -1775,8 +1777,8 @@ class TestL3Policy(ResourceMappingTestCase,
{'destination': '172.0.0.0/16', 'nexthop': '10.10.1.1'}]
routes2 = [{'destination': '0.0.0.0/0', 'nexthop': '10.10.2.1'},
{'destination': '172.0.0.0/16', 'nexthop': '10.10.2.1'}]
with self.network(router__external=True) as net1:
with self.network(router__external=True) as net2:
with self.network(router__external=True, as_admin=True) as net1:
with self.network(router__external=True, as_admin=True) as net2:
with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
with self.subnet(cidr='10.10.2.0/24',
network=net2) as sub2:
@ -1823,7 +1825,8 @@ class TestL3Policy(ResourceMappingTestCase,
res['NeutronError']['type'])
def _show_subnetpool(self, id):
req = self.new_show_request('subnetpools', id, fmt=self.fmt)
req = self.new_show_request('subnetpools', id, fmt=self.fmt,
as_admin=True)
return self.deserialize(self.fmt,
req.get_response(self.api))['subnetpool']
@ -1874,7 +1877,8 @@ class TestL3Policy(ResourceMappingTestCase,
for version in subnetpools_versions:
sp_id = l3p[version][0]
subpool = self._show_subnetpool(sp_id)
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
as_admin=True)
res = self.deserialize(self.fmt, req.get_response(self.api))
subpool = res['subnetpool']
self.assertIn(subpool['prefixes'][0], l3p['ip_pool'])
@ -1903,7 +1907,8 @@ class TestL3Policy(ResourceMappingTestCase,
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
for version in subnetpools_versions:
sp_id = l3p[version][0]
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
as_admin=True)
res = req.get_response(self.api)
if explicit_subnetpool or (
version == 'subnetpools_v4' and v4_default) or (
@ -2581,7 +2586,7 @@ class TestPolicyRuleSet(ResourceMappingTestCase):
pr = self._create_ssh_allow_rule()
prs = self.create_policy_rule_set(
policy_rules=[pr['id']])['policy_rule_set']
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
es = self.create_external_segment(
subnet_id=sub['subnet']['id'],
@ -2795,7 +2800,7 @@ class TestExternalSegment(ResourceMappingTestCase):
def test_explicit_subnet_lifecycle(self):
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
es = self.create_external_segment(
subnet_id=sub['subnet']['id'])['external_segment']
@ -2809,7 +2814,7 @@ class TestExternalSegment(ResourceMappingTestCase):
es['ip_version'])
def test_update(self, proxy_ip_pool1=None, proxy_ip_pool2=None):
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
changes = {'port_address_translation': True}
es = self.create_external_segment(
@ -2903,7 +2908,7 @@ class TestExternalSegment(ResourceMappingTestCase):
def test_update_different_tenant(self):
with self.network(router__external=True, shared=True,
tenant_id='admin') as net:
tenant_id='admin', as_admin=True) as net:
with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
es = self.create_external_segment(
subnet_id=sub['subnet']['id'],
@ -2931,7 +2936,7 @@ class TestExternalSegment(ResourceMappingTestCase):
self._verify_prs_rules(prs['id'])
def test_implicit_es(self):
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -2952,7 +2957,8 @@ class TestExternalSegment(ResourceMappingTestCase):
expected_res_status=200)
def test_implicit_es_shared(self):
with self.network(router__external=True, shared=True) as net:
with self.network(router__external=True, shared=True,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
shared=True,
@ -2974,7 +2980,8 @@ class TestExternalSegment(ResourceMappingTestCase):
expected_res_status=200)
def test_delete(self):
with self.network(router__external=True, shared=True) as net:
with self.network(router__external=True, shared=True,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -2983,7 +2990,8 @@ class TestExternalSegment(ResourceMappingTestCase):
self.show_external_segment(es['id'], expected_res_status=404)
def test_delete_in_use(self):
with self.network(router__external=True, shared=True) as net:
with self.network(router__external=True, shared=True,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -2997,7 +3005,8 @@ class TestExternalSegment(ResourceMappingTestCase):
self.show_external_segment(es['id'], expected_res_status=200)
def test_update_l3p_remove_es(self):
with self.network(router__external=True, shared=True) as net:
with self.network(router__external=True, shared=True,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
self.create_external_segment(
name="default", subnet_id=sub['subnet']['id'])
@ -3011,7 +3020,7 @@ class TestExternalSegment(ResourceMappingTestCase):
class TestExternalPolicy(ResourceMappingTestCase):
def test_create(self):
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='10.10.1.0/24', network=net) as sub1:
with self.subnet(cidr='10.10.2.0/24', network=net) as sub2:
es1 = self.create_external_segment(
@ -3046,7 +3055,7 @@ class TestExternalPolicy(ResourceMappingTestCase):
res['NeutronError']['type'])
def test_update(self):
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='10.10.1.0/24', network=net) as sub1:
with self.subnet(cidr='10.10.2.0/24', network=net) as sub2:
route = {'destination': '172.0.0.0/8', 'nexthop': None}
@ -3245,7 +3254,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
def test_create_nsp_ip_pool_multiple_ptgs(self):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -3308,7 +3317,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
def test_nsp_fip_single(self):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -3353,7 +3362,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
def test_nsp_fip_single_different_pool(self):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -3408,7 +3417,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
def test_nsp_rejected_without_nat_pool(self):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
self.create_external_segment(
name="default",
@ -3467,8 +3476,8 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
"name": "test"}],
expected_res_status=webob.exc.HTTPCreated.code)[
'network_service_policy']
with self.network(router__external=True) as net1:
with self.network(router__external=True) as net2:
with self.network(router__external=True, as_admin=True) as net1:
with self.network(router__external=True, as_admin=True) as net2:
with self.subnet(cidr='192.168.1.0/24', network=net1) as sub1:
with self.subnet(
cidr='192.168.2.0/24', network=net2) as sub2:
@ -3500,7 +3509,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
def test_nsp_delete_nat_pool_rejected(self):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -3526,7 +3535,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
def test_update_nsp_nat_pool_after_pt_create(self):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -3789,7 +3798,8 @@ class TestNatPool(ResourceMappingTestCase):
def _test_overlapping_peer_rejected(self, shared1=False, shared2=False):
shared_net = shared1 or shared2
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True, shared=shared_net) as net:
with self.network(router__external=True, shared=shared_net,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -3825,7 +3835,8 @@ class TestNatPool(ResourceMappingTestCase):
def _test_implicit_subnet_created(self, shared=False):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True, shared=shared) as net:
with self.network(router__external=True, shared=shared,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default",
@ -3850,7 +3861,8 @@ class TestNatPool(ResourceMappingTestCase):
def _test_partially_overlapping_subnets_rejected(self, shared=False):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True, shared=shared) as net:
with self.network(router__external=True, shared=shared,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
with self.subnet(cidr='192.168.1.0/28', network=net):
es = self.create_external_segment(
@ -3875,7 +3887,8 @@ class TestNatPool(ResourceMappingTestCase):
def _test_overlapping_subnets(self, shared=False):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True, shared=shared) as net:
with self.network(router__external=True, shared=shared,
as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
with self.subnet(cidr='192.168.1.0/24', network=net) as sub2:
es = self.create_external_segment(
@ -3901,7 +3914,7 @@ class TestNatPool(ResourceMappingTestCase):
def _test_subnet_swap(self, owned=True):
routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
es = self.create_external_segment(
name="default", subnet_id=sub['subnet']['id'],
@ -3932,7 +3945,8 @@ class TestNatPool(ResourceMappingTestCase):
ip_version=4, ip_pool=ip_pool,
expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
sub_id = nat_pool['subnet_id']
with self.network(router__external=True) as net2:
with self.network(router__external=True,
as_admin=True) as net2:
with self.subnet(cidr='192.167.0.0/24',
network=net2) as sub2:
es2 = self.create_external_segment(
@ -3973,7 +3987,7 @@ class TestNatPool(ResourceMappingTestCase):
result['NeutronError']['type'])
def test_delete_with_fip_allocated(self):
with self.network(router__external=True) as net:
with self.network(router__external=True, as_admin=True) as net:
with self.subnet(cidr='192.168.0.0/30', enable_dhcp=False,
network=net) as sub:
es = self.create_external_segment(

View File

@ -211,7 +211,8 @@ class TestQosPolicy(TestAIMQosBase):
kwargs['qos_policy_id'] = net_qos_id
resp = self._create_network(
self.fmt, 'net', True, arg_list=tuple(list(kwargs.keys())),
self.fmt, 'net', True, as_admin=True,
arg_list=tuple(list(kwargs.keys())),
**kwargs)
result = self.deserialize(self.fmt, resp)
self.assertEqual(

View File

@ -746,12 +746,12 @@ class TestPortPairOpflexAgent(TestAIMServiceFunctionChainingBase):
def test_port_pair_with_opflex_agent_vlan_nets(self):
# Correct workflow with both nets of type vlan.
kwargs = {'provider:network_type': 'vlan'}
net1 = self._make_network(self.fmt, 'net1', True,
net1 = self._make_network(self.fmt, 'net1', True, as_admin=True,
arg_list=tuple(list(kwargs.keys())), **kwargs)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
p1 = self._make_port(self.fmt, net1['network']['id'])['port']
net2 = self._make_network(self.fmt, 'net2', True,
net2 = self._make_network(self.fmt, 'net2', True, as_admin=True,
arg_list=tuple(list(kwargs.keys())), **kwargs)
self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
p2 = self._make_port(self.fmt, net2['network']['id'])['port']
@ -766,7 +766,7 @@ class TestPortPairOpflexAgent(TestAIMServiceFunctionChainingBase):
def test_port_pair_invalid_with_opflex_agent_opflex_nets(self):
# Validate that opflex type nets are invalid.
kwargs = {'provider:network_type': 'vlan'}
net1 = self._make_network(self.fmt, 'net1', True,
net1 = self._make_network(self.fmt, 'net1', True, as_admin=True,
arg_list=tuple(list(kwargs.keys())), **kwargs)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
p1 = self._make_port(self.fmt, net1['network']['id'])['port']

View File

@ -4,19 +4,19 @@
hacking>=6.0.1 # Apache-2.0
# Since version numbers for these are specified in
# https://releases.openstack.org/constraints/upper/2023.1, they cannot be
# https://releases.openstack.org/constraints/upper/2023.2, they cannot be
# referenced as GIT URLs.
neutron
python-heatclient
python-keystoneclient
-e git+https://opendev.org/openstack/networking-sfc.git@stable/2023.1#egg=networking-sfc
-e git+https://opendev.org/openstack/networking-sfc.git@stable/2023.2#egg=networking-sfc
-e git+https://github.com/noironetworks/apicapi.git@master#egg=apicapi
-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/2023.1#egg=neutron-opflex-agent
-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/2023.2#egg=neutron-opflex-agent
-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/2023.1#egg=python-group-based-policy-client
-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/2023.2#egg=python-group-based-policy-client
coverage!=4.4,>=4.0 # Apache-2.0
flake8-import-order==0.12 # LGPLv3

View File

@ -1,5 +1,5 @@
[tox]
envlist = py38,py39,py310,pep8
envlist = py38,py39,py310,py311,pep8
minversion = 3.18.0
skipsdist = False
ignore_basepython_conflict = True
@ -24,7 +24,7 @@ usedevelop = True
install_command =
pip install {opts} {packages}
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2023.1}
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2023.2}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = sh