Add Bobcat Support

Update the Zuul jobs and the devstack plugin checkouts from
stable/2023.1 to stable/2023.2, and adapt the unit tests to request
admin context through the as_admin flag instead of overriding
req.environ['neutron.context'] directly.

Change-Id: I3b53ec07b635908b3d980b7c4d1e660daa815d4e
parent f1355f85d6
commit 53a942c2b8

 .zuul.yaml | 24 ++++++++++++------------
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -15,40 +15,40 @@
             nodeset: ubuntu-focal
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py38:
             nodeset: ubuntu-focal
             # Ignore py38 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py39:
             nodeset: ubuntu-focal
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py310:
             nodeset: ubuntu-jammy
             # Ignore py310 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py311:
             nodeset: ubuntu-jammy
             # Ignore py311 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py312:
             nodeset: ubuntu-jammy
             # Ignore py311 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - legacy-group-based-policy-dsvm-functional:
             voting: false
         - legacy-group-based-policy-dsvm-aim:
@@ -61,37 +61,37 @@
             nodeset: ubuntu-focal
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py38:
             nodeset: ubuntu-focal
             # Ignore py38 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py39:
             nodeset: ubuntu-focal
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py310:
             nodeset: ubuntu-jammy
             # Ignore py310 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py311:
             nodeset: ubuntu-jammy
             # Ignore py311 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
         - openstack-tox-py312:
             nodeset: ubuntu-jammy
             # Ignore py311 results until the gate is fixed
             voting: false
             required-projects:
               - name: openstack/requirements
-                override-checkout: stable/2023.1
+                override-checkout: stable/2023.2
@@ -43,11 +43,11 @@ if [[ $ENABLE_NFP = True ]]; then
 # Make sure that your public interface is not attached to any bridge.
 PUBLIC_INTERFACE=
 
-enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/2023.1
-enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/2023.1
-enable_plugin neutron https://opendev.org/openstack/neutron.git stable/2023.1
-enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/2023.1
-enable_plugin octavia https://opendev.org/openstack/octavia.git stable/2023.1
+enable_plugin neutron-fwaas http://opendev.org/openstack/neutron-fwaas.git stable/2023.2
+enable_plugin neutron-lbaas https://opendev.org/openstack/neutron-lbaas.git stable/2023.2
+enable_plugin neutron https://opendev.org/openstack/neutron.git stable/2023.2
+enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas.git stable/2023.2
+enable_plugin octavia https://opendev.org/openstack/octavia.git stable/2023.2
 
 fi
 fi
@@ -3323,8 +3323,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
         original_port = payload.states[0]
         port = payload.states[1]
         if payload.metadata:
-            orig_binding = payload.metadata['orig_binding']
-            new_binding = payload.metadata['new_binding']
+            orig_binding = payload.metadata.get('orig_binding')
+            new_binding = payload.metadata.get('new_binding')
 
         if self._is_port_bound(original_port) and 'fixed_ips' in port:
             # When a bound port is updated with a subnet, if the port
@@ -95,10 +95,9 @@ class ApiManagerMixin(object):
         defaults = kwargs
         data = {type: {'tenant_id': self._tenant_id}}
         data[type].update(defaults)
-        req = self.new_create_request(plural, data, self.fmt)
-        req.environ['neutron.context'] = context.Context(
-            '', kwargs.get('tenant_id', self._tenant_id) if not
-            is_admin_context else self._tenant_id, is_admin_context)
+        req = self.new_create_request(plural, data, self.fmt,
+            tenant_id=kwargs.get('tenant_id', self._tenant_id),
+            as_admin=is_admin_context)
         res = req.get_response(self.ext_api)
         if expected_res_status:
             self.assertEqual(expected_res_status, res.status_int)
@@ -115,12 +114,9 @@ class ApiManagerMixin(object):
         data = {type: kwargs}
         tenant_id = kwargs.pop('tenant_id', self._tenant_id)
         # Create PT with bound port
-        req = self.new_update_request(plural, data, id, self.fmt)
-        req.environ['neutron.context'] = context.Context(
-            '', tenant_id if not is_admin_context else self._tenant_id,
-            is_admin_context)
+        req = self.new_update_request(plural, data, id, self.fmt,
+            tenant_id=tenant_id, as_admin=is_admin_context)
         res = req.get_response(api or self.ext_api)
-
         if expected_res_status:
             self.assertEqual(expected_res_status, res.status_int)
         elif deserialize and res.status_int >= webob.exc.HTTPClientError.code:
@@ -130,9 +126,9 @@ class ApiManagerMixin(object):
     def _show_resource(self, id, plural, expected_res_status=None,
                        is_admin_context=False, tenant_id=None,
                        deserialize=True):
-        req = self.new_show_request(plural, id, fmt=self.fmt)
-        req.environ['neutron.context'] = context.Context(
-            '', tenant_id or self._tenant_id, is_admin_context)
+        req = self.new_show_request(plural, id, fmt=self.fmt,
+            tenant_id=tenant_id or self._tenant_id,
+            as_admin=is_admin_context)
         res = req.get_response(self.ext_api)
 
         if expected_res_status:
@@ -144,7 +140,8 @@ class ApiManagerMixin(object):
     def _delete_resource(self, id, plural, is_admin_context=False,
                          expected_res_status=None, tenant_id=None,
                          deserialize=True):
-        req = self.new_delete_request(plural, id)
+        req = self.new_delete_request(plural, id,
+                                      as_admin=is_admin_context)
         req.environ['neutron.context'] = context.Context(
             '', tenant_id or self._tenant_id, is_admin_context)
         res = req.get_response(self.ext_api)
@@ -192,7 +189,7 @@ class ApiManagerMixin(object):
                          'device_id': 'b'}}
         # Create EP with bound port
         req = self.new_update_request('ports', data, port_id,
-                                      self.fmt)
+                                      self.fmt, as_admin=True)
         return self.deserialize(self.fmt, req.get_response(self.api))
 
     def _bind_subport(self, ctx, trunk, port):
@@ -206,7 +203,7 @@ class ApiManagerMixin(object):
     def _unbind_port(self, port_id):
         data = {'port': {'binding:host_id': ''}}
         req = self.new_update_request('ports', data, port_id,
-                                      self.fmt)
+                                      self.fmt, as_admin=True)
         return self.deserialize(self.fmt, req.get_response(self.api))
 
 
@@ -298,7 +295,6 @@ class GroupPolicyDBTestBase(ApiManagerMixin):
         resource_plural = self._get_resource_plural(resource)
 
         res = self._list(resource_plural,
-                         neutron_context=neutron_context,
                          query_params=query_params)
         params = None
         if query_params:
[File diff suppressed because it is too large]
@@ -42,7 +42,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         data = {'subnetpool': {'prefixes': ['10.0.0.0/8'],
                                'name': 'sp1',
                                'tenant_id': tenant_id}}
-        req = self.new_create_request('subnetpools', data)
+        req = self.new_create_request('subnetpools', data, as_admin=True)
         res = req.get_response(self.api)
         self.assertEqual(code, res.status_int)
 
@@ -57,7 +57,8 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         sp_id = subnetpool['subnetpool']['id']
         new_name = 'a_brand_new_name'
         data = {'subnetpool': {'name': new_name}}
-        req = self.new_update_request('subnetpools', data, sp_id)
+        req = self.new_update_request('subnetpools', data, sp_id,
+                                      as_admin=True)
         res = req.get_response(self.api)
         self.assertEqual(code, res.status_int)
         error = self.deserialize(self.fmt, res)
@@ -99,7 +100,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         self.assertIsNotNone(ent)
 
         # Test list subnetpools
-        res = self._list('subnetpools')
+        res = self._list('subnetpools', as_admin=True)
         val = res['subnetpools'][0].get('subnetpool_extension')
         self.assertEqual('Test_SubnetPool_Extension_extend', val)
 
@@ -108,7 +109,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
                 {'subnetpool_extension':
                  'Test_SubnetPool_Extension_Update'}}
         res = self._update('subnetpools', subnetpool['subnetpool']['id'],
-                           data)
+                           data, as_admin=True)
         val = res['subnetpool'].get('subnetpool_extension')
         self.assertEqual('Test_SubnetPool_Extension_Update_update', val)
 
@@ -132,7 +133,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         data = {'address_scope': {'ip_version': 4,
                                   'name': 'as1',
                                   'tenant_id': tenant_id}}
-        req = self.new_create_request('address-scopes', data)
+        req = self.new_create_request('address-scopes', data, as_admin=True)
         res = req.get_response(self.ext_api)
         self.assertEqual(code, res.status_int)
 
@@ -147,7 +148,8 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         as_id = address_scope['address_scope']['id']
         new_name = 'a_brand_new_name'
         data = {'address_scope': {'name': new_name}}
-        req = self.new_update_request('address-scopes', data, as_id)
+        req = self.new_update_request('address-scopes', data,
+                                      as_id, as_admin=True)
         res = req.get_response(self.ext_api)
         self.assertEqual(code, res.status_int)
         error = self.deserialize(self.fmt, res)
@@ -190,7 +192,7 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         self.assertIsNotNone(ent)
 
         # Test list address_scopes
-        res = self._list('address-scopes')
+        res = self._list('address-scopes', as_admin=True)
         val = res['address_scopes'][0].get('address_scope_extension')
         self.assertEqual('Test_AddressScope_Extension_extend', val)
 
@@ -199,7 +201,8 @@ class ExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
                 {'address_scope_extension':
                  'Test_AddressScope_Extension_Update'}}
         res = self._update('address-scopes',
-                           address_scope['address_scope']['id'], data)
+                           address_scope['address_scope']['id'], data,
+                           as_admin=True)
         val = res['address_scope'].get('address_scope_extension')
         self.assertEqual('Test_AddressScope_Extension_Update_update', val)
 
@@ -235,12 +238,12 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         sp_id = subnetpool['subnetpool']['id']
         val = subnetpool['subnetpool']['subnetpool_extension']
         self.assertEqual("", val)
-        res = self._show('subnetpools', sp_id)
+        res = self._show('subnetpools', sp_id, as_admin=True)
         val = res['subnetpool']['subnetpool_extension']
         self.assertEqual("", val)
 
         # Test list.
-        res = self._list('subnetpools')
+        res = self._list('subnetpools', as_admin=True)
         val = res['subnetpools'][0]['subnetpool_extension']
         self.assertEqual("", val)
 
@@ -250,22 +253,23 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
                                'name': 'sp2',
                                'tenant_id': 't1',
                                'subnetpool_extension': 'abc'}}
-        req = self.new_create_request('subnetpools', data, self.fmt)
+        req = self.new_create_request('subnetpools', data, self.fmt,
+                                      as_admin=True)
         res = req.get_response(self.api)
         subnetpool = self.deserialize(self.fmt, res)
         subnetpool_id = subnetpool['subnetpool']['id']
         val = subnetpool['subnetpool']['subnetpool_extension']
         self.assertEqual("abc", val)
-        res = self._show('subnetpools', subnetpool_id)
+        res = self._show('subnetpools', subnetpool_id, as_admin=True)
         val = res['subnetpool']['subnetpool_extension']
         self.assertEqual("abc", val)
 
         # Test update.
         data = {'subnetpool': {'subnetpool_extension': "def"}}
-        res = self._update('subnetpools', subnetpool_id, data)
+        res = self._update('subnetpools', subnetpool_id, data, as_admin=True)
         val = res['subnetpool']['subnetpool_extension']
         self.assertEqual("def", val)
-        res = self._show('subnetpools', subnetpool_id)
+        res = self._show('subnetpools', subnetpool_id, as_admin=True)
         val = res['subnetpool']['subnetpool_extension']
         self.assertEqual("def", val)
 
@@ -276,12 +280,12 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
         as_id = address_scope['address_scope']['id']
         val = address_scope['address_scope']['address_scope_extension']
         self.assertEqual("", val)
-        res = self._show('address-scopes', as_id)
+        res = self._show('address-scopes', as_id, as_admin=True)
         val = res['address_scope']['address_scope_extension']
         self.assertEqual("", val)
 
         # Test list.
-        res = self._list('address-scopes')
+        res = self._list('address-scopes', as_admin=True)
         val = res['address_scopes'][0]['address_scope_extension']
         self.assertEqual("", val)
 
@@ -291,21 +295,23 @@ class DBExtensionDriverTestCase(test_plugin.Ml2PlusPluginV2TestCase):
                                   'name': 'as2',
                                   'tenant_id': 't1',
                                   'address_scope_extension': 'abc'}}
-        req = self.new_create_request('address-scopes', data, self.fmt)
+        req = self.new_create_request('address-scopes', data, self.fmt,
+                                      as_admin=True)
         res = req.get_response(self.ext_api)
         address_scope = self.deserialize(self.fmt, res)
         address_scope_id = address_scope['address_scope']['id']
         val = address_scope['address_scope']['address_scope_extension']
         self.assertEqual("abc", val)
-        res = self._show('address-scopes', address_scope_id)
+        res = self._show('address-scopes', address_scope_id, as_admin=True)
         val = res['address_scope']['address_scope_extension']
         self.assertEqual("abc", val)
 
         # Test update.
         data = {'address_scope': {'address_scope_extension': "def"}}
-        res = self._update('address-scopes', address_scope_id, data)
+        res = self._update('address-scopes', address_scope_id, data,
+                           as_admin=True)
         val = res['address_scope']['address_scope_extension']
         self.assertEqual("def", val)
-        res = self._show('address-scopes', address_scope_id)
+        res = self._show('address-scopes', address_scope_id, as_admin=True)
         val = res['address_scope']['address_scope_extension']
         self.assertEqual("def", val)
@@ -149,7 +149,7 @@ class TestCiscoApicAimL3Plugin(test_aim_mapping_driver.AIMBaseTestCase):
         # there will be four calls in total to the event handler
         self._verify_event_handler_calls(floatingip,
                                          expected_call_count=2)
-        self._delete('floatingips', floatingip['id'])
+        self._delete('floatingips', floatingip['id'], as_admin=True)
         # Expecting 2 more calls - 1 for the port, 1 for the floatingip
         self._verify_event_handler_calls(
             [internal_port, floatingip], expected_call_count=4)
@@ -121,7 +121,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
                                  'tenant_id': 't2'}},
                     {'network': {'name': 'n3',
                                  'tenant_id': 't1'}}]
-        res = self._create_bulk_from_list(self.fmt, 'network', networks)
+        res = self._create_bulk_from_list(self.fmt, 'network', networks,
+                                          as_admin=True)
         self.assertEqual(201, res.status_int)
         et.assert_has_calls([mock.call(mock.ANY, 't1'),
                              mock.call(mock.ANY, 't2')],
@@ -134,7 +135,7 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
         with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
                                'ensure_tenant') as et:
             self._make_subnet(self.fmt, net, None, '10.0.0.0/24',
-                              tenant_id='t1')
+                              tenant_id='t1', as_admin=True)
             et.assert_called_once_with(mock.ANY, 't1')
 
     def test_subnet_bulk(self):
@@ -158,7 +159,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
                                 'ip_version': 4,
                                 'cidr': '10.0.3.0/24',
                                 'tenant_id': 't1'}}]
-        res = self._create_bulk_from_list(self.fmt, 'subnet', subnets)
+        res = self._create_bulk_from_list(self.fmt, 'subnet', subnets,
+                                          as_admin=True)
         self.assertEqual(201, res.status_int)
         et.assert_has_calls([mock.call(mock.ANY, 't1'),
                              mock.call(mock.ANY, 't2')],
@@ -170,7 +172,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
 
         with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
                                'ensure_tenant') as et:
-            self._make_port(self.fmt, net['network']['id'], tenant_id='t1')
+            self._make_port(self.fmt, net['network']['id'], tenant_id='t1',
+                            as_admin=True)
             et.assert_has_calls([mock.call(mock.ANY, 't1')])
             self.assertEqual(2, et.call_count)
 
@@ -189,7 +192,8 @@ class TestEnsureTenant(Ml2PlusPluginV2TestCase):
                  {'port': {'name': 'n3',
                            'network_id': network_id,
                            'tenant_id': 't1'}}]
-        res = self._create_bulk_from_list(self.fmt, 'port', ports)
+        res = self._create_bulk_from_list(self.fmt, 'port', ports,
+                                          as_admin=True)
         self.assertEqual(201, res.status_int)
         et.assert_has_calls([mock.call(mock.ANY, 't1'),
                              mock.call(mock.ANY, 't2')],
@@ -238,7 +242,7 @@ class TestSubnetPool(Ml2PlusPluginV2TestCase):
         with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
                                'update_subnetpool_postcommit') as post:
             res = self._update('subnetpools', subnetpool['id'],
-                               data)['subnetpool']
+                               data, as_admin=True)['subnetpool']
             self.assertEqual('newnameforsubnetpool', res['name'])
 
         self.assertEqual(1, pre.call_count)
@@ -262,7 +266,7 @@ class TestSubnetPool(Ml2PlusPluginV2TestCase):
                 self.plugin.get_subnetpool)
         with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
                                'delete_subnetpool_postcommit') as post:
-            self._delete('subnetpools', subnetpool['id'])
+            self._delete('subnetpools', subnetpool['id'], as_admin=True)
 
         self.assertEqual(1, pre.call_count)
         self.assertEqual('sp1',
@@ -303,7 +307,7 @@ class TestAddressScope(Ml2PlusPluginV2TestCase):
         with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
                                'update_address_scope_postcommit') as post:
             res = self._update('address-scopes', address_scope['id'],
-                               data)['address_scope']
+                               data, as_admin=True)['address_scope']
             self.assertEqual('newnameforaddress_scope', res['name'])
 
         self.assertEqual(1, pre.call_count)
@@ -326,7 +330,8 @@ class TestAddressScope(Ml2PlusPluginV2TestCase):
         pre.side_effect = self.exist_checker(self.plugin.get_address_scope)
         with mock.patch.object(mech_logger.LoggerPlusMechanismDriver,
                                'delete_address_scope_postcommit') as post:
-            self._delete('address-scopes', address_scope['id'])
+            self._delete('address-scopes', address_scope['id'],
+                         as_admin=True)
 
         self.assertEqual(1, pre.call_count)
         self.assertEqual('as1',
@@ -225,7 +225,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
         extn_attr = ('router:external', DN,
                      'apic:nat_type', 'apic:snat_host_pool')
 
-        net = self._make_network(self.fmt, name, True,
+        net = self._make_network(self.fmt, name, True, as_admin=True,
                                  arg_list=extn_attr,
                                  **kwargs)['network']
         subnet = self._make_subnet(
@@ -259,11 +259,9 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
         attrs.update(kwargs)
 
         req = self.new_create_request('address-scopes',
-                                      {'address_scope': attrs}, self.fmt)
-        if not admin:
-            neutron_context = nctx.Context('', kwargs.get('tenant_id',
-                                                          self._tenant_id))
-            req.environ['neutron.context'] = neutron_context
+                                      {'address_scope': attrs}, self.fmt,
+                                      tenant_id=kwargs.get('tenant_id',
+                                          self._tenant_id), as_admin=admin)
 
         res = req.get_response(self.ext_api)
         if expected_status:
@@ -323,7 +321,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
                                 req.get_response(self.api))['subnet']
 
     def _show_port(self, id):
-        req = self.new_show_request('ports', id, fmt=self.fmt)
+        req = self.new_show_request('ports', id, fmt=self.fmt, as_admin=True)
         return self.deserialize(self.fmt, req.get_response(self.api))['port']
 
     def _show_network(self, id):
@@ -332,7 +330,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
                                 req.get_response(self.api))['network']
 
     def _show_subnetpool(self, id):
-        req = self.new_show_request('subnetpools', id, fmt=self.fmt)
+        req = self.new_show_request('subnetpools', id, as_admin=True)
         return self.deserialize(self.fmt,
                                 req.get_response(self.api))['subnetpool']
 
@@ -593,7 +591,8 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
         for version in subnetpools_versions:
             sp_id = l3p[version][0]
             subpool = self._show_subnetpool(sp_id)
-            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
+            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
+                                        as_admin=True)
             res = self.deserialize(self.fmt, req.get_response(self.api))
             subpool = res['subnetpool']
             self.assertIn(subpool['prefixes'][0], l3p['ip_pool'])
@@ -629,7 +628,8 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
         self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
         for version in subnetpools_versions:
             sp_id = l3p[version][0]
-            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
+            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
+                                        as_admin=True)
             res = req.get_response(self.api)
             if explicit_subnetpool or (
                 version == 'subnetpools_v4' and v4_default) or (
@@ -681,7 +681,7 @@ class AIMBaseTestCase(test_nr_base.CommonNeutronBaseTestCase,
         if tenant_id:
             kwargs['tenant_id'] = tenant_id
 
-        net = self._make_network(self.fmt, network_name, True,
+        net = self._make_network(self.fmt, network_name, True, as_admin=True,
                                  arg_list=self.extension_attributes,
                                  **kwargs)['network']
         gw = str(netaddr.IPAddress(netaddr.IPNetwork(cidr).first + 1))
@@ -3353,7 +3353,7 @@ class TestPolicyTarget(AIMBaseTestCase,
         kwargs[DN] = {EXTERNAL_NETWORK: dn}
         extn_attr = ('router:external', DN)
 
-        net = self._make_network(self.fmt, name, True,
+        net = self._make_network(self.fmt, name, True, as_admin=True,
                                  arg_list=extn_attr,
                                  **kwargs)['network']
         subnet = self._make_subnet(
@@ -5688,29 +5688,34 @@ class TestNeutronPortOperation(AIMBaseTestCase):
             device_owner='compute:',
             fixed_ips=[{'subnet_id': t2sub1['id']},
                        {'subnet_id': t2sub2['id']}],
-            allowed_address_pairs=allow_addr_active_aap)
+            allowed_address_pairs=allow_addr_active_aap,
+            as_admin=True)
 
         # create 2 ports configured with the same allowed-addresses
         p1 = self._make_port(self.fmt, net['network']['id'],
                              arg_list=('allowed_address_pairs',),
                              device_owner='compute:',
                              fixed_ips=[{'subnet_id': sub1['id']}],
-                             allowed_address_pairs=allow_addr)['port']
+                             allowed_address_pairs=allow_addr,
+                             as_admin=True)['port']
         t2p1 = self._make_port(self.fmt, t2net['network']['id'],
                                arg_list=('allowed_address_pairs',),
                                device_owner='compute:',
                                fixed_ips=[{'subnet_id': t2sub1['id']}],
-                               allowed_address_pairs=allow_addr)['port']
+                               allowed_address_pairs=allow_addr,
+                               as_admin=True)['port']
         p2 = self._make_port(self.fmt, net['network']['id'],
                              arg_list=('allowed_address_pairs',),
                              device_owner='compute:',
                              fixed_ips=[{'subnet_id': sub1['id']}],
-                             allowed_address_pairs=allow_addr)['port']
+                             allowed_address_pairs=allow_addr,
+                             as_admin=True)['port']
         t2p2 = self._make_port(self.fmt, t2net['network']['id'],
                                arg_list=('allowed_address_pairs',),
                                device_owner='compute:',
                                fixed_ips=[{'subnet_id': t2sub1['id']}],
-                               allowed_address_pairs=allow_addr)['port']
+                               allowed_address_pairs=allow_addr,
+                               as_admin=True)['port']
         self._bind_port_to_host(p1['id'], 'h1')
         self._bind_port_to_host(t2p1['id'], 'h1')
         self._bind_port_to_host(p_active_aap['id'], 'h1')
@@ -5721,8 +5726,8 @@ class TestNeutronPortOperation(AIMBaseTestCase):
         # belong to a different active_acitve_aap mode.
         self._update('ports', p_active_aap['id'],
                      {'port': {'allowed_address_pairs': allow_addr}},
-                     neutron_context=self._neutron_admin_context,
-                     expected_code=webob.exc.HTTPBadRequest.code)
+                     expected_code=webob.exc.HTTPBadRequest.code,
+                     as_admin=True)
 
         # Call agent => plugin RPC to get the details for each port. The
         # results should only have the configured AAPs, with none of them
@@ -5824,19 +5829,23 @@ class TestNeutronPortOperation(AIMBaseTestCase):
         p3 = self._make_port(self.fmt, net['network']['id'],
                              device_owner='compute:',
                              fixed_ips=[{'subnet_id': sub2['id'],
-                                         'ip_address': '1.2.3.250'}])['port']
+                                         'ip_address': '1.2.3.250'}],
+                             as_admin=True)['port']
         t2p3 = self._make_port(self.fmt, t2net['network']['id'],
                                device_owner='compute:',
                                fixed_ips=[{'subnet_id': t2sub2['id'],
-                                           'ip_address': '1.2.3.250'}])['port']
+                                           'ip_address': '1.2.3.250'}],
+                               as_admin=True)['port']
         p4 = self._make_port(self.fmt, net['network']['id'],
                              device_owner='compute:',
                              fixed_ips=[{'subnet_id': sub2['id'],
-                                         'ip_address': '1.2.3.251'}])['port']
+                                         'ip_address': '1.2.3.251'}],
+                             as_admin=True)['port']
         t2p4 = self._make_port(self.fmt, t2net['network']['id'],
                                device_owner='compute:',
                                fixed_ips=[{'subnet_id': t2sub2['id'],
-                                           'ip_address': '1.2.3.251'}])['port']
+                                           'ip_address': '1.2.3.251'}],
+                               as_admin=True)['port']
         self.l3_plugin.add_router_interface(
             self._neutron_admin_context, rtr['id'], {'subnet_id': sub1['id']})
         self.l3_plugin.add_router_interface(
@@ -5848,9 +5857,11 @@ class TestNeutronPortOperation(AIMBaseTestCase):
             self._neutron_admin_context, t2rtr['id'],
             {'subnet_id': t2sub2['id']})
         fip1 = self._make_floatingip(self.fmt, t2net_ext['id'],
-                                     port_id=t2p3['id'])['floatingip']
+                                     port_id=t2p3['id'],
+                                     as_admin=True)['floatingip']
         fip2 = self._make_floatingip(self.fmt, t2net_ext['id'],
-                                     port_id=t2p4['id'])['floatingip']
+                                     port_id=t2p4['id'],
+                                     as_admin=True)['floatingip']
         details = self.mech_driver.get_gbp_details(
             self._neutron_admin_context, device='tap%s' % t2p1['id'],
             host='h1')
@@ -5904,7 +5915,7 @@ class TestNeutronPortOperation(AIMBaseTestCase):
         # from the old pair are removed from the mapping table
         p1 = self._update('ports', p1['id'],
                           {'port': {'allowed_address_pairs': update_addr}},
-                          neutron_context=self._neutron_admin_context)['port']
+                          as_admin=True)['port']
         ips = self.mech_driver.get_ha_ipaddresses_for_port(p1['id'])
         self.assertEqual(ips, [])
         # Request ownership of the new AAP
@@ -5922,7 +5933,7 @@ class TestNeutronPortOperation(AIMBaseTestCase):
 
         p2 = self._update('ports', p2['id'],
                           {'port': {'allowed_address_pairs': update_addr}},
-                          neutron_context=self._neutron_admin_context)['port']
+                          as_admin=True)['port']
         ips = self.mech_driver.get_ha_ipaddresses_for_port(p2['id'])
         self.assertEqual(ips, [])
         # Request ownership of the new AAP
@@ -172,7 +172,7 @@ class TestNeutronMapping(AimValidationTestCase):
 
     def _test_routed_subnet(self, subnet_id, gw_ip):
         # Get the AIM Subnet.
-        subnet = self._show('subnets', subnet_id)['subnet']
+        subnet = self._show('subnets', subnet_id, as_admin=True)['subnet']
         sn_dn = subnet['apic:distinguished_names'][gw_ip]
         sn = aim_resource.Subnet.from_dn(sn_dn)
 
@@ -181,7 +181,7 @@ class TestNeutronMapping(AimValidationTestCase):
 
     def _test_unscoped_vrf(self, net_id):
         # Get the network's AIM VRF.
-        net = self._show('networks', net_id)['network']
+        net = self._show('networks', net_id, as_admin=True)['network']
        vrf_dn = net['apic:distinguished_names']['VRF']
        vrf = aim_resource.VRF.from_dn(vrf_dn)
 
@@ -283,7 +283,7 @@ class TestNeutronMapping(AimValidationTestCase):
         # Test subnet.
         subnet = self._make_subnet(
             self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24',
-            tenant_id='subnet_proj')['subnet']
+            as_admin=True, tenant_id='subnet_proj')['subnet']
         self._test_project_resources(subnet['project_id'])
 
         # Test port. Since Neutron creates the default SG for the
@@ -292,12 +292,12 @@ class TestNeutronMapping(AimValidationTestCase):
         # resource owned by port_prog.
         port = self._make_port(
             self.fmt, net['id'], security_groups=[],
-            tenant_id='port_proj')['port']
+            as_admin=True, tenant_id='port_proj')['port']
         sgs = self._list(
             'security-groups',
-            query_params='project_id=port_proj')['security_groups']
+            query_params='project_id=port_proj',
+            as_admin=True)['security_groups']
         self.assertEqual(1, len(sgs))
-        self._delete('security-groups', sgs[0]['id'])
         self._test_project_resources(port['project_id'])
 
         # Test security group.
@@ -319,8 +319,8 @@ class TestNeutronMapping(AimValidationTestCase):
         # Test floatingip.
         kwargs = {'router:external': True}
         ext_net_resp = self._make_network(
-            self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
-            **kwargs)
+            self.fmt, 'ext_net', True, as_admin=True,
+            arg_list=self.extension_attributes, **kwargs)
         ext_net = ext_net_resp['network']
         self._make_subnet(
             self.fmt, ext_net_resp, '100.100.100.1', '100.100.100.0/24')
@@ -542,8 +542,8 @@ class TestNeutronMapping(AimValidationTestCase):
                   'apic:distinguished_names':
                   {'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
         net_resp = self._make_network(
-            self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
-            **kwargs)
+            self.fmt, 'ext_net', True, as_admin=True,
+            arg_list=self.extension_attributes, **kwargs)
         net = net_resp['network']
         self._validate()
 
@@ -774,7 +774,8 @@ class TestNeutronMapping(AimValidationTestCase):
                   'apic:distinguished_names':
                   {'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
         ext_net = self._make_network(
-            self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
+            self.fmt, 'ext_net', True, as_admin=True,
+            arg_list=self.extension_attributes,
             **kwargs)['network']
 
         # Create extra external network to test CloneL3Out record below.
@@ -782,7 +783,7 @@ class TestNeutronMapping(AimValidationTestCase):
                   'apic:distinguished_names':
                   {'ExternalNetwork': 'uni/tn-common/out-l2/instP-n2'}}
         self._make_network(
-            self.fmt, 'extra_ext_net', True,
+            self.fmt, 'extra_ext_net', True, as_admin=True,
             arg_list=self.extension_attributes, **kwargs)
 
         # Create router as tenant_2.
@@ -860,7 +861,8 @@ class TestNeutronMapping(AimValidationTestCase):
     def test_unscoped_routing(self):
         # Create shared network and unscoped subnet as tenant_1.
         net_resp = self._make_network(
-            self.fmt, 'net1', True, tenant_id='tenant_1', shared=True)
+            self.fmt, 'net1', True, tenant_id='tenant_1',
+            as_admin=True, shared=True)
         net1_id = net_resp['network']['id']
         subnet = self._make_subnet(
             self.fmt, net_resp, '10.0.1.1', '10.0.1.0/24',
@@ -886,8 +888,8 @@ class TestNeutronMapping(AimValidationTestCase):
                   'apic:distinguished_names':
                   {'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
         ext_net = self._make_network(
-            self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
-            **kwargs)['network']
+            self.fmt, 'ext_net', True, as_admin=True,
+            arg_list=self.extension_attributes, **kwargs)['network']
 
         # Create router as tenant_2.
         kwargs = {'apic:external_provided_contracts': ['p1', 'p2'],
@@ -1181,7 +1183,6 @@ class TestNeutronMapping(AimValidationTestCase):
         # delete BridgeDomain.
         bd = aim_resource.BridgeDomain.from_dn(bd_dn)
         self.aim_mgr.delete(self.aim_ctx, bd)
-
         # delete EndpointGroup.
         epg = aim_resource.EndpointGroup.from_dn(epg_dn)
         self.aim_mgr.delete(self.aim_ctx, epg)
@@ -1242,7 +1243,7 @@ class TestNeutronMapping(AimValidationTestCase):
             sg['id'], 'ingress', 'tcp', '22', '23')
         rules = {'security_group_rules': [rule1['security_group_rule']]}
         sg_rule = self._make_security_group_rule(
-            self.fmt, rules)['security_group_rules'][0]
+            self.fmt, rules, as_admin=True)['security_group_rules'][0]
 
         # Test the AIM SecurityGroup.
         tenant_name = self.driver.aim_mech_driver.name_mapper.project(
@@ -1384,8 +1385,8 @@ class TestGbpMapping(AimValidationTestCase):
                   'apic:distinguished_names':
                   {'ExternalNetwork': 'uni/tn-common/out-l1/instP-n1'}}
         net_resp = self._make_network(
-            self.fmt, 'ext_net', True, arg_list=self.extension_attributes,
-            **kwargs)
+            self.fmt, 'ext_net', True, as_admin=True,
+            arg_list=self.extension_attributes, **kwargs)
         subnet = self._make_subnet(
             self.fmt, net_resp, '10.0.0.1', '10.0.0.0/24')['subnet']
 
@@ -1324,7 +1324,8 @@ class TestPolicyTargetGroup(ResourceMappingTestCase):
 
         data = {'policy_target_group': {'l2_policy_id': l2p_id,
                                         'tenant_id': 'admin'}}
-        req = self.new_create_request('policy_target_groups', data)
+        req = self.new_create_request('policy_target_groups',
+                                      data, as_admin=True)
         data = self.deserialize(self.fmt, req.get_response(self.ext_api))
         self.assertEqual('CrossTenantPolicyTargetGroupL2PolicyNotSupported',
                          data['NeutronError']['type'])
@@ -1452,7 +1453,7 @@ class TestL2Policy(ResourceMappingTestCase):
 
     def _test_explicit_network_lifecycle(self, shared=False):
         # Create L2 policy with explicit network.
-        with self.network(shared=shared) as network:
+        with self.network(shared=shared, as_admin=True) as network:
             network_id = network['network']['id']
             l2p = self.create_l2_policy(name="l2p1", network_id=network_id,
                                         shared=shared)
@@ -1583,10 +1584,11 @@ class TestL3Policy(ResourceMappingTestCase,
         self.assertEqual(router_id, routers[0])
 
         # Verify deleting L3 policy does not cleanup router.
-        req = self.new_delete_request('l3_policies', l3p_id)
+        req = self.new_delete_request('l3_policies', l3p_id, as_admin=True)
         res = req.get_response(self.ext_api)
         self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
-        req = self.new_show_request('routers', router_id, fmt=self.fmt)
+        req = self.new_show_request('routers', router_id, fmt=self.fmt,
+                                    as_admin=True)
         res = req.get_response(self.ext_api)
         self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
 
@@ -1660,8 +1662,8 @@ class TestL3Policy(ResourceMappingTestCase,
 
     def test_create_l3p_es(self):
         # Simple test to verify l3p created with 1-N ES
-        with self.network(router__external=True) as net1:
-            with self.network(router__external=True) as net2:
+        with self.network(router__external=True, as_admin=True) as net1:
+            with self.network(router__external=True, as_admin=True) as net2:
                 with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
                     with self.subnet(cidr='10.10.2.0/24',
                                      network=net2) as sub2:
@@ -1688,8 +1690,8 @@ class TestL3Policy(ResourceMappingTestCase,
 
     def test_update_l3p_es(self):
         # Simple test to verify l3p updated with 1-N ES
-        with self.network(router__external=True) as net1:
-            with self.network(router__external=True) as net2:
+        with self.network(router__external=True, as_admin=True) as net1:
+            with self.network(router__external=True, as_admin=True) as net2:
                 with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
                     with self.subnet(cidr='10.10.2.0/24',
                                      network=net2) as sub2:
@@ -1718,8 +1720,8 @@ class TestL3Policy(ResourceMappingTestCase,
                          res['NeutronError']['type'])
 
     def test_es_router_plumbing(self):
-        with self.network(router__external=True) as net1:
-            with self.network(router__external=True) as net2:
+        with self.network(router__external=True, as_admin=True) as net1:
+            with self.network(router__external=True, as_admin=True) as net2:
                 with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
                     with self.subnet(cidr='10.10.2.0/24',
                                      network=net2) as sub2:
@@ -1775,8 +1777,8 @@ class TestL3Policy(ResourceMappingTestCase,
                    {'destination': '172.0.0.0/16', 'nexthop': '10.10.1.1'}]
         routes2 = [{'destination': '0.0.0.0/0', 'nexthop': '10.10.2.1'},
                    {'destination': '172.0.0.0/16', 'nexthop': '10.10.2.1'}]
-        with self.network(router__external=True) as net1:
-            with self.network(router__external=True) as net2:
+        with self.network(router__external=True, as_admin=True) as net1:
+            with self.network(router__external=True, as_admin=True) as net2:
                 with self.subnet(cidr='10.10.1.0/24', network=net1) as sub1:
                     with self.subnet(cidr='10.10.2.0/24',
                                      network=net2) as sub2:
@@ -1823,7 +1825,8 @@ class TestL3Policy(ResourceMappingTestCase,
                          res['NeutronError']['type'])
 
     def _show_subnetpool(self, id):
-        req = self.new_show_request('subnetpools', id, fmt=self.fmt)
+        req = self.new_show_request('subnetpools', id, fmt=self.fmt,
+                                    as_admin=True)
         return self.deserialize(self.fmt,
                                 req.get_response(self.api))['subnetpool']
 
@@ -1874,7 +1877,8 @@ class TestL3Policy(ResourceMappingTestCase,
         for version in subnetpools_versions:
             sp_id = l3p[version][0]
             subpool = self._show_subnetpool(sp_id)
-            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
+            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
+                                        as_admin=True)
             res = self.deserialize(self.fmt, req.get_response(self.api))
             subpool = res['subnetpool']
             self.assertIn(subpool['prefixes'][0], l3p['ip_pool'])
@@ -1903,7 +1907,8 @@ class TestL3Policy(ResourceMappingTestCase,
         self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
         for version in subnetpools_versions:
             sp_id = l3p[version][0]
-            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt)
+            req = self.new_show_request('subnetpools', sp_id, fmt=self.fmt,
+                                        as_admin=True)
             res = req.get_response(self.api)
             if explicit_subnetpool or (
                 version == 'subnetpools_v4' and v4_default) or (
@@ -2581,7 +2586,7 @@ class TestPolicyRuleSet(ResourceMappingTestCase):
         pr = self._create_ssh_allow_rule()
         prs = self.create_policy_rule_set(
             policy_rules=[pr['id']])['policy_rule_set']
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     subnet_id=sub['subnet']['id'],
@@ -2795,7 +2800,7 @@ class TestExternalSegment(ResourceMappingTestCase):
 
     def test_explicit_subnet_lifecycle(self):
 
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     subnet_id=sub['subnet']['id'])['external_segment']
@@ -2809,7 +2814,7 @@ class TestExternalSegment(ResourceMappingTestCase):
                          es['ip_version'])
 
     def test_update(self, proxy_ip_pool1=None, proxy_ip_pool2=None):
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
                 changes = {'port_address_translation': True}
                 es = self.create_external_segment(
@@ -2903,7 +2908,7 @@ class TestExternalSegment(ResourceMappingTestCase):
 
     def test_update_different_tenant(self):
         with self.network(router__external=True, shared=True,
-                          tenant_id='admin') as net:
+                          tenant_id='admin', as_admin=True) as net:
             with self.subnet(cidr='10.10.1.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     subnet_id=sub['subnet']['id'],
@@ -2931,7 +2936,7 @@ class TestExternalSegment(ResourceMappingTestCase):
         self._verify_prs_rules(prs['id'])
 
     def test_implicit_es(self):
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -2952,7 +2957,8 @@ class TestExternalSegment(ResourceMappingTestCase):
                 expected_res_status=200)
 
     def test_implicit_es_shared(self):
-        with self.network(router__external=True, shared=True) as net:
+        with self.network(router__external=True, shared=True,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     shared=True,
@@ -2974,7 +2980,8 @@ class TestExternalSegment(ResourceMappingTestCase):
                 expected_res_status=200)
 
     def test_delete(self):
-        with self.network(router__external=True, shared=True) as net:
+        with self.network(router__external=True, shared=True,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -2983,7 +2990,8 @@ class TestExternalSegment(ResourceMappingTestCase):
         self.show_external_segment(es['id'], expected_res_status=404)
 
     def test_delete_in_use(self):
-        with self.network(router__external=True, shared=True) as net:
+        with self.network(router__external=True, shared=True,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -2997,7 +3005,8 @@ class TestExternalSegment(ResourceMappingTestCase):
         self.show_external_segment(es['id'], expected_res_status=200)
 
     def test_update_l3p_remove_es(self):
-        with self.network(router__external=True, shared=True) as net:
+        with self.network(router__external=True, shared=True,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 self.create_external_segment(
                     name="default", subnet_id=sub['subnet']['id'])
@@ -3011,7 +3020,7 @@
 class TestExternalPolicy(ResourceMappingTestCase):
 
     def test_create(self):
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='10.10.1.0/24', network=net) as sub1:
                 with self.subnet(cidr='10.10.2.0/24', network=net) as sub2:
                     es1 = self.create_external_segment(
@@ -3046,7 +3055,7 @@ class TestExternalPolicy(ResourceMappingTestCase):
                          res['NeutronError']['type'])
 
     def test_update(self):
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='10.10.1.0/24', network=net) as sub1:
                 with self.subnet(cidr='10.10.2.0/24', network=net) as sub2:
                     route = {'destination': '172.0.0.0/8', 'nexthop': None}
@@ -3245,7 +3254,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
 
     def test_create_nsp_ip_pool_multiple_ptgs(self):
        routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -3308,7 +3317,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
 
     def test_nsp_fip_single(self):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -3353,7 +3362,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
 
     def test_nsp_fip_single_different_pool(self):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -3408,7 +3417,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
 
     def test_nsp_rejected_without_nat_pool(self):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 self.create_external_segment(
                     name="default",
@@ -3467,8 +3476,8 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
                 "name": "test"}],
             expected_res_status=webob.exc.HTTPCreated.code)[
                 'network_service_policy']
-        with self.network(router__external=True) as net1:
-            with self.network(router__external=True) as net2:
+        with self.network(router__external=True, as_admin=True) as net1:
+            with self.network(router__external=True, as_admin=True) as net2:
                 with self.subnet(cidr='192.168.1.0/24', network=net1) as sub1:
                     with self.subnet(
                         cidr='192.168.2.0/24', network=net2) as sub2:
@@ -3500,7 +3509,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
 
     def test_nsp_delete_nat_pool_rejected(self):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -3526,7 +3535,7 @@ class TestNetworkServicePolicy(ResourceMappingTestCase):
 
     def test_update_nsp_nat_pool_after_pt_create(self):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -3789,7 +3798,8 @@ class TestNatPool(ResourceMappingTestCase):
     def _test_overlapping_peer_rejected(self, shared1=False, shared2=False):
         shared_net = shared1 or shared2
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True, shared=shared_net) as net:
+        with self.network(router__external=True, shared=shared_net,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -3825,7 +3835,8 @@ class TestNatPool(ResourceMappingTestCase):
 
     def _test_implicit_subnet_created(self, shared=False):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True, shared=shared) as net:
+        with self.network(router__external=True, shared=shared,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default",
@@ -3850,7 +3861,8 @@ class TestNatPool(ResourceMappingTestCase):
 
     def _test_partially_overlapping_subnets_rejected(self, shared=False):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True, shared=shared) as net:
+        with self.network(router__external=True, shared=shared,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 with self.subnet(cidr='192.168.1.0/28', network=net):
                     es = self.create_external_segment(
@@ -3875,7 +3887,8 @@ class TestNatPool(ResourceMappingTestCase):
 
     def _test_overlapping_subnets(self, shared=False):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True, shared=shared) as net:
+        with self.network(router__external=True, shared=shared,
+                          as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 with self.subnet(cidr='192.168.1.0/24', network=net) as sub2:
                     es = self.create_external_segment(
@@ -3901,7 +3914,7 @@ class TestNatPool(ResourceMappingTestCase):
 
     def _test_subnet_swap(self, owned=True):
         routes = [{'destination': '0.0.0.0/0', 'nexthop': None}]
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/24', network=net) as sub:
                 es = self.create_external_segment(
                     name="default", subnet_id=sub['subnet']['id'],
@@ -3932,7 +3945,8 @@ class TestNatPool(ResourceMappingTestCase):
             ip_version=4, ip_pool=ip_pool,
             expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
         sub_id = nat_pool['subnet_id']
-        with self.network(router__external=True) as net2:
+        with self.network(router__external=True,
+                          as_admin=True) as net2:
             with self.subnet(cidr='192.167.0.0/24',
                              network=net2) as sub2:
                 es2 = self.create_external_segment(
@@ -3973,7 +3987,7 @@ class TestNatPool(ResourceMappingTestCase):
                              result['NeutronError']['type'])
 
     def test_delete_with_fip_allocated(self):
-        with self.network(router__external=True) as net:
+        with self.network(router__external=True, as_admin=True) as net:
             with self.subnet(cidr='192.168.0.0/30', enable_dhcp=False,
                              network=net) as sub:
                 es = self.create_external_segment(
@@ -211,7 +211,8 @@ class TestQosPolicy(TestAIMQosBase):
         kwargs['qos_policy_id'] = net_qos_id
 
         resp = self._create_network(
-            self.fmt, 'net', True, arg_list=tuple(list(kwargs.keys())),
+            self.fmt, 'net', True, as_admin=True,
+            arg_list=tuple(list(kwargs.keys())),
             **kwargs)
         result = self.deserialize(self.fmt, resp)
         self.assertEqual(
@@ -746,12 +746,12 @@ class TestPortPairOpflexAgent(TestAIMServiceFunctionChainingBase):
     def test_port_pair_with_opflex_agent_vlan_nets(self):
         # Correct work flow with both nets of type vlan.
         kwargs = {'provider:network_type': 'vlan'}
-        net1 = self._make_network(self.fmt, 'net1', True,
+        net1 = self._make_network(self.fmt, 'net1', True, as_admin=True,
             arg_list=tuple(list(kwargs.keys())), **kwargs)
         self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
         p1 = self._make_port(self.fmt, net1['network']['id'])['port']
 
-        net2 = self._make_network(self.fmt, 'net2', True,
+        net2 = self._make_network(self.fmt, 'net2', True, as_admin=True,
             arg_list=tuple(list(kwargs.keys())), **kwargs)
         self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
         p2 = self._make_port(self.fmt, net2['network']['id'])['port']
@@ -766,7 +766,7 @@ class TestPortPairOpflexAgent(TestAIMServiceFunctionChainingBase):
     def test_port_pair_invalid_with_opflex_agent_opflex_nets(self):
         # Validate that opflex type nets are invalid.
         kwargs = {'provider:network_type': 'vlan'}
-        net1 = self._make_network(self.fmt, 'net1', True,
+        net1 = self._make_network(self.fmt, 'net1', True, as_admin=True,
             arg_list=tuple(list(kwargs.keys())), **kwargs)
         self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
         p1 = self._make_port(self.fmt, net1['network']['id'])['port']
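
Note: the recurring edit in the test hunks above is threading as_admin=True through the Neutron test helpers (new_show_request, new_create_request, self.network(), _make_network(), and so on) wherever a test touches shared, external, or cross-tenant resources. The 2023.x Neutron test base classes distinguish plain project requests from admin requests, so operations that the secure-RBAC policies reserve for admins must ask for an admin context explicitly. A minimal sketch of the idea follows; the helper name is illustrative, not the actual test-framework API:

    # Illustrative sketch only: how an ``as_admin`` flag typically picks
    # the request context a test helper attaches to its fake API request.
    from neutron_lib import context

    def build_test_context(tenant_id='test-tenant', as_admin=False):
        if as_admin:
            # Admin contexts bypass the owner/shared policy checks that
            # plain project users hit under secure RBAC.
            return context.get_admin_context()
        return context.Context(user_id='user', tenant_id=tenant_id)
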
@@ -4,19 +4,19 @@
 hacking>=6.0.1 # Apache-2.0
 
 # Since version numbers for these are specified in
-# https://releases.openstack.org/constraints/upper/2023.1, they cannot be
+# https://releases.openstack.org/constraints/upper/2023.2, they cannot be
 # referenced as GIT URLs.
 neutron
 python-heatclient
 python-keystoneclient
 
--e git+https://opendev.org/openstack/networking-sfc.git@stable/2023.1#egg=networking-sfc
+-e git+https://opendev.org/openstack/networking-sfc.git@stable/2023.2#egg=networking-sfc
 
 -e git+https://github.com/noironetworks/apicapi.git@master#egg=apicapi
 
--e git+https://github.com/noironetworks/python-opflex-agent.git@stable/2023.1#egg=neutron-opflex-agent
+-e git+https://github.com/noironetworks/python-opflex-agent.git@stable/2023.2#egg=neutron-opflex-agent
 
--e git+https://opendev.org/x/python-group-based-policy-client.git@stable/2023.1#egg=python-group-based-policy-client
+-e git+https://opendev.org/x/python-group-based-policy-client.git@stable/2023.2#egg=python-group-based-policy-client
 
 coverage!=4.4,>=4.0 # Apache-2.0
 flake8-import-order==0.12 # LGPLv3
4
tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = py38,py39,py310,pep8
+envlist = py38,py39,py310,py311,pep8
 minversion = 3.18.0
 skipsdist = False
 ignore_basepython_conflict = True
@@ -24,7 +24,7 @@ usedevelop = True
 install_command =
   pip install {opts} {packages}
 deps =
-  -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2023.1}
+  -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2023.2}
   -r{toxinidir}/requirements.txt
   -r{toxinidir}/test-requirements.txt
 whitelist_externals = sh
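
Note: both the tox deps above and the requirements changes resolve package versions through the Bobcat (2023.2) upper-constraints file rather than through per-package pins. A small, purely illustrative way to check which pin a given package receives from that file:

    # Illustrative helper: fetch the 2023.2 upper-constraints file
    # referenced in tox.ini and report the pin applied to one package.
    import urllib.request

    CONSTRAINTS_URL = 'https://releases.openstack.org/constraints/upper/2023.2'

    def constraint_for(package):
        with urllib.request.urlopen(CONSTRAINTS_URL) as resp:
            for line in resp.read().decode('utf-8').splitlines():
                # Constraint lines look like 'name===version;markers'.
                if line.split('===')[0].strip().lower() == package.lower():
                    return line.strip()
        return None

    print(constraint_for('neutron-lib'))
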