diff --git a/vmware_nsx/tests/unit/__init__.py b/vmware_nsx/tests/unit/__init__.py index c93cc643dd..6b33ed36f1 100644 --- a/vmware_nsx/tests/unit/__init__.py +++ b/vmware_nsx/tests/unit/__init__.py @@ -16,9 +16,10 @@ import os import time - from unittest import mock +from neutron_dynamic_routing.db import bgp_db # noqa + from vmware_nsx.api_client import client as nsx_client from vmware_nsx.api_client import eventlet_client from vmware_nsx import extensions diff --git a/vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py b/vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py index 6956bd973d..42cbfbed64 100644 --- a/vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py +++ b/vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py @@ -19,7 +19,6 @@ from neutron_lib.plugins import directory from oslo_config import cfg from vmware_nsx.extension_drivers import dns_integration -from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin @@ -84,15 +83,6 @@ class NsxDNSIntegrationTestCase(object): dns_assignment['fqdn']) -class NsxVDNSIntegrationTestCase(NsxDNSIntegrationTestCase, - test_v_plugin.NsxVPluginV2TestCase): - - def setUp(self): - cfg.CONF.set_override('nsx_extension_drivers', ['vmware_nsxv_dns']) - cfg.CONF.set_override('dns_domain', self._domain) - super(NsxVDNSIntegrationTestCase, self).setUp() - - class NsxV3DNSIntegrationTestCase(NsxDNSIntegrationTestCase, test_v3_plugin.NsxV3PluginTestCaseMixin): diff --git a/vmware_nsx/tests/unit/extensions/test_addresspairs.py b/vmware_nsx/tests/unit/extensions/test_addresspairs.py index d14531381e..183628b589 100644 --- a/vmware_nsx/tests/unit/extensions/test_addresspairs.py +++ b/vmware_nsx/tests/unit/extensions/test_addresspairs.py @@ -14,13 +14,11 @@ # under the License. 
from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef -from neutron_lib.api.definitions import port_security as psec from oslo_config import cfg from neutron.tests.unit.db import test_allowedaddresspairs_db as ext_pairs from vmware_nsx.tests.unit.nsx_p import test_plugin as test_p_plugin -from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsx_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_constants as v3_constants from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin @@ -321,106 +319,3 @@ class TestAllowedAddressPairsNSXv3(test_v3_plugin.NsxV3PluginTestCaseMixin, def test_create_port_security_false_allowed_address_pairs(self): self.skipTest('TBD') - - -class TestAllowedAddressPairsNSXv(test_nsx_v_plugin.NsxVPluginV2TestCase, - ext_pairs.TestAllowedAddressPairs): - - def setUp(self, plugin='vmware_nsx.plugin.NsxVPlugin', - ext_mgr=None, - service_plugins=None): - super(TestAllowedAddressPairsNSXv, self).setUp( - plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) - - def test_create_port_security_false_allowed_address_pairs(self): - self.skipTest('TBD') - - def test_update_port_security_off_address_pairs(self): - self.skipTest('Not supported') - - def test_create_overlap_with_fixed_ip(self): - address_pairs = [{'ip_address': '10.0.0.2'}] - with self.network() as network: - with self.subnet(network=network, cidr='10.0.0.0/24', - enable_dhcp=False) as subnet: - fixed_ips = [{'subnet_id': subnet['subnet']['id'], - 'ip_address': '10.0.0.2'}] - res = self._create_port(self.fmt, network['network']['id'], - arg_list=(addr_apidef.ADDRESS_PAIRS, - 'fixed_ips'), - allowed_address_pairs=address_pairs, - fixed_ips=fixed_ips) - self.assertEqual(res.status_int, 201) - port = self.deserialize(self.fmt, res) - self._delete('ports', port['port']['id']) - - def test_create_port_allowed_address_pairs(self): - with self.network() as net: - address_pairs = [{'ip_address': '10.0.0.1'}] - res = self._create_port(self.fmt, net['network']['id'], - arg_list=(addr_apidef.ADDRESS_PAIRS,), - allowed_address_pairs=address_pairs) - port = self.deserialize(self.fmt, res) - address_pairs[0]['mac_address'] = port['port']['mac_address'] - self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], - address_pairs) - self._delete('ports', port['port']['id']) - - def _test_create_port_remove_allowed_address_pairs(self, update_value): - with self.network() as net: - address_pairs = [{'ip_address': '10.0.0.1'}] - res = self._create_port(self.fmt, net['network']['id'], - arg_list=(addr_apidef.ADDRESS_PAIRS,), - allowed_address_pairs=address_pairs) - port = self.deserialize(self.fmt, res) - update_port = {'port': {addr_apidef.ADDRESS_PAIRS: []}} - req = self.new_update_request('ports', update_port, - port['port']['id']) - port = self.deserialize(self.fmt, req.get_response(self.api)) - self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], []) - self._delete('ports', port['port']['id']) - - def test_update_add_address_pairs(self): - with self.network() as net: - res = self._create_port(self.fmt, net['network']['id']) - port = self.deserialize(self.fmt, res) - address_pairs = [{'ip_address': '10.0.0.1'}] - update_port = {'port': {addr_apidef.ADDRESS_PAIRS: - address_pairs}} - req = self.new_update_request('ports', update_port, - port['port']['id']) - port = self.deserialize(self.fmt, req.get_response(self.api)) - address_pairs[0]['mac_address'] = port['port']['mac_address'] - self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], - address_pairs) - 
self._delete('ports', port['port']['id']) - - def test_mac_configuration(self): - address_pairs = [{'mac_address': '00:00:00:00:00:01', - 'ip_address': '10.0.0.1'}] - self._create_port_with_address_pairs(address_pairs, 400) - - def test_equal_to_max_allowed_address_pair(self): - cfg.CONF.set_default('max_allowed_address_pair', 3) - address_pairs = [{'ip_address': '10.0.0.1'}, - {'ip_address': '10.0.0.2'}, - {'ip_address': '10.0.0.3'}] - self._create_port_with_address_pairs(address_pairs, 201) - - def test_create_port_security_true_allowed_address_pairs(self): - if self._skip_port_security: - self.skipTest("Plugin does not implement port-security extension") - - with self.network() as net: - address_pairs = [{'ip_address': '10.0.0.1'}] - res = self._create_port(self.fmt, net['network']['id'], - arg_list=('port_security_enabled', - addr_apidef.ADDRESS_PAIRS,), - port_security_enabled=True, - allowed_address_pairs=address_pairs) - port = self.deserialize(self.fmt, res) - self.assertTrue(port['port'][psec.PORTSECURITY]) - address_pairs[0]['mac_address'] = port['port']['mac_address'] - self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], - address_pairs) - self._delete('ports', port['port']['id']) diff --git a/vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py b/vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py index 4b78d8d7bb..062603ceed 100644 --- a/vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py +++ b/vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py @@ -12,16 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. -from unittest import mock - from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db from neutron_lib.db import api as db_api from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu -from vmware_nsx.plugins.nsx_v.vshield import edge_utils -from vmware_nsx.tests.unit.nsx_v import test_plugin -from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' @@ -41,87 +36,6 @@ class DhcpMtuExtensionManager(object): return ext_dhcp_mtu.get_extended_resources(version) -class DhcpMtuExtensionTestCase(test_plugin.NsxVPluginV2TestCase): - """Test API extension dhcp-mtu attribute of subnets.""" - - @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') - def setUp(self, plugin=PLUGIN_NAME): - ext_mgr = DhcpMtuExtensionManager() - # This feature is enabled only since 6.2.3 - with mock.patch.object(fake_vcns.FakeVcns, - 'get_version', - return_value="6.2.3"): - super(DhcpMtuExtensionTestCase, self).setUp(ext_mgr=ext_mgr) - - def _create_subnet_with_dhcp_mtu(self, dhcp_mtu): - with self.network() as net: - tenant_id = net['network']['tenant_id'] - net_id = net['network']['id'] - data = {'subnet': {'network_id': net_id, - 'cidr': '10.0.0.0/24', - 'ip_version': 4, - 'name': 'test-mtu-subnet', - 'tenant_id': tenant_id, - 'dhcp_mtu': dhcp_mtu}} - subnet_req = self.new_create_request('subnets', data) - res = subnet_req.get_response(self.api) - return res - - def test_subnet_create_with_dhcp_mtu(self): - for mtu in (68, 2000, 65535): - res = self._create_subnet_with_dhcp_mtu(mtu) - sub = self.deserialize(self.fmt, res) - self.assertEqual(mtu, sub['subnet']['dhcp_mtu']) - - def test_subnet_create_with_invalid_dhcp_mtu_fail(self): - res = self._create_subnet_with_dhcp_mtu(67) - self.assertEqual(400, res.status_int) - - res = self._create_subnet_with_dhcp_mtu(100000) - self.assertEqual(400, res.status_int) - - def test_subnet_update_with_dhcp_mtu(self): - 
res = self._create_subnet_with_dhcp_mtu(2000) - sub = self.deserialize(self.fmt, res) - data = {'subnet': {'dhcp_mtu': 3000}} - req = self.new_update_request('subnets', data, sub['subnet']['id']) - updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) - self.assertEqual(3000, updated_sub['subnet']['dhcp_mtu']) - - def _create_subnet_with_dhcp_mtu_and_dns(self, dhcp_mtu, - dns_search_domain): - with self.network() as net: - tenant_id = net['network']['tenant_id'] - net_id = net['network']['id'] - data = {'subnet': {'network_id': net_id, - 'cidr': '10.0.0.0/24', - 'ip_version': 4, - 'name': 'test-mtu-subnet', - 'tenant_id': tenant_id, - 'dhcp_mtu': dhcp_mtu, - 'dns_search_domain': dns_search_domain}} - subnet_req = self.new_create_request('subnets', data) - res = subnet_req.get_response(self.api) - return res - - def test_subnet_create_with_dhcp_mtu_and_dns(self): - res = self._create_subnet_with_dhcp_mtu_and_dns(2000, 'vmware.com') - sub = self.deserialize(self.fmt, res) - self.assertEqual(2000, sub['subnet']['dhcp_mtu']) - self.assertEqual('vmware.com', sub['subnet']['dns_search_domain']) - - def test_subnet_update_with_dhcp_mtu_and_dns(self): - res = self._create_subnet_with_dhcp_mtu_and_dns(2000, 'vmware.com') - sub = self.deserialize(self.fmt, res) - data = {'subnet': {'dhcp_mtu': 3000, - 'dns_search_domain': 'eng.vmware.com'}} - req = self.new_update_request('subnets', data, sub['subnet']['id']) - updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) - self.assertEqual(3000, updated_sub['subnet']['dhcp_mtu']) - self.assertEqual('eng.vmware.com', - updated_sub['subnet']['dns_search_domain']) - - class DhcpMtuDBTestCase(test_db.NeutronDbPluginV2TestCase): def setUp(self): diff --git a/vmware_nsx/tests/unit/extensions/test_dns_search_domain.py b/vmware_nsx/tests/unit/extensions/test_dns_search_domain.py index e7a6053bde..cadb60fd87 100644 --- a/vmware_nsx/tests/unit/extensions/test_dns_search_domain.py +++ b/vmware_nsx/tests/unit/extensions/test_dns_search_domain.py @@ -12,15 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-from unittest import mock - from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db from neutron_lib.db import api as db_api from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain -from vmware_nsx.plugins.nsx_v.vshield import edge_utils -from vmware_nsx.tests.unit.nsx_v import test_plugin PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' @@ -40,47 +36,6 @@ class DnsSearchDomainExtensionManager(object): return ext_dns_search_domain.get_extended_resources(version) -class DnsSearchDomainExtensionTestCase(test_plugin.NsxVPluginV2TestCase): - """Test API extension dns-search-domain attribute.""" - - @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') - def setUp(self, plugin=PLUGIN_NAME): - ext_mgr = DnsSearchDomainExtensionManager() - super(DnsSearchDomainExtensionTestCase, self).setUp(ext_mgr=ext_mgr) - - def _create_subnet_with_dns_search_domain(self, dns_search_domain): - with self.network() as net: - tenant_id = net['network']['tenant_id'] - net_id = net['network']['id'] - data = {'subnet': {'network_id': net_id, - 'cidr': '10.0.0.0/24', - 'ip_version': 4, - 'name': 'test-dns-search-domain-subnet', - 'tenant_id': tenant_id, - 'dns_search_domain': dns_search_domain}} - subnet_req = self.new_create_request('subnets', data) - res = subnet_req.get_response(self.api) - return res - - def test_subnet_create_with_dns_search_domain(self): - res = self._create_subnet_with_dns_search_domain('vmware.com') - sub = self.deserialize(self.fmt, res) - self.assertEqual('vmware.com', sub['subnet']['dns_search_domain']) - - def test_subnet_create_with_invalid_dns_search_domain_fail(self): - res = self._create_subnet_with_dns_search_domain('vmw@re.com') - self.assertEqual(400, res.status_int) - - def test_subnet_update_with_dns_search_domain(self): - res = self._create_subnet_with_dns_search_domain('vmware.com') - sub = self.deserialize(self.fmt, res) - data = {'subnet': {'dns_search_domain': 'eng.vmware.com'}} - req = self.new_update_request('subnets', data, sub['subnet']['id']) - updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) - self.assertEqual('eng.vmware.com', - updated_sub['subnet']['dns_search_domain']) - - class DnsSearchDomainDBTestCase(test_db.NeutronDbPluginV2TestCase): def setUp(self): diff --git a/vmware_nsx/tests/unit/extensions/test_provider_security_groups.py b/vmware_nsx/tests/unit/extensions/test_provider_security_groups.py index 1dfab22b4d..fbded04472 100644 --- a/vmware_nsx/tests/unit/extensions/test_provider_security_groups.py +++ b/vmware_nsx/tests/unit/extensions/test_provider_security_groups.py @@ -26,7 +26,6 @@ import webob.exc from vmware_nsx.db import extended_security_group from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.tests.unit.nsx_p import test_plugin as test_nsxp_plugin -from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3_plugin @@ -362,38 +361,6 @@ class TestNSXv3ProviderSecurityGrp(test_nsxv3_plugin.NsxV3PluginTestCaseMixin, port['port']['security_groups']) -class TestNSXvProviderSecurityGroup(test_nsxv_plugin.NsxVPluginV2TestCase, - ProviderSecurityGroupExtTestCase): - def test_create_provider_security_group(self): - _create_section_tmp = self.fc2.create_section - - def _create_section(*args, **kwargs): - return _create_section_tmp(*args, **kwargs) - - with mock.patch.object(self.fc2, 'create_section', - side_effect=_create_section) as create_sec_mock: - 
super(TestNSXvProviderSecurityGroup, - self).test_create_provider_security_group() - create_sec_mock.assert_called_with('ip', mock.ANY, - insert_top=True, - insert_before=mock.ANY) - - def test_create_provider_security_group_rule(self): - provider_secgroup = self._create_provider_security_group() - sg_id = provider_secgroup['security_group']['id'] - _create_nsx_rule_tmp = self.plugin._create_nsx_rule - - def m_create_nsx_rule(*args, **kwargs): - return _create_nsx_rule_tmp(*args, **kwargs) - - with mock.patch.object(self.plugin, '_create_nsx_rule', - side_effect=m_create_nsx_rule) as create_rule_m: - with self.security_group_rule(security_group_id=sg_id): - create_rule_m.assert_called_with(mock.ANY, mock.ANY, - logged=mock.ANY, - action='deny') - - class TestNSXpProviderSecurityGrp(test_nsxp_plugin.NsxPPluginTestCaseMixin, ProviderSecurityGroupExtTestCase): diff --git a/vmware_nsx/tests/unit/extensions/test_providernet.py b/vmware_nsx/tests/unit/extensions/test_providernet.py deleted file mode 100644 index 99d61bd64a..0000000000 --- a/vmware_nsx/tests/unit/extensions/test_providernet.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import webob.exc - -from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef -from neutron_lib.api.definitions import provider_net as pnet -from vmware_nsx.tests import unit as vmware -from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv - - -class TestProvidernet(test_nsxv.NsxVPluginV2TestCase): - - def test_create_delete_provider_network_default_physical_net(self): - '''Leaves physical_net unspecified''' - data = {'network': {'name': 'net1', - 'admin_state_up': True, - 'tenant_id': 'admin', - pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 411}} - network_req = self.new_create_request('networks', data, self.fmt) - net = self.deserialize(self.fmt, network_req.get_response(self.api)) - self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') - self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) - req = self.new_delete_request('networks', net['network']['id']) - res = req.get_response(self.api) - self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) - - def test_create_delete_provider_network_default_physical_net_2(self): - '''Uses the 'default' keyword as physical_net''' - data = {'network': {'name': 'net1', - 'admin_state_up': True, - 'tenant_id': 'admin', - pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 411, - pnet.PHYSICAL_NETWORK: 'default'}} - network_req = self.new_create_request('networks', data, self.fmt) - net = self.deserialize(self.fmt, network_req.get_response(self.api)) - self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') - self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) - req = self.new_delete_request('networks', net['network']['id']) - res = req.get_response(self.api) - self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) - - def 
test_create_provider_network(self): - data = {'network': {'name': 'net1', - 'admin_state_up': True, - 'tenant_id': 'admin', - pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 411, - pnet.PHYSICAL_NETWORK: 'physnet1'}} - network_req = self.new_create_request('networks', data, self.fmt) - net = self.deserialize(self.fmt, network_req.get_response(self.api)) - self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') - self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) - self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet1') - - # Test that we can create another provider network using the same - # vlan_id on another physical network. - data['network'][pnet.PHYSICAL_NETWORK] = 'physnet2' - network_req = self.new_create_request('networks', data, self.fmt) - net = self.deserialize(self.fmt, network_req.get_response(self.api)) - self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') - self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) - self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet2') - - -class TestMultiProviderNetworks(test_nsxv.NsxVPluginV2TestCase): - - def setUp(self, plugin=None): - cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) - super(TestMultiProviderNetworks, self).setUp() - - def test_create_network_provider(self): - data = {'network': {'name': 'net1', - pnet.NETWORK_TYPE: 'vlan', - pnet.PHYSICAL_NETWORK: 'physnet1', - pnet.SEGMENTATION_ID: 1, - 'tenant_id': 'tenant_one'}} - network_req = self.new_create_request('networks', data) - network = self.deserialize(self.fmt, - network_req.get_response(self.api)) - self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan') - self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1') - self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1) - self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) - - def test_create_network_provider_flat(self): - data = {'network': {'name': 'net1', - pnet.NETWORK_TYPE: 'flat', - pnet.PHYSICAL_NETWORK: 'physnet1', - 'tenant_id': 'tenant_one'}} - network_req = self.new_create_request('networks', data) - network = self.deserialize(self.fmt, - network_req.get_response(self.api)) - self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE]) - self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) - self.assertEqual(0, network['network'][pnet.SEGMENTATION_ID]) - self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) - - def test_create_network_single_multiple_provider(self): - data = {'network': {'name': 'net1', - mpnet_apidef.SEGMENTS: - [{pnet.NETWORK_TYPE: 'vlan', - pnet.PHYSICAL_NETWORK: 'physnet1', - pnet.SEGMENTATION_ID: 1}], - 'tenant_id': 'tenant_one'}} - net_req = self.new_create_request('networks', data) - network = self.deserialize(self.fmt, net_req.get_response(self.api)) - for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, - pnet.SEGMENTATION_ID]: - self.assertNotIn(provider_field, network['network']) - tz = network['network'][mpnet_apidef.SEGMENTS][0] - self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') - self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') - self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) - - # Tests get_network() - net_req = self.new_show_request('networks', network['network']['id']) - network = self.deserialize(self.fmt, net_req.get_response(self.api)) - tz = network['network'][mpnet_apidef.SEGMENTS][0] - self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') - self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') - 
self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) - - def test_create_network_multprovider(self): - data = {'network': {'name': 'net1', - mpnet_apidef.SEGMENTS: - [{pnet.NETWORK_TYPE: 'vlan', - pnet.PHYSICAL_NETWORK: 'physnet1', - pnet.SEGMENTATION_ID: 1}, - {pnet.NETWORK_TYPE: 'vlan', - pnet.PHYSICAL_NETWORK: 'physnet2', - pnet.SEGMENTATION_ID: 2}], - 'tenant_id': 'tenant_one'}} - network_req = self.new_create_request('networks', data) - network = self.deserialize(self.fmt, - network_req.get_response(self.api)) - tz = network['network'][mpnet_apidef.SEGMENTS] - for tz in data['network'][mpnet_apidef.SEGMENTS]: - for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, - pnet.SEGMENTATION_ID]: - self.assertEqual(tz.get(field), tz.get(field)) - - # Tests get_network() - net_req = self.new_show_request('networks', network['network']['id']) - network = self.deserialize(self.fmt, net_req.get_response(self.api)) - tz = network['network'][mpnet_apidef.SEGMENTS] - for tz in data['network'][mpnet_apidef.SEGMENTS]: - for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, - pnet.SEGMENTATION_ID]: - self.assertEqual(tz.get(field), tz.get(field)) - - def test_create_network_with_provider_and_multiprovider_fail(self): - data = {'network': {'name': 'net1', - mpnet_apidef.SEGMENTS: - [{pnet.NETWORK_TYPE: 'vlan', - pnet.PHYSICAL_NETWORK: 'physnet1', - pnet.SEGMENTATION_ID: 1}], - pnet.NETWORK_TYPE: 'vlan', - pnet.PHYSICAL_NETWORK: 'physnet1', - pnet.SEGMENTATION_ID: 1, - 'tenant_id': 'tenant_one'}} - - network_req = self.new_create_request('networks', data) - res = network_req.get_response(self.api) - self.assertEqual(res.status_int, 400) diff --git a/vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py b/vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py index 0e16776111..b398c2f10f 100644 --- a/vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py +++ b/vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py @@ -21,15 +21,12 @@ from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import constants as const from neutron_lib.db import api as db_api -from neutron_lib.plugins import directory from oslo_utils import uuidutils import webob.exc from vmware_nsx.db import extended_security_group_rule as ext_rule_db from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as ext_loip -from vmware_nsx.plugins.nsx_v.vshield import securitygroup_utils from vmware_nsx.tests.unit.nsx_p import test_plugin as test_nsxp_plugin -from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3_plugin @@ -95,24 +92,6 @@ class LocalIPPrefixExtTestCase(test_securitygroup.SecurityGroupDBTestCase): res['security_group_rule']['local_ip_prefix']) -class TestNsxVExtendedSGRule(test_nsxv_plugin.NsxVSecurityGroupsTestCase, - LocalIPPrefixExtTestCase): - def test_create_rule_with_local_ip_prefix(self): - sg_utils = securitygroup_utils.NsxSecurityGroupUtils(None) - local_ip_prefix = '239.255.0.0/16' - plugin = directory.get_plugin() - dest = {'type': 'Ipv4Address', 'value': local_ip_prefix} - - plugin.nsx_sg_utils.get_rule_config = mock.Mock( - side_effect=sg_utils.get_rule_config) - super(TestNsxVExtendedSGRule, - self).test_create_rule_with_local_ip_prefix() - plugin.nsx_sg_utils.get_rule_config.assert_called_with( - source=mock.ANY, destination=dest, services=mock.ANY, - name=mock.ANY, applied_to_ids=mock.ANY, 
flags=mock.ANY, - logged=mock.ANY, action=mock.ANY, tag=mock.ANY, notes=mock.ANY) - - class TestNSXv3ExtendedSGRule(test_nsxv3_plugin.NsxV3PluginTestCaseMixin, LocalIPPrefixExtTestCase): def test_create_rule_with_local_ip_prefix(self): diff --git a/vmware_nsx/tests/unit/extensions/test_security_group_policy.py b/vmware_nsx/tests/unit/extensions/test_security_group_policy.py deleted file mode 100644 index 10d377fce4..0000000000 --- a/vmware_nsx/tests/unit/extensions/test_security_group_policy.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright 2016 VMware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg -import webob.exc - -from neutron.extensions import securitygroup as ext_sg -from neutron.tests.unit.api import test_extensions -from neutron.tests.unit.db import test_db_base_plugin_v2 -from neutron.tests.unit.extensions import test_securitygroup -from neutron_lib import constants -from neutron_lib import context -from neutron_lib import exceptions as n_exc - -from vmware_nsx.extensions import nsxpolicy -from vmware_nsx.extensions import securitygrouplogging as ext_logging -from vmware_nsx.extensions import securitygrouppolicy as ext_policy -from vmware_nsx.tests.unit.nsx_v import test_plugin -from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns - -PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' - - -class SecGroupPolicyExtensionTestCase( - test_plugin.NsxVPluginV2TestCase, - test_securitygroup.SecurityGroupDBTestCase): - def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): - cfg.CONF.set_override('use_nsx_policies', True, group='nsxv') - cfg.CONF.set_override('default_policy_id', 'policy-1', group='nsxv') - # This feature is enabled only since 6.2 - with mock.patch.object(fake_vcns.FakeVcns, - 'get_version', - return_value="6.2.3"): - super(SecGroupPolicyExtensionTestCase, self).setUp( - plugin=plugin, ext_mgr=ext_mgr) - self._tenant_id = test_db_base_plugin_v2.TEST_TENANT_ID - # add policy & logging security group attribute - ext_sg.Securitygroup().update_attributes_map( - ext_policy.RESOURCE_ATTRIBUTE_MAP) - ext_sg.Securitygroup().update_attributes_map( - ext_logging.RESOURCE_ATTRIBUTE_MAP) - - def _create_secgroup_with_policy(self, policy_id, description=None, - logging=False): - body = {'security_group': - {'name': 'sg-policy', - 'tenant_id': self._tenant_id, - 'policy': policy_id, - 'description': description if description else '', - 'logging': logging}} - return self._create_security_group_response(self.fmt, body) - - def _get_secgroup_with_policy(self): - policy_id = 'policy-5' - res = self._create_secgroup_with_policy(policy_id) - return self.deserialize(self.fmt, res) - - def test_secgroup_create_with_policy(self): - policy_id = 'policy-5' - res = self._create_secgroup_with_policy(policy_id) - sg = self.deserialize(self.fmt, res) - self.assertEqual(policy_id, sg['security_group']['policy']) - self.assertEqual('dummy', sg['security_group']['description']) - - def test_secgroup_create_with_policyand_desc(self): - policy_id = 
'policy-5' - desc = 'test' - res = self._create_secgroup_with_policy(policy_id, description=desc) - sg = self.deserialize(self.fmt, res) - self.assertEqual(policy_id, sg['security_group']['policy']) - self.assertEqual(desc, sg['security_group']['description']) - - def test_secgroup_create_without_policy(self): - res = self._create_secgroup_with_policy(None) - self.assertEqual(400, res.status_int) - - def test_secgroup_create_with_illegal_policy(self): - policy_id = 'bad-policy' - with mock.patch(PLUGIN_NAME + '.get_nsx_policy', - side_effect=n_exc.ObjectNotFound(id=policy_id)): - res = self._create_secgroup_with_policy(policy_id) - self.assertEqual(400, res.status_int) - - def test_secgroup_create_with_policy_and_logging(self): - # We do not support policy & logging together - policy_id = 'policy-5' - res = self._create_secgroup_with_policy(policy_id, logging=True) - self.assertEqual(400, res.status_int) - - def test_secgroup_update_with_policy(self): - # Test that updating the policy is allowed - old_policy = 'policy-5' - new_policy = 'policy-6' - res = self._create_secgroup_with_policy(old_policy) - sg = self.deserialize(self.fmt, res) - data = {'security_group': {'policy': new_policy}} - req = self.new_update_request('security-groups', data, - sg['security_group']['id']) - updated_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) - self.assertEqual(new_policy, updated_sg['security_group']['policy']) - # Verify the same result in 'get' - req = self.new_show_request('security-groups', - sg['security_group']['id']) - shown_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) - self.assertEqual(new_policy, shown_sg['security_group']['policy']) - - def test_secgroup_update_no_policy_change(self): - # Test updating without changing the policy - old_policy = 'policy-5' - desc = 'abc' - res = self._create_secgroup_with_policy(old_policy) - sg = self.deserialize(self.fmt, res) - data = {'security_group': {'description': desc}} - req = self.new_update_request('security-groups', data, - sg['security_group']['id']) - updated_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) - self.assertEqual(old_policy, updated_sg['security_group']['policy']) - self.assertEqual(desc, updated_sg['security_group']['description']) - - def test_secgroup_update_remove_policy(self): - # removing the policy is not allowed - sg = self._get_secgroup_with_policy() - data = {'security_group': {'policy': None}} - req = self.new_update_request('security-groups', data, - sg['security_group']['id']) - res = req.get_response(self.ext_api) - self.assertEqual(400, res.status_int) - - def test_secgroup_update_add_logging(self): - # We do not support policy & logging together - sg = self._get_secgroup_with_policy() - data = {'security_group': {'logging': True}} - req = self.new_update_request('security-groups', data, - sg['security_group']['id']) - res = req.get_response(self.ext_api) - self.assertEqual(400, res.status_int) - - def test_non_admin_cannot_delete_policy_sg_and_admin_can(self): - sg = self._get_secgroup_with_policy() - sg_id = sg['security_group']['id'] - - # Try deleting the request as a normal user returns forbidden - # as a tenant is not allowed to delete this. 
- ctx = context.Context('', self._tenant_id) - self._delete('security-groups', sg_id, - expected_code=webob.exc.HTTPForbidden.code, - neutron_context=ctx) - # can be deleted though as admin - self._delete('security-groups', sg_id, - expected_code=webob.exc.HTTPNoContent.code) - - def test_create_rule(self): - sg = self._get_secgroup_with_policy() - rule = self._build_security_group_rule( - sg['security_group']['id'], 'ingress', - constants.PROTO_NAME_TCP, '22', '22') - res = self._create_security_group_rule(self.fmt, rule) - self.deserialize(self.fmt, res) - self.assertEqual(400, res.status_int) - - -class SecGroupPolicyExtensionTestCaseWithRules( - SecGroupPolicyExtensionTestCase): - - def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): - cfg.CONF.set_override('allow_tenant_rules_with_policy', - True, group='nsxv') - super(SecGroupPolicyExtensionTestCaseWithRules, self).setUp( - plugin=plugin, ext_mgr=ext_mgr) - - def test_secgroup_create_without_policy(self): - # in case allow_tenant_rules_with_policy is True, it is allowed to - # create a regular sg - desc = 'test' - res = self._create_secgroup_with_policy(None, description=desc) - sg = self.deserialize(self.fmt, res) - self.assertIsNone(sg['security_group']['policy']) - self.assertEqual(desc, sg['security_group']['description']) - - def test_secgroup_create_without_policy_update_policy(self): - # Create a regular security group. adding the policy later should fail - res = self._create_secgroup_with_policy(None) - sg = self.deserialize(self.fmt, res) - data = {'security_group': {'policy': 'policy-1'}} - req = self.new_update_request('security-groups', data, - sg['security_group']['id']) - res = req.get_response(self.ext_api) - self.assertEqual(400, res.status_int) - - def test_secgroup_create_without_policy_and_rule(self): - # Test that regular security groups can have rules - res = self._create_secgroup_with_policy(None) - sg = self.deserialize(self.fmt, res) - self.assertIsNone(sg['security_group']['policy']) - - rule = self._build_security_group_rule( - sg['security_group']['id'], 'ingress', - constants.PROTO_NAME_TCP, '22', '22') - res = self._create_security_group_rule(self.fmt, rule) - rule_data = self.deserialize(self.fmt, res) - self.assertEqual( - sg['security_group']['id'], - rule_data['security_group_rule']['security_group_id']) - - -class NsxPolExtensionManager(object): - - def get_resources(self): - return nsxpolicy.Nsxpolicy.get_resources() - - def get_actions(self): - return [] - - def get_request_extensions(self): - return [] - - -class TestNsxPolicies(test_plugin.NsxVPluginV2TestCase): - - def setUp(self, plugin=None): - super(TestNsxPolicies, self).setUp() - ext_mgr = NsxPolExtensionManager() - self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) - - def test_get_policy(self): - id = 'policy-1' - req = self.new_show_request('nsx-policies', id) - res = self.deserialize( - self.fmt, req.get_response(self.ext_api) - ) - policy = res['nsx_policy'] - self.assertEqual(id, policy['id']) - - def test_list_policies(self): - req = self.new_list_request('nsx-policies') - res = self.deserialize( - self.fmt, req.get_response(self.ext_api) - ) - self.assertIn('nsx_policies', res) - # the fake_vcns api returns 3 policies - self.assertEqual(3, len(res['nsx_policies'])) diff --git a/vmware_nsx/tests/unit/nsx_tvd/__init__.py b/vmware_nsx/tests/unit/nsx_tvd/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vmware_nsx/tests/unit/nsx_tvd/test_plugin.py 
b/vmware_nsx/tests/unit/nsx_tvd/test_plugin.py deleted file mode 100644 index 4ee5185dc8..0000000000 --- a/vmware_nsx/tests/unit/nsx_tvd/test_plugin.py +++ /dev/null @@ -1,443 +0,0 @@ -# Copyright (c) 2017 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from unittest import mock - -from oslo_config import cfg -from oslo_utils import uuidutils - -from neutron.objects import subnet as subnet_obj -from neutron_lib import context -from neutron_lib import exceptions as n_exc -from neutron_lib.plugins import directory - -from vmware_nsx.tests.unit.dvs import test_plugin as dvs_tests -from vmware_nsx.tests.unit.nsx_v import test_plugin as v_tests -from vmware_nsx.tests.unit.nsx_v3 import test_plugin as t_tests - -PLUGIN_NAME = 'vmware_nsx.plugin.NsxTVDPlugin' -_uuid = uuidutils.generate_uuid - - -class NsxTVDPluginTestCase(v_tests.NsxVPluginV2TestCase, - t_tests.NsxV3PluginTestCaseMixin, - dvs_tests.NeutronSimpleDvsTestCase): - - def setUp(self, - plugin=PLUGIN_NAME, - ext_mgr=None, - service_plugins=None): - - # set the default plugin - if self.plugin_type: - cfg.CONF.set_override('default_plugin', self.plugin_type, - group="nsx_tvd") - - # set the default availability zones - cfg.CONF.set_override('nsx_v_default_availability_zones', - ['default'], - group="nsx_tvd") - cfg.CONF.set_override('nsx_v3_default_availability_zones', - ['defaultv3'], - group="nsx_tvd") - - super(NsxTVDPluginTestCase, self).setUp( - plugin=plugin, - ext_mgr=ext_mgr, - with_md_proxy=False) - self._project_id = _uuid() - self.core_plugin = directory.get_plugin() - - # create a context with this tenant - self.context = context.get_admin_context() - self.context.tenant_id = self.project_id - - # create a default user for this plugin - self.core_plugin.create_project_plugin_map(self.context, - {'project_plugin_map': {'plugin': self.plugin_type, - 'project': self.project_id}}) - self.sub_plugin = self.core_plugin.get_plugin_by_type(self.plugin_type) - - @property - def project_id(self): - return self._project_id - - @property - def plugin_type(self): - pass - - def _test_plugin_initialized(self): - self.assertTrue(self.core_plugin.is_tvd_plugin()) - self.assertIsNotNone(self.sub_plugin) - - def _test_call_create(self, obj_name, calls_count=1, project_id=None, - is_bulk=False): - method_name = single_name = 'create_%s' % obj_name - if is_bulk: - method_name = method_name + '_bulk' - func_to_call = getattr(self.core_plugin, method_name) - if not project_id: - project_id = self.project_id - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.sub_plugin, single_name) as single_func: - if is_bulk: - func_to_call(self.context, - {obj_name + 's': [{obj_name: - {'tenant_id': project_id}}]}) - else: - func_to_call(self.context, - {obj_name: {'tenant_id': project_id}}) - self.assertEqual(calls_count, - sub_func.call_count or single_func.call_count) - - def _test_call_create_with_net_id(self, obj_name, field_name='network_id', - calls_count=1, is_bulk=False): - method_name 
= 'create_%s' % obj_name - if is_bulk: - method_name = method_name + '_bulk' - func_to_call = getattr(self.core_plugin, method_name) - net_id = _uuid() - - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': self.project_id}): - if is_bulk: - func_to_call(self.context, - {obj_name + 's': [{obj_name: - {'tenant_id': self.project_id, - field_name: net_id}}]}) - else: - func_to_call(self.context, - {obj_name: {'tenant_id': self.project_id, - field_name: net_id}}) - self.assertEqual(calls_count, sub_func.call_count) - - def _test_call_delete(self, obj_name): - method_name = 'delete_%s' % obj_name - func_to_call = getattr(self.core_plugin, method_name) - obj_id = _uuid() - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.core_plugin, '_get_%s' % obj_name, - return_value={'tenant_id': self.project_id}): - func_to_call(self.context, obj_id) - sub_func.assert_called_once() - - def _test_call_delete_with_net(self, obj_name, field_name='network_id'): - method_name = 'delete_%s' % obj_name - func_to_call = getattr(self.core_plugin, method_name) - obj_id = _uuid() - net_id = _uuid() - if obj_name == 'subnet': - mock_name = '_get_subnet_object' - ret_val = subnet_obj.Subnet(network_id=net_id) - else: - mock_name = '_get_%s' % obj_name - ret_val = {field_name: net_id} - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.core_plugin, mock_name, - return_value=ret_val),\ - mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': self.project_id}): - func_to_call(self.context, obj_id) - sub_func.assert_called_once() - - def _test_call_update(self, obj_name): - method_name = 'update_%s' % obj_name - func_to_call = getattr(self.core_plugin, method_name) - obj_id = _uuid() - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.core_plugin, '_get_%s' % obj_name, - return_value={'tenant_id': self.project_id}): - func_to_call(self.context, obj_id, {obj_name: {}}) - sub_func.assert_called_once() - - def _test_call_update_with_net(self, obj_name, field_name='network_id'): - method_name = 'update_%s' % obj_name - func_to_call = getattr(self.core_plugin, method_name) - obj_id = _uuid() - net_id = _uuid() - if obj_name == 'subnet': - mock_name = '_get_subnet_object' - ret_val = subnet_obj.Subnet(network_id=net_id) - else: - mock_name = '_get_%s' % obj_name - ret_val = {field_name: net_id} - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.core_plugin, mock_name, - return_value=ret_val),\ - mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': self.project_id}): - func_to_call(self.context, obj_id, {obj_name: {}}) - sub_func.assert_called_once() - - def _test_call_get(self, obj_name): - method_name = 'get_%s' % obj_name - func_to_call = getattr(self.core_plugin, method_name) - obj_id = _uuid() - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.core_plugin, '_get_%s' % obj_name, - return_value={'tenant_id': self.project_id}): - func_to_call(self.context, obj_id) - sub_func.assert_called_once() - - def _test_call_get_with_net(self, obj_name, field_name='network_id'): - method_name = 'get_%s' % obj_name - func_to_call = getattr(self.core_plugin, method_name) - obj_id = _uuid() - net_id = _uuid() - if obj_name == 'subnet': - mock_name = '_get_subnet_object' - ret_val = 
subnet_obj.Subnet(network_id=net_id) - else: - mock_name = '_get_%s' % obj_name - ret_val = {field_name: net_id} - with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ - mock.patch.object(self.core_plugin, mock_name, - return_value=ret_val),\ - mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': self.project_id}): - func_to_call(self.context, obj_id) - sub_func.assert_called_once() - - -class TestPluginWithDefaultPlugin(NsxTVDPluginTestCase): - """Test TVD plugin with the NSX-T (default) sub plugin""" - - @property - def plugin_type(self): - return 'nsx-t' - - def test_plugin_initialized(self): - self._test_plugin_initialized() - - # no unsupported extensions for the nsx_t plugin - self.assertCountEqual( - ['router_type', 'router_size'], - self.core_plugin._unsupported_fields[self.plugin_type]['router']) - self.assertEqual( - [], - self.core_plugin._unsupported_fields[self.plugin_type]['port']) - - def test_create_network(self): - self._test_call_create('network') - - def test_create_subnet(self): - self._test_call_create_with_net_id('subnet') - - def test_create_port(self): - self._test_call_create_with_net_id('port') - - def test_create_router(self): - self._test_call_create('router') - - def test_create_floatingip(self): - self._test_call_create_with_net_id( - 'floatingip', field_name='floating_network_id') - - def test_create_security_group(self): - # plugin will be called twice because of the default sg - self._test_call_create('security_group', calls_count=2) - - def test_create_security_group_rule(self): - self._test_call_create('security_group_rule') - - def test_create_network_bulk(self): - self._test_call_create('network', is_bulk=True) - - def test_create_subnet_bulk(self): - self._test_call_create_with_net_id('subnet', is_bulk=True) - - def test_create_security_group_rule_bulk(self): - self._test_call_create('security_group_rule', is_bulk=True) - - def test_delete_network(self): - self._test_call_delete('network') - - def test_delete_subnet(self): - self._test_call_delete_with_net('subnet') - - def test_delete_port(self): - self._test_call_delete_with_net('port') - - def test_delete_router(self): - self._test_call_delete('router') - - def test_delete_floatingip(self): - self._test_call_delete_with_net( - 'floatingip', field_name='floating_network_id') - - def test_delete_security_group(self): - self._test_call_delete('security_group') - - def test_update_network(self): - self._test_call_update('network') - - def test_update_subnet(self): - self._test_call_update_with_net('subnet') - - def test_update_port(self): - self._test_call_update_with_net('port') - - def test_update_router(self): - self._test_call_update('router') - - def test_update_floatingip(self): - self._test_call_update_with_net( - 'floatingip', field_name='floating_network_id') - - def test_update_security_group(self): - self._test_call_update('security_group') - - def test_unsupported_extensions(self): - self.assertRaises(n_exc.InvalidInput, - self.core_plugin.create_router, - self.context, - {'router': {'tenant_id': self.project_id, - 'router_type': 'exclusive'}}) - - def test_get_network(self): - self._test_call_get('network') - - def test_get_subnet(self): - self._test_call_get_with_net('subnet') - - def test_get_port(self): - self._test_call_get_with_net('port') - - def test_get_router(self): - self._test_call_get('router') - - def test_get_floatingip(self): - self._test_call_get_with_net( - 'floatingip', field_name='floating_network_id') - - def 
test_get_security_group(self): - self._test_call_get('security_group') - - def test_add_router_interface(self): - rtr_id = _uuid() - port_id = _uuid() - net_id = _uuid() - with mock.patch.object(self.sub_plugin, - 'add_router_interface') as sub_func,\ - mock.patch.object(self.core_plugin, '_get_router', - return_value={'tenant_id': self.project_id}),\ - mock.patch.object(self.core_plugin, '_get_port', - return_value={'network_id': net_id}),\ - mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': self.project_id}),\ - mock.patch.object(self.core_plugin, '_validate_interface_info', - return_value=(True, False)): - self.core_plugin.add_router_interface(self.context, rtr_id, - {'port_id': port_id}) - sub_func.assert_called_once() - - def test_add_invalid_router_interface(self): - # Test that the plugin prevents adding interface from one plugin - # to a router of another plugin - rtr_id = _uuid() - port_id = _uuid() - net_id = _uuid() - another_tenant_id = _uuid() - another_plugin = 'nsx-v' if self.plugin_type == 'nsx-t' else 'nsx-t' - self.core_plugin.create_project_plugin_map(self.context, - {'project_plugin_map': {'plugin': another_plugin, - 'project': another_tenant_id}}) - - with mock.patch.object(self.core_plugin, '_get_router', - return_value={'tenant_id': self.project_id}),\ - mock.patch.object(self.core_plugin, '_get_port', - return_value={'network_id': net_id}),\ - mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': another_tenant_id}),\ - mock.patch.object(self.core_plugin, '_validate_interface_info', - return_value=(True, False)): - self.assertRaises(n_exc.InvalidInput, - self.core_plugin.add_router_interface, - self.context, rtr_id, {'port_id': port_id}) - - def test_remove_router_interface(self): - rtr_id = _uuid() - with mock.patch.object(self.sub_plugin, - 'remove_router_interface') as sub_func,\ - mock.patch.object(self.core_plugin, '_get_router', - return_value={'tenant_id': self.project_id}): - self.core_plugin.remove_router_interface(self.context, rtr_id, {}) - sub_func.assert_called_once() - - def test_disassociate_floatingips(self): - port_id = _uuid() - net_id = _uuid() - with mock.patch.object(self.sub_plugin, - 'disassociate_floatingips') as sub_func,\ - mock.patch.object(self.core_plugin, '_get_port', - return_value={'network_id': net_id}),\ - mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': self.project_id}): - self.core_plugin.disassociate_floatingips(self.context, port_id) - sub_func.assert_called_once() - - def test_new_user(self): - project_id = _uuid() - self._test_call_create('network', project_id=project_id) - - -class TestPluginWithNsxv(TestPluginWithDefaultPlugin): - """Test TVD plugin with the NSX-V sub plugin""" - - @property - def plugin_type(self): - return 'nsx-v' - - def test_plugin_initialized(self): - self._test_plugin_initialized() - - # no unsupported extensions for the nsx_v plugin - self.assertEqual( - [], - self.core_plugin._unsupported_fields[self.plugin_type]['router']) - self.assertEqual( - [], - self.core_plugin._unsupported_fields[self.plugin_type]['port']) - - def test_unsupported_extensions(self): - self.skipTest('No unsupported extensions in this plugin') - - -class TestPluginWithDvs(TestPluginWithDefaultPlugin): - """Test TVD plugin with the DVS sub plugin""" - - @property - def plugin_type(self): - return 'dvs' - - def test_plugin_initialized(self): - self._test_plugin_initialized() - - # no unsupported extensions for the dvs plugin - 
self.assertCountEqual( - ['mac_learning_enabled', 'provider_security_groups'], - self.core_plugin._unsupported_fields[self.plugin_type]['port']) - - def test_unsupported_extensions(self): - net_id = _uuid() - with mock.patch.object(self.core_plugin, '_get_network', - return_value={'tenant_id': self.project_id}): - self.assertRaises(n_exc.InvalidInput, - self.core_plugin.create_port, - self.context, - {'port': {'tenant_id': self.project_id, - 'network_id': net_id, - 'mac_learning_enabled': True}}) diff --git a/vmware_nsx/tests/unit/nsx_v/__init__.py b/vmware_nsx/tests/unit/nsx_v/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vmware_nsx/tests/unit/nsx_v/housekeeper/__init__.py b/vmware_nsx/tests/unit/nsx_v/housekeeper/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py b/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py deleted file mode 100644 index 585a80ccaf..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2017 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -from neutron.tests import base -from neutron_lib.plugins import constants - -from vmware_nsx.plugins.common.housekeeper import base_job -from vmware_nsx.plugins.nsx_v.housekeeper import error_backup_edge - -FAKE_ROUTER_BINDINGS = [ - { - 'router_id': 'backup-3b0b1fe1-c984', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-782', - 'edge_type': 'service', 'appliance_size': 'compact'}] - - -class ErrorBackupEdgeTestCaseReadOnly(base.BaseTestCase): - - def setUp(self): - def get_plugin_mock(alias=constants.CORE): - if alias in (constants.CORE, constants.L3): - return self.plugin - - super(ErrorBackupEdgeTestCaseReadOnly, self).setUp() - self.plugin = mock.Mock() - self.context = mock.Mock() - self.context.session = mock.Mock() - mock.patch('neutron_lib.plugins.directory.get_plugin', - side_effect=get_plugin_mock).start() - self.log = mock.Mock() - base_job.LOG = self.log - self.job = error_backup_edge.ErrorBackupEdgeJob(True, []) - - def run_job(self): - self.job.run(self.context, readonly=True) - - def test_clean_run(self): - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - return_value=[]).start() - self.run_job() - self.log.warning.assert_not_called() - - def test_broken_backup_edge(self): - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - return_value=FAKE_ROUTER_BINDINGS).start() - - self.run_job() - self.log.warning.assert_called_once() - - -class ErrorBackupEdgeTestCaseReadWrite(ErrorBackupEdgeTestCaseReadOnly): - def run_job(self): - self.job.run(self.context, readonly=False) - - def test_broken_backup_edge(self): - upd_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() - upd_edge = mock.patch.object(self.plugin.nsx_v, 'update_edge').start() - self.job.azs = mock.Mock() - 
az = mock.Mock() - mock.patch.object(self.job.azs, 'get_availability_zone', - return_value=az).start() - super(ErrorBackupEdgeTestCaseReadWrite, self - ).test_broken_backup_edge() - upd_binding.assert_has_calls( - [mock.call(mock.ANY, r['router_id'], status='ACTIVE') - for r in FAKE_ROUTER_BINDINGS]) - upd_edge.assert_called_with( - self.context, 'backup-3b0b1fe1-c984', 'edge-782', - 'backup-3b0b1fe1-c984', None, appliance_size='compact', - availability_zone=az, dist=False) diff --git a/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_dhcp_edge.py b/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_dhcp_edge.py deleted file mode 100644 index f43396bdb9..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_dhcp_edge.py +++ /dev/null @@ -1,489 +0,0 @@ -# Copyright 2017 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import datetime -from unittest import mock - -from neutron.tests import base -from neutron_lib.plugins import constants - -from vmware_nsx.plugins.common.housekeeper import base_job -from vmware_nsx.plugins.nsx_v.housekeeper import error_dhcp_edge - -FAKE_ROUTER_BINDINGS = [ - { - 'router_id': 'dhcp-16c224dd-7c2b-4241-a447-4fc07a3', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}, - { - 'router_id': 'dhcp-31341032-6911-4596-8b64-afce92f', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}, - { - 'router_id': 'dhcp-51c97abb-8ac9-4f24-b914-cc30cf8', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}, - { - 'router_id': 'dhcp-5d01cea4-58f8-4a16-9be0-11012ca', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}, - { - 'router_id': 'dhcp-65a5335c-4c72-4721-920e-5abdc9e', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}, - { - 'router_id': 'dhcp-83bce421-b72c-4744-9285-a0fcc25', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}, - { - 'router_id': 'dhcp-9d2f5b66-c252-4681-86af-9460484', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}, - { - 'router_id': 'dhcp-aea44408-0448-42dd-9ae6-ed940da', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'}] - -BAD_ROUTER_BINDING = { - 'router_id': 'dhcp-11111111-1111-1111-aaaa-aaaaaaa', 'status': 'ERROR', - 'availability_zone': 'default', 'edge_id': 'edge-752'} - -FAKE_EDGE_VNIC_BINDS = [ - { - 'network_id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b', - 'vnic_index': 1, 'edge_id': 'edge-752', 'tunnel_index': 1}, - { - 'network_id': '16c224dd-7c2b-4241-a447-4fc07a38dc80', - 'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 4}, - { - 'network_id': '65a5335c-4c72-4721-920e-5abdc9e09ba4', - 'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 6}, - { - 'network_id': 'aea44408-0448-42dd-9ae6-ed940dac564a', - 'vnic_index': 4, 'edge_id': 'edge-752', 'tunnel_index': 10}, - { - 'network_id': '5d01cea4-58f8-4a16-9be0-11012cadbf55', - 'vnic_index': 4, 'edge_id': 'edge-752', 
'tunnel_index': 12}, - { - 'network_id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a', - 'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 16}, - { - 'network_id': '31341032-6911-4596-8b64-afce92f46bf4', - 'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 18}, - { - 'network_id': '9d2f5b66-c252-4681-86af-946048414a1f', - 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 22}, - { - 'network_id': '83bce421-b72c-4744-9285-a0fcc25b001a', - 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 24}] - -BAD_VNIC_BINDING = { - 'network_id': '11111111-1111-1111-aaaa-aaaaaaabbaac', - 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 21} - -FAKE_INTERNAL_NETWORKS = [ - {'availability_zone': u'default', - 'network_id': u'7c0b6fb5-d86c-4e5e-a2af-9ce36971764b', - 'network_purpose': 'inter_edge_net', 'updated_at': None, - '_rev_bumped': False, - 'created_at': datetime.datetime(2017, 12, 13, 12, 28, 18)}] - -FAKE_NETWORK_RESULTS = [{'id': 'e3a02b46-b9c9-4f2f-bcea-7978355a7dca'}, - {'id': '031eaf4b-49b8-4003-9369-8a0dd5d7a163'}, - {'id': '16c224dd-7c2b-4241-a447-4fc07a38dc80'}, - {'id': '1a3b570c-c8b5-411e-8e13-d4dc0b3e56b2'}, - {'id': '24b31d2c-fcec-45e5-bdcb-aa089d3713ae'}, - {'id': '31341032-6911-4596-8b64-afce92f46bf4'}, - {'id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a'}, - {'id': '5484b39b-ec6e-43f4-b900-fc1b2c49c71a'}, - {'id': '54eae237-3516-4f82-b46f-f955e91c989c'}, - {'id': '5a859fa0-bea0-41be-843a-9f9bf39e2509'}, - {'id': '5d01cea4-58f8-4a16-9be0-11012cadbf55'}, - {'id': '65a5335c-4c72-4721-920e-5abdc9e09ba4'}, - {'id': '708f11d4-00d0-48ea-836f-01273cbf36cc'}, - {'id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b'}, - {'id': '83bce421-b72c-4744-9285-a0fcc25b001a'}, - {'id': '9d2f5b66-c252-4681-86af-946048414a1f'}, - {'id': 'aea44408-0448-42dd-9ae6-ed940dac564a'}, - {'id': 'b0cee4e3-266b-48d3-a651-04f1985fe4b0'}, - {'id': 'be82b8c5-96a9-4e08-a965-bb09d48ec161'}, - {'id': 'e69279c6-9a1e-4f7b-b421-b8b3eb92c54b'}] - -BACKEND_EDGE_VNICS = {'vnics': [ - {'label': 'vNic_0', 'name': 'external', - 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'uplink', - 'isConnected': True, 'index': 0, 'portgroupId': 'network-13', - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_1', 'name': 'internal1', 'addressGroups': { - 'addressGroups': [ - {'primaryAddress': '169.254.128.14', - 'secondaryAddresses': { - 'type': 'secondary_addresses', - 'ipAddress': ['169.254.169.254']}, - 'subnetMask': '255.255.128.0', - 'subnetPrefixLength': '17'}]}, 'mtu': 1500, - 'type': 'internal', 'isConnected': True, 'index': 1, - 'portgroupId': 'virtualwire-472', - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_2', 'name': 'internal2', - 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', - 'subInterfaces': {'subInterfaces': [ - {'isConnected': True, 'label': 'vNic_10', - 'name': '1639ff40-8137-4803-a29f-dcf0efc35b34', 'index': 10, - 'tunnelId': 4, 'logicalSwitchId': 'virtualwire-497', - 'logicalSwitchName': '16c224dd-7c2b-4241-a447-4fc07a38dc80', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [{ - 'primaryAddress': '10.24.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5025, - 'subInterfaceBackingType': 'NETWORK'}, - {'isConnected': True, 'label': 'vNic_12', - 'name': 'd1515746-a21a-442d-8347-62b36f5791d6', 'index': 12, - 'tunnelId': 6, 'logicalSwitchId': 'virtualwire-499', - 'logicalSwitchName': 
'65a5335c-4c72-4721-920e-5abdc9e09ba4', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [ - {'primaryAddress': '10.26.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5027, - 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, - 'index': 2, 'portgroupId': 'dvportgroup-1550', - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_3', 'name': 'vnic3', - 'addressGroups': {'addressGroups': []}, - 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 3, - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_4', 'name': 'internal4', - 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', - 'subInterfaces': {'subInterfaces': [ - {'isConnected': True, 'label': 'vNic_16', - 'name': 'e2405dc6-21d7-4421-a70c-3eecf675b286', 'index': 16, - 'tunnelId': 10, 'logicalSwitchId': 'virtualwire-503', - 'logicalSwitchName': 'aea44408-0448-42dd-9ae6-ed940dac564a', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [ - {'primaryAddress': '10.30.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5031, - 'subInterfaceBackingType': 'NETWORK'}, - {'isConnected': True, 'label': 'vNic_18', - 'name': 'a10fb348-30e4-477f-817f-bb3c9c9fd3f5', 'index': 18, - 'tunnelId': 12, 'logicalSwitchId': 'virtualwire-505', - 'logicalSwitchName': '5d01cea4-58f8-4a16-9be0-11012cadbf55', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [ - {'primaryAddress': '10.32.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5033, - 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, - 'index': 4, 'portgroupId': 'dvportgroup-1559', - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_5', 'name': 'vnic5', - 'addressGroups': {'addressGroups': []}, - 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 5, - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_6', 'name': 'internal6', - 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', - 'subInterfaces': {'subInterfaces': [ - {'isConnected': True, 'label': 'vNic_22', - 'name': '2da534c8-3d9b-4677-aa14-2e66efd09e3f', 'index': 22, - 'tunnelId': 16, 'logicalSwitchId': 'virtualwire-509', - 'logicalSwitchName': '51c97abb-8ac9-4f24-b914-cc30cf8e856a', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [ - {'primaryAddress': '10.36.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5037, - 'subInterfaceBackingType': 'NETWORK'}, - {'isConnected': True, 'label': 'vNic_24', - 'name': 'd25f00c2-eb82-455c-87b9-d2d510d42917', 'index': 24, - 'tunnelId': 18, 'logicalSwitchId': 'virtualwire-511', - 'logicalSwitchName': '31341032-6911-4596-8b64-afce92f46bf4', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [ - {'primaryAddress': '10.38.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5039, - 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, - 'index': 6, 'portgroupId': 'dvportgroup-1567', - - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_7', 'name': 'vnic7', - 'addressGroups': {'addressGroups': []}, - 'mtu': 1500, 'type': 'internal', 'isConnected': 
False, 'index': 7, - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_8', 'name': 'internal8', - 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', - 'subInterfaces': {'subInterfaces': [ - {'isConnected': True, 'label': 'vNic_28', - 'name': 'cf4cc867-e958-4f86-acea-d8a52a4c26c8', 'index': 28, - 'tunnelId': 22, 'logicalSwitchId': 'virtualwire-515', - 'logicalSwitchName': '9d2f5b66-c252-4681-86af-946048414a1f', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [ - {'primaryAddress': '10.42.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5043, - 'subInterfaceBackingType': 'NETWORK'}, - {'isConnected': True, 'label': 'vNic_30', - 'name': 'ceab3d83-3ee2-4372-b5d7-f1d47be76e9d', 'index': 30, - 'tunnelId': 24, 'logicalSwitchId': 'virtualwire-517', - 'logicalSwitchName': '83bce421-b72c-4744-9285-a0fcc25b001a', - 'enableSendRedirects': True, 'mtu': 1500, - 'addressGroups': {'addressGroups': [ - {'primaryAddress': '10.44.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045, - 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, - 'index': 8, 'portgroupId': 'dvportgroup-1575', - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}, - {'label': 'vNic_9', 'name': 'vnic9', - 'addressGroups': {'addressGroups': []}, - 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 9, - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True}]} - -BAD_SUBINTERFACE = { - 'isConnected': True, 'label': 'vNic_31', - 'name': '11111111-2222-3333-4444-555555555555', 'index': 31, - 'tunnelId': 25, 'logicalSwitchId': 'virtualwire-518', - 'logicalSwitchName': '55555555-4444-3333-2222-111111111111', - 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': { - 'addressGroups': [ - {'primaryAddress': '10.99.0.2', 'subnetMask': '255.255.255.0', - 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045, - 'subInterfaceBackingType': 'NETWORK'} - -BAD_INTERFACE = { - 'label': 'vNic_8', 'name': 'vnic8', - 'addressGroups': {'addressGroups': []}, - 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 8, - 'fenceParameters': [], 'enableProxyArp': False, - 'enableSendRedirects': True} - - -class ErrorDhcpEdgeTestCaseReadOnly(base.BaseTestCase): - - def setUp(self): - def get_plugin_mock(alias=constants.CORE): - if alias in (constants.CORE, constants.L3): - return self.plugin - - super(ErrorDhcpEdgeTestCaseReadOnly, self).setUp() - self.plugin = mock.Mock() - self.context = mock.Mock() - self.context.session = mock.Mock() - mock.patch('neutron_lib.plugins.directory.get_plugin', - side_effect=get_plugin_mock).start() - self.plugin.edge_manager = mock.Mock() - self.plugin.nsx_v = mock.Mock() - self.plugin.nsx_v.vcns = mock.Mock() - mock.patch.object(self.plugin, 'get_availability_zone_name_by_edge', - return_value='default').start() - self.log = mock.Mock() - base_job.LOG = self.log - self.job = error_dhcp_edge.ErrorDhcpEdgeJob(True, []) - - def run_job(self): - self.job.run(self.context, readonly=True) - - def test_clean_run(self): - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - return_value=[]).start() - self.run_job() - self.log.warning.assert_not_called() - - def test_invalid_router_binding(self): - router_binds = copy.deepcopy(FAKE_ROUTER_BINDINGS) - router_binds.append(BAD_ROUTER_BINDING) - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - 
return_value=router_binds).start() - mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', - return_value=FAKE_EDGE_VNIC_BINDS).start() - mock.patch.object(self.plugin, 'get_networks', - return_value=FAKE_NETWORK_RESULTS).start() - mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', - return_value=(None, BACKEND_EDGE_VNICS)).start() - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', - return_value=FAKE_INTERNAL_NETWORKS).start() - self.run_job() - self.log.warning.assert_called_once() - - def test_invalid_edge_vnic_bindings(self): - def fake_vnic_bind(*args, **kwargs): - # The DB content is manipulated by the housekeeper. Therefore - # get_edge_vnic_bindings_by_edge() output should be altered - if fake_vnic_bind.ctr < 2: - ret = fake_vnic_bind.vnic_binds - else: - ret = FAKE_EDGE_VNIC_BINDS - fake_vnic_bind.ctr += 1 - return ret - - fake_vnic_bind.ctr = 0 - fake_vnic_bind.vnic_binds = copy.deepcopy(FAKE_EDGE_VNIC_BINDS) - fake_vnic_bind.vnic_binds.append(BAD_VNIC_BINDING) - - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - return_value=FAKE_ROUTER_BINDINGS).start() - mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', - side_effect=fake_vnic_bind).start() - mock.patch.object(self.plugin, 'get_networks', - return_value=FAKE_NETWORK_RESULTS).start() - mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', - return_value=(None, BACKEND_EDGE_VNICS)).start() - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', - return_value=FAKE_INTERNAL_NETWORKS).start() - self.run_job() - self.log.warning.assert_called_once() - - def test_invalid_edge_sub_if(self): - backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) - backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'].append( - BAD_SUBINTERFACE) - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - return_value=FAKE_ROUTER_BINDINGS).start() - mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', - return_value=FAKE_EDGE_VNIC_BINDS).start() - mock.patch.object(self.plugin, 'get_networks', - return_value=FAKE_NETWORK_RESULTS).start() - mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', - return_value=(None, backend_vnics)).start() - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', - return_value=FAKE_INTERNAL_NETWORKS).start() - self.run_job() - self.log.warning.assert_called_once() - - def test_missing_edge_sub_if(self): - backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) - del backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'][1] - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - return_value=FAKE_ROUTER_BINDINGS).start() - mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', - return_value=FAKE_EDGE_VNIC_BINDS).start() - mock.patch.object(self.plugin, 'get_networks', - return_value=FAKE_NETWORK_RESULTS).start() - mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', - return_value=(None, backend_vnics)).start() - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', - return_value=FAKE_INTERNAL_NETWORKS).start() - self.run_job() - self.log.warning.assert_called_once() - - def test_missing_edge_interface(self): - backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) - backend_vnics['vnics'][8] = BAD_INTERFACE - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', - return_value=FAKE_ROUTER_BINDINGS).start() - mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', - return_value=FAKE_EDGE_VNIC_BINDS).start() - mock.patch.object(self.plugin, 'get_networks', 
- return_value=FAKE_NETWORK_RESULTS).start() - mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', - return_value=(None, backend_vnics)).start() - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', - return_value=FAKE_INTERNAL_NETWORKS).start() - self.run_job() - self.assertEqual(2, self.log.warning.call_count) - - -class ErrorDhcpEdgeTestCaseReadWrite(ErrorDhcpEdgeTestCaseReadOnly): - - def run_job(self): - self.job.run(self.context, readonly=False) - - def test_invalid_router_binding(self): - del_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.delete_nsxv_router_binding').start() - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', - return_value=FAKE_ROUTER_BINDINGS).start() - upd_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() - super(ErrorDhcpEdgeTestCaseReadWrite, self - ).test_invalid_router_binding() - del_binding.assert_called_with(mock.ANY, - BAD_ROUTER_BINDING['router_id']) - upd_binding.assert_has_calls( - [mock.call(mock.ANY, r['router_id'], status='ACTIVE') - for r in FAKE_ROUTER_BINDINGS]) - - def test_invalid_edge_vnic_bindings(self): - del_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.free_edge_vnic_by_network').start() - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', - return_value=FAKE_ROUTER_BINDINGS).start() - upd_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() - super(ErrorDhcpEdgeTestCaseReadWrite, self - ).test_invalid_edge_vnic_bindings() - del_binding.assert_called_with(mock.ANY, BAD_VNIC_BINDING['edge_id'], - BAD_VNIC_BINDING['network_id']) - upd_binding.assert_has_calls( - [mock.call(mock.ANY, r['router_id'], status='ACTIVE') - for r in FAKE_ROUTER_BINDINGS]) - - def test_invalid_edge_sub_if(self): - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', - return_value=FAKE_ROUTER_BINDINGS).start() - upd_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() - upd_if = mock.patch.object(self.plugin.nsx_v.vcns, - 'update_interface').start() - super(ErrorDhcpEdgeTestCaseReadWrite, self - ).test_invalid_edge_sub_if() - upd_binding.assert_has_calls( - [mock.call(mock.ANY, r['router_id'], status='ACTIVE') - for r in FAKE_ROUTER_BINDINGS]) - upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) - - def test_missing_edge_sub_if(self): - deleted_sub_if = BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][ - 'subInterfaces'][1] - mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', - return_value=FAKE_ROUTER_BINDINGS).start() - mock.patch.object( - self.plugin.edge_manager, '_create_sub_interface', - return_value=('dvportgroup-1575', deleted_sub_if)).start() - upd_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() - upd_if = mock.patch.object(self.plugin.nsx_v.vcns, - 'update_interface').start() - super(ErrorDhcpEdgeTestCaseReadWrite, self - ).test_missing_edge_sub_if() - upd_binding.assert_has_calls( - [mock.call(mock.ANY, r['router_id'], status='ACTIVE') - for r in FAKE_ROUTER_BINDINGS]) - upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) - - def test_missing_edge_interface(self): - def fake_create_subif(*args, **kwargs): - deleted_sub_if = BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][ - 'subInterfaces'][fake_create_subif.ctr] - fake_create_subif.ctr += 1 - return (BACKEND_EDGE_VNICS['vnics'][8]['portgroupId'], - deleted_sub_if) - - fake_create_subif.ctr = 0 - - 
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', - return_value=FAKE_ROUTER_BINDINGS).start() - mock.patch.object( - self.plugin.edge_manager, '_create_sub_interface', - side_effect=fake_create_subif).start() - upd_binding = mock.patch( - 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() - upd_if = mock.patch.object(self.plugin.nsx_v.vcns, - 'update_interface').start() - super(ErrorDhcpEdgeTestCaseReadWrite, self - ).test_missing_edge_interface() - upd_binding.assert_has_calls( - [mock.call(mock.ANY, r['router_id'], status='ACTIVE') - for r in FAKE_ROUTER_BINDINGS]) - upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) diff --git a/vmware_nsx/tests/unit/nsx_v/test_availability_zones.py b/vmware_nsx/tests/unit/nsx_v/test_availability_zones.py deleted file mode 100644 index eda868c54b..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/test_availability_zones.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright 2016 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from neutron.tests import base - -from vmware_nsx.common import config -from vmware_nsx.common import exceptions as nsx_exc -from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az - - -DEF_AZ_POOL = ['service:compact:1:2', 'vdr:compact:1:2'] -DEF_GLOBAL_POOL = ['service:compact:4:10', 'vdr:compact:4:10'] - - -class NsxvAvailabilityZonesTestCase(base.BaseTestCase): - - def setUp(self): - super(NsxvAvailabilityZonesTestCase, self).setUp() - self.az_name = 'zone1' - self.group_name = 'az:%s' % self.az_name - config.register_nsxv_azs(cfg.CONF, [self.az_name]) - cfg.CONF.set_override("ha_placement_random", True, group="nsxv") - cfg.CONF.set_override("mgt_net_proxy_ips", ["2.2.2.2"], group="nsxv") - cfg.CONF.set_override("dvs_id", "dvs-1", group="nsxv") - - def _config_az(self, - resource_pool_id="respool", - datastore_id="datastore", - edge_ha=True, - ha_datastore_id="hastore", - backup_edge_pool=DEF_AZ_POOL, - ha_placement_random=False, - datacenter_moid="datacenter", - mgt_net_moid="portgroup-407", - mgt_net_proxy_ips=["1.1.1.1"], - mgt_net_proxy_netmask="255.255.255.0", - mgt_net_default_gateway="2.2.2.2", - external_network="network-17", - vdn_scope_id="vdnscope-1", - dvs_id="dvs-2"): - cfg.CONF.set_override("resource_pool_id", resource_pool_id, - group=self.group_name) - cfg.CONF.set_override("datastore_id", datastore_id, - group=self.group_name) - if edge_ha is not None: - cfg.CONF.set_override("edge_ha", edge_ha, - group=self.group_name) - cfg.CONF.set_override("ha_datastore_id", ha_datastore_id, - group=self.group_name) - if ha_placement_random is not None: - cfg.CONF.set_override("ha_placement_random", - ha_placement_random, - group=self.group_name) - if datacenter_moid is not None: - cfg.CONF.set_override("datacenter_moid", - datacenter_moid, - group=self.group_name) - if backup_edge_pool is not None: - cfg.CONF.set_override("backup_edge_pool", backup_edge_pool, - group=self.group_name) - if mgt_net_moid is not 
None: - cfg.CONF.set_override("mgt_net_moid", mgt_net_moid, - group=self.group_name) - if mgt_net_proxy_ips is not None: - cfg.CONF.set_override("mgt_net_proxy_ips", mgt_net_proxy_ips, - group=self.group_name) - if mgt_net_proxy_netmask is not None: - cfg.CONF.set_override("mgt_net_proxy_netmask", - mgt_net_proxy_netmask, - group=self.group_name) - if mgt_net_default_gateway is not None: - cfg.CONF.set_override("mgt_net_default_gateway", - mgt_net_default_gateway, - group=self.group_name) - if external_network is not None: - cfg.CONF.set_override("external_network", external_network, - group=self.group_name) - if vdn_scope_id is not None: - cfg.CONF.set_override("vdn_scope_id", vdn_scope_id, - group=self.group_name) - if dvs_id is not None: - cfg.CONF.set_override("dvs_id", dvs_id, - group=self.group_name) - - def test_simple_availability_zone(self): - self._config_az() - az = nsx_az.NsxVAvailabilityZone(self.az_name) - self.assertEqual(self.az_name, az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertTrue(az.edge_ha) - self.assertEqual("hastore", az.ha_datastore_id) - self.assertFalse(az.ha_placement_random) - self.assertEqual("datacenter", az.datacenter_moid) - self.assertEqual(DEF_AZ_POOL, az.backup_edge_pool) - self.assertEqual("portgroup-407", az.mgt_net_moid) - self.assertEqual(["1.1.1.1"], az.mgt_net_proxy_ips) - self.assertEqual("255.255.255.0", az.mgt_net_proxy_netmask) - self.assertEqual("2.2.2.2", az.mgt_net_default_gateway) - self.assertEqual("network-17", az.external_network) - self.assertEqual("vdnscope-1", az.vdn_scope_id) - self.assertEqual("dvs-2", az.dvs_id) - self.assertTrue(az.az_metadata_support) - - def test_availability_zone_no_edge_ha(self): - self._config_az(edge_ha=False) - az = nsx_az.NsxVAvailabilityZone(self.az_name) - self.assertEqual(self.az_name, az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertFalse(az.edge_ha) - self.assertIsNone(az.ha_datastore_id) - self.assertFalse(az.ha_placement_random) - - def test_availability_zone_no_ha_datastore(self): - self._config_az(ha_datastore_id=None) - az = nsx_az.NsxVAvailabilityZone(self.az_name) - self.assertEqual(self.az_name, az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertTrue(az.edge_ha) - self.assertIsNone(az.ha_datastore_id) - self.assertFalse(az.ha_placement_random) - - def test_missing_group_section(self): - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - "doesnt_exist") - - def test_availability_zone_missing_respool(self): - self._config_az(resource_pool_id=None) - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - self.az_name) - - def test_availability_zone_missing_datastore(self): - self._config_az(datastore_id=None) - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - self.az_name) - - def test_availability_zone_missing_edge_ha(self): - self._config_az(edge_ha=None) - az = nsx_az.NsxVAvailabilityZone(self.az_name) - self.assertEqual(self.az_name, az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertFalse(az.edge_ha) - self.assertIsNone(az.ha_datastore_id) - self.assertFalse(az.ha_placement_random) - - def test_availability_zone_missing_edge_placement(self): - self._config_az(ha_placement_random=None) - az = 
nsx_az.NsxVAvailabilityZone(self.az_name) - self.assertEqual(self.az_name, az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertTrue(az.edge_ha) - self.assertEqual("hastore", az.ha_datastore_id) - # ha_placement_random should have the global value - self.assertTrue(az.ha_placement_random) - - def test_availability_zone_missing_backup_pool(self): - self._config_az(backup_edge_pool=None) - az = nsx_az.NsxVAvailabilityZone(self.az_name) - self.assertEqual(self.az_name, az.name) - # Should use the global configuration instead - self.assertEqual(DEF_GLOBAL_POOL, az.backup_edge_pool) - - def test_availability_zone_missing_metadata(self): - self._config_az(mgt_net_proxy_ips=None, dvs_id=None) - az = nsx_az.NsxVAvailabilityZone(self.az_name) - self.assertIsNone(az.mgt_net_moid) - self.assertEqual([], az.mgt_net_proxy_ips) - self.assertIsNone(az.mgt_net_proxy_netmask) - self.assertIsNone(az.mgt_net_default_gateway) - self.assertFalse(az.az_metadata_support) - - def test_availability_zone_same_metadata(self): - self._config_az(mgt_net_proxy_ips=["2.2.2.2"]) - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - self.az_name) - - self._config_az(mgt_net_proxy_ips=["2.2.2.2", "3.3.3.3"]) - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - self.az_name) - - -class NsxvAvailabilityZonesOldTestCase(base.BaseTestCase): - """Test old way of configuring the availability zones - - using a one-line configuration instead of different dynamic sections - """ - - def setUp(self): - super(NsxvAvailabilityZonesOldTestCase, self).setUp() - cfg.CONF.set_override("mgt_net_proxy_ips", ["2.2.2.2"], group="nsxv") - cfg.CONF.set_override("dvs_id", "dvs-1", group="nsxv") - - def test_simple_availability_zone(self): - az = nsx_az.NsxVAvailabilityZone( - "name:respool:datastore:true:hastore") - self.assertEqual("name", az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertTrue(az.edge_ha) - self.assertEqual("hastore", az.ha_datastore_id) - self.assertFalse(az.ha_placement_random) - self.assertEqual(DEF_GLOBAL_POOL, az.backup_edge_pool) - # should get the global configuration (which is empty now) - self.assertIsNone(az.external_network) - self.assertIsNone(az.vdn_scope_id) - self.assertEqual("dvs-1", az.dvs_id) - # no metadata per az support - self.assertFalse(az.az_metadata_support) - self.assertIsNone(az.mgt_net_moid) - self.assertEqual([], az.mgt_net_proxy_ips) - self.assertIsNone(az.mgt_net_proxy_netmask) - self.assertIsNone(az.mgt_net_default_gateway) - - def test_availability_zone_without_ha_datastore(self): - az = nsx_az.NsxVAvailabilityZone( - "name:respool:datastore:true") - self.assertEqual("name", az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertTrue(az.edge_ha) - self.assertIsNone(az.ha_datastore_id) - - def test_availability_zone_without_edge_ha(self): - az = nsx_az.NsxVAvailabilityZone( - "name:respool:datastore:FALSE") - self.assertEqual("name", az.name) - self.assertEqual("respool", az.resource_pool) - self.assertEqual("datastore", az.datastore_id) - self.assertFalse(az.edge_ha) - self.assertIsNone(az.ha_datastore_id) - - def test_availability_fail_long_name(self): - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - "very-very-very-very-very-longest-name:respool:da:true:ha") - - def 
test_availability_fail_few_args(self): - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - "name:respool") - - def test_availability_fail_many_args(self): - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - "name:1:2:3:4:5:6") - - def test_availability_fail_bad_edge_ha(self): - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - "name:respool:datastore:truex:hastore") - - def test_availability_fail_no_ha_datastore(self): - self.assertRaises( - nsx_exc.NsxInvalidConfiguration, - nsx_az.NsxVAvailabilityZone, - "name:respool:datastore:false:hastore") diff --git a/vmware_nsx/tests/unit/nsx_v/test_edge_loadbalancer_driver_v2.py b/vmware_nsx/tests/unit/nsx_v/test_edge_loadbalancer_driver_v2.py deleted file mode 100644 index 1d21e49cf9..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/test_edge_loadbalancer_driver_v2.py +++ /dev/null @@ -1,1134 +0,0 @@ -# Copyright 2015 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from oslo_config import cfg - -from neutron.services.flavors import flavors_plugin -from neutron.tests import base -from neutron_lib import context -from neutron_lib import exceptions as n_exc - -from vmware_nsx.db import nsxv_db -from vmware_nsx.services.lbaas import base_mgr -from vmware_nsx.services.lbaas.nsx_v.implementation import healthmon_mgr -from vmware_nsx.services.lbaas.nsx_v.implementation import l7policy_mgr -from vmware_nsx.services.lbaas.nsx_v.implementation import l7rule_mgr -from vmware_nsx.services.lbaas.nsx_v.implementation import listener_mgr -from vmware_nsx.services.lbaas.nsx_v.implementation import loadbalancer_mgr -from vmware_nsx.services.lbaas.nsx_v.implementation import member_mgr -from vmware_nsx.services.lbaas.nsx_v.implementation import pool_mgr -from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common -from vmware_nsx.services.lbaas.octavia import octavia_listener -from vmware_nsx.tests.unit.services.lbaas import lb_data_models as lb_models -from vmware_nsx.tests.unit.services.lbaas import lb_translators - -# TODO(asarfaty): Use octavia api for those tests -LB_VIP = '10.0.0.10' -LB_SUBNET = 'some-subnet' -LB_EDGE_ID = 'edge-x' -LB_ID = 'xxx-xxx' -LB_TENANT_ID = 'yyy-yyy' -LB_VIP_FWR_ID = 'fwr-1' -LB_BINDING = {'loadbalancer_id': LB_ID, - 'edge_id': LB_EDGE_ID, - 'edge_fw_rule_id': LB_VIP_FWR_ID, - 'vip_address': LB_VIP} -LISTENER_ID = 'xxx-111' -EDGE_APP_PROFILE_ID = 'appp-x' -EDGE_APP_PROF_DEF = {'sslPassthrough': False, 'insertXForwardedFor': False, - 'serverSslEnabled': False, 'name': LISTENER_ID, - 'template': 'http', - 'persistence': { - 'cookieMode': 'insert', - 'cookieName': 'default_cookie_name', - 'method': 'cookie'}} -EDGE_VIP_ID = 'vip-aaa' -EDGE_VIP_DEF = {'protocol': 'http', 'name': 'vip_' + LISTENER_ID, - 'connectionLimit': 0, 'defaultPoolId': None, - 'ipAddress': LB_VIP, 'port': 80, 'accelerationEnabled': False, - 'applicationProfileId': EDGE_APP_PROFILE_ID, 
'description': '', - 'enabled': True} -LISTENER_BINDING = {'loadbalancer_id': LB_ID, - 'listener_id': LISTENER_ID, - 'app_profile_id': EDGE_APP_PROFILE_ID, - 'vse_id': EDGE_VIP_ID} -POOL_ID = 'ppp-qqq' -EDGE_POOL_ID = 'pool-xx' -EDGE_POOL_DEF = {'transparent': False, 'name': 'pool_' + POOL_ID, - 'algorithm': 'round-robin', 'description': ''} -POOL_BINDING = {'loadbalancer_id': LB_ID, - 'pool_id': POOL_ID, - 'edge_pool_id': EDGE_POOL_ID} -MEMBER_ID = 'mmm-mmm' -MEMBER_ADDRESS = '10.0.0.200' -EDGE_MEMBER_DEF = {'monitorPort': 80, 'name': 'member-' + MEMBER_ID, - 'weight': 1, 'ipAddress': MEMBER_ADDRESS, 'port': 80, - 'condition': 'disabled'} -POOL_FW_SECT = '10001' -HM_ID = 'hhh-mmm' -EDGE_HM_ID = 'hm-xx' -EDGE_HM_DEF = {'maxRetries': 1, 'interval': 3, 'type': 'icmp', 'name': HM_ID, - 'timeout': 3} - -HM_BINDING = {'loadbalancer_id': LB_ID, - 'pool_id': POOL_ID, - 'hm_id': HM_ID, - 'edge_id': LB_EDGE_ID, - 'edge_mon_id': EDGE_HM_ID} - -L7POL_ID = 'l7pol-l7pol' -EDGE_RULE_ID = 'app-rule-xx' -L7POL_BINDING = {'policy_id': L7POL_ID, - 'edge_id': LB_EDGE_ID, - 'edge_app_rule_id': EDGE_RULE_ID} -EDGE_L7POL_DEF = {'script': 'http-request deny if TRUE', - 'name': 'pol_' + L7POL_ID} - -L7RULE_ID1 = 'l7rule-111' -L7RULE_ID2 = 'l7rule-222' - - -class BaseTestEdgeLbaasV2(base.BaseTestCase): - def _tested_entity(self): - return None - - def completor(self, success=True): - self.last_completor_succees = success - self.last_completor_called = True - - def setUp(self): - super(BaseTestEdgeLbaasV2, self).setUp() - - self.last_completor_succees = False - self.last_completor_called = False - self.context = context.get_admin_context() - self.nsx_v = mock.Mock() - octavia_objects = { - 'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict( - self.nsx_v), - 'listener': listener_mgr.EdgeListenerManagerFromDict(self.nsx_v), - 'pool': pool_mgr.EdgePoolManagerFromDict(self.nsx_v), - 'member': member_mgr.EdgeMemberManagerFromDict(self.nsx_v), - 'healthmonitor': healthmon_mgr.EdgeHealthMonitorManagerFromDict( - self.nsx_v), - 'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(self.nsx_v), - 'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict(self.nsx_v)} - - self.edge_driver = octavia_listener.NSXOctaviaListenerEndpoint( - **octavia_objects) - - self.lbv2_driver = mock.Mock() - self.core_plugin = mock.Mock() - self.flavor_plugin = flavors_plugin.FlavorsPlugin() - base_mgr.LoadbalancerBaseManager._lbv2_driver = self.lbv2_driver - base_mgr.LoadbalancerBaseManager._core_plugin = self.core_plugin - base_mgr.LoadbalancerBaseManager._flavor_plugin = self.flavor_plugin - self._patch_lb_plugin(self.lbv2_driver, self._tested_entity) - - self.lb = lb_models.LoadBalancer(LB_ID, LB_TENANT_ID, 'lb-name', '', - LB_SUBNET, 'port-id', LB_VIP) - self.listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, - 'l-name', '', None, LB_ID, - 'HTTP', protocol_port=80, - loadbalancer=self.lb, - admin_state_up=True) - self.sess_persist = lb_models.SessionPersistence(type='HTTP_COOKIE') - self.pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', - None, 'HTTP', 'ROUND_ROBIN', - loadbalancer_id=LB_ID, - listener=self.listener, - listeners=[self.listener], - loadbalancer=self.lb, - session_persistence=self.sess_persist) - self.listener.default_pool = self.pool - self.member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, - MEMBER_ADDRESS, 80, 1, pool=self.pool) - self.hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, - 1, pool=self.pool) - self.l7policy = lb_models.L7Policy(L7POL_ID, LB_TENANT_ID, - 
name='policy-test', - description='policy-desc', - listener_id=LISTENER_ID, - action='REJECT', - listener=self.listener, - position=1) - self.l7rule1 = lb_models.L7Rule(L7RULE_ID1, LB_TENANT_ID, - l7policy_id=L7POL_ID, - compare_type='EQUAL_TO', - invert=False, - type='HEADER', - key='key1', - value='val1', - policy=self.l7policy) - self.l7rule2 = lb_models.L7Rule(L7RULE_ID2, LB_TENANT_ID, - l7policy_id=L7POL_ID, - compare_type='STARTS_WITH', - invert=True, - type='PATH', - value='/images', - policy=self.l7policy) - - # Translate LBaaS objects to dictionaries - self.lb_dict = lb_translators.lb_loadbalancer_obj_to_dict( - self.lb) - self.listener_dict = lb_translators.lb_listener_obj_to_dict( - self.listener) - self.pool_dict = lb_translators.lb_pool_obj_to_dict( - self.pool) - self.member_dict = lb_translators.lb_member_obj_to_dict( - self.member) - self.hm_dict = lb_translators.lb_hm_obj_to_dict( - self.hm) - self.l7policy_dict = lb_translators.lb_l7policy_obj_to_dict( - self.l7policy) - self.l7rule1_dict = lb_translators.lb_l7rule_obj_to_dict( - self.l7rule1) - self.l7rule2_dict = lb_translators.lb_l7rule_obj_to_dict( - self.l7rule2) - - def tearDown(self): - self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity) - super(BaseTestEdgeLbaasV2, self).tearDown() - - def _patch_lb_plugin(self, lb_plugin, manager): - self.real_manager = getattr(lb_plugin, manager) - lb_manager = mock.patch.object(lb_plugin, manager).start() - mock.patch.object(lb_manager, 'create').start() - mock.patch.object(lb_manager, 'update').start() - mock.patch.object(lb_manager, 'delete').start() - mock.patch.object(lb_manager, 'successful_completion').start() - - def _unpatch_lb_plugin(self, lb_plugin, manager): - setattr(lb_plugin, manager, self.real_manager) - - -class TestEdgeLbaasV2LoadbalancerOnRtr(BaseTestEdgeLbaasV2): - def setUp(self): - super(TestEdgeLbaasV2LoadbalancerOnRtr, self).setUp() - cfg.CONF.set_override('use_routers_as_lbaas_platform', - self._deploy_on_router, group="nsxv") - - @property - def _tested_entity(self): - return 'load_balancer' - - @property - def _edge_getter(self): - return 'get_lbaas_edge_id_for_subnet' - - @property - def _deploy_on_router(self): - return True - - def test_create(self): - with mock.patch.object(lb_common, self._edge_getter - ) as mock_get_edge, \ - mock.patch.object(lb_common, 'add_vip_as_secondary_ip' - ) as mock_vip_sec_ip, \ - mock.patch.object(lb_common, 'add_vip_fw_rule' - ) as mock_add_vip_fwr, \ - mock.patch.object(lb_common, 'set_lb_firewall_default_rule' - ) as mock_set_fw_rule, \ - mock.patch.object(lb_common, 'enable_edge_acceleration' - ) as mock_enable_edge_acceleration, \ - mock.patch.object(nsxv_db, - 'get_nsxv_lbaas_loadbalancer_binding_by_edge' - ) as mock_get_lb_binding_by_edge, \ - mock.patch.object(nsxv_db, 'add_nsxv_lbaas_loadbalancer_binding' - ) as mock_db_binding: - mock_get_edge.return_value = LB_EDGE_ID - mock_add_vip_fwr.return_value = LB_VIP_FWR_ID - mock_get_lb_binding_by_edge.return_value = [] - self.edge_driver.loadbalancer.create( - self.context, self.lb_dict, self.completor) - - if self._deploy_on_router: - mock_vip_sec_ip.assert_called_with(self.edge_driver.pool.vcns, - LB_EDGE_ID, - LB_VIP) - mock_get_edge.assert_called_with(mock.ANY, mock.ANY, - LB_SUBNET, LB_TENANT_ID) - else: - mock_set_fw_rule.assert_called_with( - self.edge_driver.pool.vcns, LB_EDGE_ID, 'accept') - mock_get_edge.assert_called_with(mock.ANY, mock.ANY, LB_ID, - LB_VIP, mock.ANY, - LB_TENANT_ID, mock.ANY) - - 
mock_add_vip_fwr.assert_called_with(self.edge_driver.pool.vcns, - LB_EDGE_ID, - LB_ID, - LB_VIP) - mock_db_binding.assert_called_with(self.context.session, - LB_ID, - LB_EDGE_ID, - LB_VIP_FWR_ID, - LB_VIP) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - mock_enable_edge_acceleration.assert_called_with( - self.edge_driver.pool.vcns, LB_EDGE_ID) - - def test_update(self): - new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb-name', 'heh-huh', - LB_SUBNET, 'port-id', LB_VIP) - new_lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(new_lb) - self.edge_driver.loadbalancer.update( - self.context, self.lb_dict, new_lb_dict, self.completor) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete_old(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_binding, \ - mock.patch.object(lb_common, 'del_vip_fw_rule') as mock_del_fwr, \ - mock.patch.object(lb_common, 'del_vip_as_secondary_ip' - ) as mock_vip_sec_ip, \ - mock.patch.object(lb_common, 'set_lb_firewall_default_rule' - ) as mock_set_fw_rule, \ - mock.patch.object(nsxv_db, 'del_nsxv_lbaas_loadbalancer_binding', - ) as mock_del_binding, \ - mock.patch.object(self.core_plugin, 'get_ports' - ) as mock_get_ports, \ - mock.patch.object(self.core_plugin, 'get_router', - return_value={'router_type': 'exclusive'}), \ - mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' - ) as mock_get_r_binding: - mock_get_binding.return_value = LB_BINDING - mock_get_ports.return_value = [] - mock_get_r_binding.return_value = {'router_id': 'xxxx'} - self.edge_driver.loadbalancer.delete( - self.context, self.lb_dict, self.completor) - - mock_del_fwr.assert_called_with(self.edge_driver.pool.vcns, - LB_EDGE_ID, - LB_VIP_FWR_ID) - mock_vip_sec_ip.assert_called_with(self.edge_driver.pool.vcns, - LB_EDGE_ID, - LB_VIP) - mock_del_binding.assert_called_with(self.context.session, - LB_ID) - mock_set_fw_rule.assert_called_with( - self.edge_driver.pool.vcns, LB_EDGE_ID, 'deny') - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete_new(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_binding, \ - mock.patch.object(lb_common, 'set_lb_firewall_default_rule' - ) as mock_set_fw_rule, \ - mock.patch.object(nsxv_db, 'del_nsxv_lbaas_loadbalancer_binding', - ) as mock_del_binding, \ - mock.patch.object(self.core_plugin, 'get_ports' - ) as mock_get_ports, \ - mock.patch.object(self.core_plugin.edge_manager, 'delete_lrouter' - ) as mock_delete_lrouter, \ - mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' - ) as mock_get_r_binding: - mock_get_binding.return_value = LB_BINDING - mock_get_ports.return_value = [] - router_id = 'lbaas-xxxx' - mock_get_r_binding.return_value = {'router_id': router_id} - self.edge_driver.loadbalancer.delete( - self.context, self.lb_dict, self.completor) - - mock_del_binding.assert_called_with(self.context.session, - LB_ID) - mock_set_fw_rule.assert_called_with( - self.edge_driver.pool.vcns, LB_EDGE_ID, 'deny') - mock_delete_lrouter.assert_called_with( - mock.ANY, 'lbaas-' + LB_ID, dist=False) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - -class TestEdgeLbaasV2LoadbalancerOnEdge(TestEdgeLbaasV2LoadbalancerOnRtr): - @property - def _edge_getter(self): - return 'get_lbaas_edge_id' - - @property - def _deploy_on_router(self): - return False - - 
def setUp(self): - super(TestEdgeLbaasV2LoadbalancerOnEdge, self).setUp() - - def test_create_with_flavor(self): - flavor_name = 'large' - with mock.patch.object(lb_common, 'get_lbaas_edge_id' - ) as mock_get_edge, \ - mock.patch.object(lb_common, 'add_vip_fw_rule' - ) as mock_add_vip_fwr, \ - mock.patch.object(lb_common, 'set_lb_firewall_default_rule' - ) as mock_set_fw_rule, \ - mock.patch.object(lb_common, 'enable_edge_acceleration' - ) as mock_enable_edge_acceleration, \ - mock.patch.object(nsxv_db, - 'get_nsxv_lbaas_loadbalancer_binding_by_edge' - ) as mock_get_lb_binding_by_edge, \ - mock.patch.object(nsxv_db, 'add_nsxv_lbaas_loadbalancer_binding' - ) as mock_db_binding,\ - mock.patch('neutron.services.flavors.flavors_plugin.FlavorsPlugin.' - 'get_flavor', return_value={'name': flavor_name}): - mock_get_edge.return_value = LB_EDGE_ID - mock_add_vip_fwr.return_value = LB_VIP_FWR_ID - mock_get_lb_binding_by_edge.return_value = [] - self.lb.flavor_id = 'dummy' - lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(self.lb) - self.edge_driver.loadbalancer.create( - self.context, lb_dict, self.completor) - - mock_add_vip_fwr.assert_called_with(self.edge_driver.pool.vcns, - LB_EDGE_ID, - LB_ID, - LB_VIP) - mock_db_binding.assert_called_with(self.context.session, - LB_ID, - LB_EDGE_ID, - LB_VIP_FWR_ID, - LB_VIP) - mock_set_fw_rule.assert_called_with( - self.edge_driver.pool.vcns, LB_EDGE_ID, 'accept') - mock_get_edge.assert_called_with( - mock.ANY, mock.ANY, LB_ID, LB_VIP, - mock.ANY, LB_TENANT_ID, flavor_name) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - mock_enable_edge_acceleration.assert_called_with( - self.edge_driver.pool.vcns, LB_EDGE_ID) - self.lb.flavor_id = None - - def test_create_with_illegal_flavor(self): - flavor_name = 'no_size' - with mock.patch.object(lb_common, 'get_lbaas_edge_id' - ) as mock_get_edge, \ - mock.patch.object(lb_common, 'add_vip_fw_rule' - ) as mock_add_vip_fwr, \ - mock.patch.object(nsxv_db, - 'get_nsxv_lbaas_loadbalancer_binding_by_edge' - ) as mock_get_lb_binding_by_edge, \ - mock.patch('neutron.services.flavors.flavors_plugin.FlavorsPlugin.' 
- 'get_flavor', return_value={'name': flavor_name}): - mock_get_edge.return_value = LB_EDGE_ID - mock_add_vip_fwr.return_value = LB_VIP_FWR_ID - mock_get_lb_binding_by_edge.return_value = [] - self.lb.flavor_id = 'dummy' - lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(self.lb) - self.assertRaises( - n_exc.InvalidInput, - self.edge_driver.loadbalancer.create, - self.context, lb_dict, self.completor) - self.lb.flavor_id = None - - -class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2): - def setUp(self): - super(TestEdgeLbaasV2Listener, self).setUp() - - @property - def _tested_entity(self): - return 'listener' - - def test_create(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'create_app_profile' - ) as mock_create_app_prof, \ - mock.patch.object(self.edge_driver.pool.vcns, 'create_vip' - ) as mock_create_vip, \ - mock.patch.object(nsxv_db, 'add_nsxv_lbaas_listener_binding' - ) as mock_add_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding', - return_value=None): - mock_get_lb_binding.return_value = LB_BINDING - mock_create_app_prof.return_value = ( - {'location': 'x/' + EDGE_APP_PROFILE_ID}, None) - mock_create_vip.return_value = ( - {'location': 'x/' + EDGE_VIP_ID}, None) - - self.edge_driver.listener.create( - self.context, self.listener_dict, self.completor) - - mock_create_app_prof.assert_called_with(LB_EDGE_ID, - EDGE_APP_PROF_DEF) - mock_create_vip.assert_called_with(LB_EDGE_ID, - EDGE_VIP_DEF) - mock_add_binding.assert_called_with( - self.context.session, LB_ID, LISTENER_ID, EDGE_APP_PROFILE_ID, - EDGE_VIP_ID) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_update(self): - new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, - 'l-name', '', None, LB_ID, - 'HTTP', protocol_port=8000, - loadbalancer=self.lb, - admin_state_up=True) - new_listener.default_pool = self.pool - new_listener_dict = lb_translators.lb_listener_obj_to_dict( - new_listener) - - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' - ) as mock_get_listener_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding', - return_value=None), \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' - ) as mock_upd_app_prof, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' - ) as mock_upd_vip: - mock_get_listener_binding.return_value = LISTENER_BINDING - mock_get_lb_binding.return_value = LB_BINDING - - self.edge_driver.listener.update( - self.context, self.listener_dict, new_listener_dict, - self.completor) - - mock_upd_app_prof.assert_called_with(LB_EDGE_ID, - EDGE_APP_PROFILE_ID, - EDGE_APP_PROF_DEF) - - edge_vip_def = EDGE_VIP_DEF.copy() - edge_vip_def['port'] = 8000 - mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, - edge_vip_def) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' - ) as mock_get_listener_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'delete_vip' - ) as mock_del_vip, \ - mock.patch.object(self.edge_driver.pool.vcns, 'delete_app_profile' - ) as mock_del_app_prof, \ - mock.patch.object(nsxv_db, 
'del_nsxv_lbaas_listener_binding' - ) as mock_del_binding: - mock_get_listener_binding.return_value = LISTENER_BINDING - mock_get_lb_binding.return_value = LB_BINDING - - self.edge_driver.listener.delete( - self.context, self.listener_dict, self.completor) - - mock_del_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID) - mock_del_app_prof.assert_called_with(LB_EDGE_ID, - EDGE_APP_PROFILE_ID) - mock_del_binding.assert_called_with(self.context.session, - LB_ID, LISTENER_ID) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - -class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2): - def setUp(self): - super(TestEdgeLbaasV2Pool, self).setUp() - - @property - def _tested_entity(self): - return 'pool' - - def test_create(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' - ) as mock_get_listener_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'create_pool' - ) as mock_create_pool, \ - mock.patch.object(nsxv_db, 'add_nsxv_lbaas_pool_binding' - ) as mock_add_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' - ) as mock_upd_vip,\ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' - ) as mock_upd_app_prof: - mock_get_listener_binding.return_value = LISTENER_BINDING - mock_get_lb_binding.return_value = LB_BINDING - mock_create_pool.return_value = ( - {'location': 'x/' + EDGE_POOL_ID}, None) - - self.edge_driver.pool.create( - self.context, self.pool_dict, self.completor) - - mock_create_pool.assert_called_with(LB_EDGE_ID, - EDGE_POOL_DEF.copy()) - mock_add_binding.assert_called_with(self.context.session, - LB_ID, POOL_ID, EDGE_POOL_ID) - edge_vip_def = EDGE_VIP_DEF.copy() - edge_vip_def['defaultPoolId'] = EDGE_POOL_ID - mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, - edge_vip_def) - mock_upd_app_prof.assert_called_with(LB_EDGE_ID, - EDGE_APP_PROFILE_ID, - EDGE_APP_PROF_DEF) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_update(self): - new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', - None, 'HTTP', 'LEAST_CONNECTIONS', - listener=self.listener) - new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool) - list_bind = {'app_profile_id': EDGE_APP_PROFILE_ID} - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding,\ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding', - return_value=list_bind),\ - mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' - ) as mock_upd_pool,\ - mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' - ) as mock_get_pool,\ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' - ) as mock_upd_app_prof: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - fake_edge = EDGE_POOL_DEF.copy() - fake_edge['monitorId'] = 'monitor-7' - fake_edge['member'] = ['member1', 'member2'] - mock_get_pool.return_value = (None, fake_edge) - self.edge_driver.pool.update( - self.context, self.pool_dict, new_pool_dict, self.completor) - - edge_pool_def = EDGE_POOL_DEF.copy() - edge_pool_def['algorithm'] = 'leastconn' - edge_pool_def['monitorId'] = 'monitor-7' - edge_pool_def['member'] = ['member1', 'member2'] - mock_upd_pool.assert_called_with( - LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) - 
mock_upd_app_prof.assert_called_with(LB_EDGE_ID, - EDGE_APP_PROFILE_ID, - EDGE_APP_PROF_DEF) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding,\ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' - ) as mock_get_listener_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' - ) as mock_upd_vip, \ - mock.patch.object(self.edge_driver.pool.vcns, 'delete_pool' - ) as mock_del_pool, \ - mock.patch.object(nsxv_db, 'del_nsxv_lbaas_pool_binding' - ) as mock_del_binding,\ - mock.patch.object(lb_common, 'is_lb_on_router_edge' - ) as mock_lb_router, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' - ): - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - mock_get_listener_binding.return_value = LISTENER_BINDING - mock_lb_router.return_value = False - - self.edge_driver.pool.delete( - self.context, self.pool_dict, self.completor) - - mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, - EDGE_VIP_DEF) - mock_del_pool.assert_called_with(LB_EDGE_ID, EDGE_POOL_ID) - mock_del_binding.assert_called_with( - self.context.session, LB_ID, POOL_ID) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - -def _nsx_member(ip_address): - return {'ipAddress': ip_address, - 'weight': 1, - 'port': 80, - 'monitorPort': 80, - 'name': 'member-test', - 'condition': 'enabled'} - - -def _lbaas_member(ip_address): - return {'address': ip_address, - 'weight': 1, - 'protocol_port': 80, - 'id': 'test', - 'admin_state_up': True} - - -class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2): - def setUp(self): - super(TestEdgeLbaasV2Member, self).setUp() - - @property - def _tested_entity(self): - return 'member' - - def test_create(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' - ), \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' - ) as mock_get_pool, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' - ) as mock_update_pool: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy()) - - self.edge_driver.member.create( - self.context, self.member_dict, self.completor) - - edge_pool_def = EDGE_POOL_DEF.copy() - edge_pool_def['member'] = [EDGE_MEMBER_DEF] - mock_update_pool.assert_called_with( - LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_update(self): - new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, - MEMBER_ADDRESS, 8000, 1, True, - pool=self.pool) - new_member_dict = lb_translators.lb_member_obj_to_dict(new_member) - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' - ) as mock_get_pool, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' - ) as mock_update_pool: - 
mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - edge_pool_def = EDGE_POOL_DEF.copy() - edge_pool_def['member'] = [EDGE_MEMBER_DEF] - mock_get_pool.return_value = (None, edge_pool_def) - new_member_dict['pool']['members'] = [{'address': MEMBER_ADDRESS}] - self.edge_driver.member.update( - self.context, self.member_dict, - new_member_dict, self.completor) - - edge_member_def = EDGE_MEMBER_DEF.copy() - edge_member_def['port'] = 8000 - edge_member_def['monitorPort'] = 8000 - edge_member_def['condition'] = 'enabled' - edge_pool_def['member'] = [edge_member_def] - mock_update_pool.assert_called_with( - LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' - ) as mock_get_pool, \ - mock.patch.object(self.core_plugin, 'get_ports' - ) as mock_get_ports, \ - mock.patch.object(lb_common, 'is_lb_on_router_edge' - ) as mock_lb_router, \ - mock.patch.object(lb_common, 'delete_lb_interface' - ) as mock_del_lb_iface, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' - ) as mock_update_pool: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - mock_lb_router.return_value = False - edge_pool_def = EDGE_POOL_DEF.copy() - edge_pool_def['member'] = [EDGE_MEMBER_DEF] - mock_get_pool.return_value = (None, edge_pool_def) - mock_get_ports.return_value = [] - self.edge_driver.member.delete( - self.context, self.member_dict, self.completor) - - edge_pool_def['member'] = [] - mock_update_pool.assert_called_with( - LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) - mock_del_lb_iface.assert_called_with( - self.context, self.core_plugin, LB_ID, None) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def _do_member_validation_test(self, in_ips, in_edge_ips, out_edge_ips): - pool = self.pool_dict.copy() - edge_pool = EDGE_POOL_DEF.copy() - - pool['members'] = [_lbaas_member(m) for m in in_ips] - - edge_pool['member'] = [_nsx_member(m) for m in in_edge_ips] - - member_mgr._validate_pool_members(pool, edge_pool) - self.assertEqual(edge_pool['member'], [_nsx_member(m) - for m in out_edge_ips]) - - def test_validate_pool_members_valid_lists(self): - self._do_member_validation_test(['10.0.0.10', '10.0.0.11'], - ['10.0.0.10', '10.0.0.11'], - ['10.0.0.10', '10.0.0.11']) - - def test_validate_pool_members_nsx_extra(self): - self._do_member_validation_test(['10.0.0.10'], - ['10.0.0.10', '10.0.0.11'], - ['10.0.0.10']) - - def test_validate_pool_members_lbaas_extra(self): - self._do_member_validation_test(['10.0.0.10', '10.0.0.11'], - ['10.0.0.10'], - ['10.0.0.10', '10.0.0.11']) - - -class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2): - def setUp(self): - super(TestEdgeLbaasV2HealthMonitor, self).setUp() - - @property - def _tested_entity(self): - return 'health_monitor' - - def test_create(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' - ) as mock_get_mon_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 
- 'create_health_monitor') as mock_create_hm, \ - mock.patch.object(nsxv_db, 'add_nsxv_lbaas_monitor_binding' - ) as mock_add_hm_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' - ) as mock_get_pool, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' - ) as mock_update_pool: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - mock_get_mon_binding.return_value = None - mock_create_hm.return_value = ( - {'location': 'x/' + EDGE_HM_ID}, None) - mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy()) - - self.edge_driver.healthmonitor.create( - self.context, self.hm_dict, self.completor) - - mock_create_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_DEF) - mock_add_hm_binding.assert_called_with( - self.context.session, LB_ID, POOL_ID, HM_ID, LB_EDGE_ID, - EDGE_HM_ID) - edge_pool_def = EDGE_POOL_DEF.copy() - edge_pool_def['monitorId'] = [EDGE_HM_ID] - mock_update_pool.assert_called_with( - LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_update(self): - new_hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, - 3, pool=self.pool) - new_hm_dict = lb_translators.lb_hm_obj_to_dict(new_hm) - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' - ) as mock_get_mon_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, - 'update_health_monitor') as mock_upd_hm: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - mock_get_mon_binding.return_value = HM_BINDING - - self.edge_driver.healthmonitor.update( - self.context, self.hm_dict, new_hm_dict, self.completor) - - edge_hm_def = EDGE_HM_DEF.copy() - edge_hm_def['maxRetries'] = 3 - mock_upd_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID, edge_hm_def) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' - ) as mock_get_mon_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, - 'delete_health_monitor') as mock_del_hm, \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' - ) as mock_get_pool, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' - ) as mock_update_pool, \ - mock.patch.object(nsxv_db, 'del_nsxv_lbaas_monitor_binding' - ) as mock_del_binding: - - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - mock_get_mon_binding.return_value = HM_BINDING - edge_pool_def = EDGE_POOL_DEF.copy() - edge_pool_def['monitorId'] = [EDGE_HM_ID] - mock_get_pool.return_value = (None, edge_pool_def) - - self.edge_driver.healthmonitor.delete( - self.context, self.hm_dict, self.completor) - - mock_del_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID) - edge_pool_def['monitorId'] = [] - mock_update_pool.assert_called_with( - LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) - mock_del_binding.assert_called_with(self.context.session, LB_ID, - POOL_ID, HM_ID, LB_EDGE_ID) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) 
- - -class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2): - def setUp(self): - super(TestEdgeLbaasV2L7Policy, self).setUp() - - @property - def _tested_entity(self): - return 'l7policy' - - def test_create(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' - ) as mock_get_l7policy_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' - ) as mock_get_listener_binding, \ - mock.patch.object(nsxv_db, 'add_nsxv_lbaas_l7policy_binding' - ) as mock_add_l7policy_binding,\ - mock.patch.object(self.edge_driver.pool.vcns, 'create_app_rule' - ) as mock_create_rule, \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_vip' - ) as mock_get_vip, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' - ) as mock_upd_vip: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_l7policy_binding.return_value = L7POL_BINDING - mock_get_listener_binding.return_value = LISTENER_BINDING - mock_create_rule.return_value = ( - {'location': 'x/' + EDGE_RULE_ID}, None) - mock_get_vip.return_value = (None, EDGE_VIP_DEF.copy()) - - self.edge_driver.l7policy.create( - self.context, self.l7policy_dict, self.completor) - - mock_create_rule.assert_called_with(LB_EDGE_ID, - EDGE_L7POL_DEF.copy()) - mock_add_l7policy_binding.assert_called_with( - self.context.session, L7POL_ID, LB_EDGE_ID, EDGE_RULE_ID) - - edge_vip_def = EDGE_VIP_DEF.copy() - edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] - mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, - edge_vip_def) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_update(self): - url = 'http://www.test.com' - new_pol = lb_models.L7Policy(L7POL_ID, LB_TENANT_ID, - name='policy-test', - description='policy-desc', - listener_id=LISTENER_ID, - action='REDIRECT_TO_URL', - redirect_url=url, - listener=self.listener, - position=2) - new_pol_dict = lb_translators.lb_l7policy_obj_to_dict(new_pol) - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' - ) as mock_get_l7policy_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' - ) as mock_get_listener_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_vip' - ) as mock_get_vip, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' - ) as mock_upd_vip, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' - ) as mock_update_rule: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_l7policy_binding.return_value = L7POL_BINDING - mock_get_listener_binding.return_value = LISTENER_BINDING - edge_vip_def = EDGE_VIP_DEF.copy() - edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] - mock_get_vip.return_value = (None, edge_vip_def) - - self.edge_driver.l7policy.update( - self.context, self.l7policy_dict, - new_pol_dict, self.completor) - - edge_rule_def = EDGE_L7POL_DEF.copy() - edge_rule_def['script'] = "redirect location %s if TRUE" % url - mock_update_rule.assert_called_with( - LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) - mock_upd_vip.assert_called() - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' - ) as mock_get_l7policy_binding, \ - mock.patch.object(nsxv_db, 'del_nsxv_lbaas_l7policy_binding' - ) as mock_del_l7policy_binding, 
\ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' - ) as mock_get_lb_binding, \ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' - ) as mock_get_pool_binding,\ - mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' - ) as mock_get_listener_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'delete_app_rule' - ) as mock_del_app_rule, \ - mock.patch.object(self.edge_driver.pool.vcns, 'get_vip' - ) as mock_get_vip, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' - ) as mock_upd_vip: - mock_get_lb_binding.return_value = LB_BINDING - mock_get_pool_binding.return_value = POOL_BINDING - mock_get_listener_binding.return_value = LISTENER_BINDING - mock_get_l7policy_binding.return_value = L7POL_BINDING - edge_vip_def = EDGE_VIP_DEF.copy() - edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] - mock_get_vip.return_value = (None, edge_vip_def) - - self.edge_driver.l7policy.delete( - self.context, self.l7policy_dict, self.completor) - - edge_vip_def2 = EDGE_VIP_DEF.copy() - edge_vip_def2['applicationRuleId'] = [] - mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, - edge_vip_def2) - mock_del_app_rule.assert_called_with(LB_EDGE_ID, EDGE_RULE_ID) - mock_del_l7policy_binding.assert_called_with( - self.context.session, L7POL_ID) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - -class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2): - def setUp(self): - super(TestEdgeLbaasV2L7Rule, self).setUp() - - @property - def _tested_entity(self): - return 'l7rule' - - def test_create(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' - ) as mock_get_l7policy_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' - ) as mock_update_rule: - mock_get_l7policy_binding.return_value = L7POL_BINDING - - # Create the first rule - self.l7rule1.policy.rules = [self.l7rule1] - rule1_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule1) - self.edge_driver.l7rule.create( - self.context, rule1_dict, self.completor) - - edge_rule_def = EDGE_L7POL_DEF.copy() - edge_rule_def['script'] = ( - "acl %(rule_id)s hdr(key1) -i val1\n" - "http-request deny if %(rule_id)s" % - {'rule_id': L7RULE_ID1}) - mock_update_rule.assert_called_with( - LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) - - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - # Create the 2nd rule - self.l7rule2.policy.rules = [self.l7rule1, self.l7rule2] - rule2_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule2) - self.edge_driver.l7rule.create( - self.context, rule2_dict, self.completor) - - edge_rule_def = EDGE_L7POL_DEF.copy() - edge_rule_def['script'] = ( - "acl %(rule_id1)s hdr(key1) -i val1\n" - "acl %(rule_id2)s path_beg -i /images\n" - "http-request deny if %(rule_id1)s !%(rule_id2)s" % - {'rule_id1': L7RULE_ID1, - 'rule_id2': L7RULE_ID2}) - mock_update_rule.assert_called_with( - LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_update(self): - new_rule = lb_models.L7Rule(L7RULE_ID1, LB_TENANT_ID, - l7policy_id=L7POL_ID, - compare_type='EQUAL_TO', - invert=False, - type='HEADER', - key='key2', - value='val1', - policy=self.l7policy) - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' - ) as mock_get_l7policy_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' - ) as mock_update_rule: - mock_get_l7policy_binding.return_value = 
L7POL_BINDING - - new_rule.policy.rules = [new_rule] - new_rule_dict = lb_translators.lb_l7rule_obj_to_dict(new_rule) - self.edge_driver.l7rule.update( - self.context, self.l7rule1_dict, new_rule_dict, - self.completor) - - edge_rule_def = EDGE_L7POL_DEF.copy() - edge_rule_def['script'] = ( - "acl %(rule_id)s hdr(key2) -i val1\n" - "http-request deny if %(rule_id)s" % - {'rule_id': L7RULE_ID1}) - mock_update_rule.assert_called_with( - LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) - - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) - - def test_delete(self): - with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' - ) as mock_get_l7policy_binding, \ - mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' - ) as mock_update_rule: - mock_get_l7policy_binding.return_value = L7POL_BINDING - - self.l7rule1.policy.rules = [] - rule_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule1) - self.edge_driver.l7rule.delete( - self.context, rule_dict, self.completor) - - edge_rule_def = EDGE_L7POL_DEF.copy() - edge_rule_def['script'] = ( - "http-request deny if TRUE") - mock_update_rule.assert_called_with( - LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) - - self.assertTrue(self.last_completor_called) - self.assertTrue(self.last_completor_succees) diff --git a/vmware_nsx/tests/unit/nsx_v/test_fwaas_v2_driver.py b/vmware_nsx/tests/unit/nsx_v/test_fwaas_v2_driver.py deleted file mode 100644 index 8407dba1dc..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/test_fwaas_v2_driver.py +++ /dev/null @@ -1,402 +0,0 @@ -# Copyright 2018 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -from unittest import mock - -from neutron_lib.plugins import directory - -from vmware_nsx.db import nsxv_models -from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver -from vmware_nsx.plugins.nsx_v.vshield import edge_utils -from vmware_nsx.services.fwaas.nsx_v import edge_fwaas_driver_v2 -from vmware_nsx.services.fwaas.nsx_v import fwaas_callbacks_v2 -from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin - -FAKE_FW_ID = 'fake_fw_uuid' -FAKE_ROUTER_ID = 'fake_rtr_uuid' -FAKE_PORT_ID = 'fake_port_uuid' -FAKE_NET_ID = 'fake_net_uuid' -FAKE_DB_OBJ = nsxv_models.NsxvEdgeVnicBinding(vnic_index='1') - - -class NsxvFwaasTestCase(test_v_plugin.NsxVPluginV2TestCase): - def setUp(self): - super(NsxvFwaasTestCase, self).setUp() - self.firewall = edge_fwaas_driver_v2.EdgeFwaasVDriverV2() - - self.plugin = directory.get_plugin() - self.plugin.fwaas_callbacks = fwaas_callbacks_v2.\ - NsxvFwaasCallbacksV2(False) - self.plugin.fwaas_callbacks.fwaas_enabled = True - self.plugin.fwaas_callbacks.fwaas_driver = self.firewall - self.plugin.fwaas_callbacks.internal_driver = self.firewall - self.plugin.init_is_complete = True - self.plugin.metadata_proxy_handler = None - - # Start some mocks - self.router = {'id': FAKE_ROUTER_ID, - 'external_gateway_info': {'network_id': 'external'}, - 'nsx_attributes': {'distributed': False, - 'router_type': 'exclusive'}} - self.distributed_router = {'id': FAKE_ROUTER_ID, - 'external_gateway_info': {'network_id': 'external'}, - 'nsx_attributes': {'distributed': True, - 'router_type': 'exclusive'}} - mock.patch.object(self.plugin, '_get_router', - return_value=self.router).start() - mock.patch.object(self.plugin, 'get_router', - return_value=self.router).start() - self.port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} - mock.patch.object(self.plugin, '_get_router_interfaces', - return_value=[self.port]).start() - mock.patch.object(self.plugin, 'get_port', - return_value=self.port).start() - mock.patch.object(self.plugin, '_get_subnet_fw_rules', - return_value=[]).start() - mock.patch.object(self.plugin, '_get_firewall_icmpv6_rules', - return_value=[]).start() - mock.patch.object(self.plugin, '_get_dnat_fw_rule', - return_value=[]).start() - mock.patch.object(self.plugin, '_get_allocation_pools_fw_rule', - return_value=[]).start() - mock.patch.object(self.plugin, '_get_nosnat_subnets_fw_rules', - return_value=[]).start() - - def _fake_rules_v4(self, is_ingress=True, is_conflict=False, - cidr='10.24.4.0/24'): - rule1 = {'enabled': True, - 'action': 'allow', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '80', - 'id': 'fake-fw-rule1', - 'description': 'first rule', - 'position': '0'} - rule2 = {'enabled': True, - 'action': 'reject', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '22:24', - 'source_port': '1:65535', - 'id': 'fake-fw-rule2', - 'position': '1'} - rule3 = {'enabled': True, - 'action': 'deny', - 'ip_version': 4, - 'protocol': 'icmp', - 'id': 'fake-fw-rule3', - 'position': '2'} - rule4 = {'enabled': True, - 'action': 'deny', - 'ip_version': 4, - 'id': 'fake-fw-rule4', - 'position': '3'} - if is_ingress: - if not is_conflict: - rule1['source_ip_address'] = cidr - else: - rule1['destination_ip_address'] = cidr - else: - if not is_conflict: - rule1['destination_ip_address'] = cidr - else: - rule1['source_ip_address'] = cidr - - return [rule1, rule2, rule3, rule4] - - def _fake_translated_rules(self, rules_list, - nsx_port_id, - is_ingress=True, - logged=False, fwg_id=None): - translated_rules = 
copy.copy(rules_list) - for rule in translated_rules: - if logged: - rule['logged'] = True - if is_ingress: - if (not rule.get('destination_ip_address') or - rule['destination_ip_address'].startswith('0.0.0.0')): - if nsx_port_id: - rule['destination_vnic_groups'] = [nsx_port_id] - else: - if (not rule.get('source_ip_address') or - rule['source_ip_address'].startswith('0.0.0.0')): - if nsx_port_id: - rule['source_vnic_groups'] = [nsx_port_id] - if rule.get('destination_ip_address'): - if rule['destination_ip_address'].startswith('0.0.0.0'): - del rule['destination_ip_address'] - else: - rule['destination_ip_address'] = [ - rule['destination_ip_address']] - if rule.get('source_ip_address'): - if rule['source_ip_address'].startswith('0.0.0.0'): - del rule['source_ip_address'] - else: - rule['source_ip_address'] = [ - rule['source_ip_address']] - rule['name'] = (fwaas_callbacks_v2.RULE_NAME_PREFIX + - (rule.get('name') or rule['id']))[:30] - if rule.get('id'): - if is_ingress: - rule['id'] = ('ingress-%s-%s' % (nsx_port_id or fwg_id, - rule['id']))[:36] - else: - rule['id'] = ('egress-%s-%s' % (nsx_port_id or fwg_id, - rule['id']))[:36] - - return translated_rules - - def _fake_empty_firewall_group(self): - fw_inst = {'id': FAKE_FW_ID, - 'admin_state_up': True, - 'tenant_id': 'tenant-uuid', - 'ingress_rule_list': [], - 'egress_rule_list': []} - return fw_inst - - def _fake_firewall_group(self, rule_list, is_ingress=True, - admin_state_up=True): - _rule_list = copy.deepcopy(rule_list) - for rule in _rule_list: - rule['position'] = str(_rule_list.index(rule)) - fw_inst = {'id': FAKE_FW_ID, - 'admin_state_up': admin_state_up, - 'tenant_id': 'tenant-uuid', - 'ingress_rule_list': [], - 'egress_rule_list': []} - if is_ingress: - fw_inst['ingress_rule_list'] = _rule_list - else: - fw_inst['egress_rule_list'] = _rule_list - return fw_inst - - def _fake_firewall_group_with_admin_down(self, rule_list, - is_ingress=True): - return self._fake_firewall_group( - rule_list, is_ingress=is_ingress, admin_state_up=False) - - def _fake_apply_list_template(self, router): - router_inst = router - router_info_inst = mock.Mock() - router_info_inst.router = router_inst - router_info_inst.router_id = FAKE_ROUTER_ID - apply_list = [(router_info_inst, FAKE_PORT_ID)] - return apply_list - - def _fake_apply_list(self): - return self._fake_apply_list_template(self.router) - - def _fake_distributed_apply_list(self): - return self._fake_apply_list_template(self.distributed_router) - - def test_create_firewall_no_rules(self): - apply_list = self._fake_apply_list() - firewall = self._fake_empty_firewall_group() - with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', - return_value=firewall),\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_port_firewall_group_id', - return_value=FAKE_FW_ID),\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_fw_group_from_plugin', - return_value=firewall),\ - mock.patch("vmware_nsx.db.nsxv_db.get_edge_vnic_binding", - return_value=FAKE_DB_OBJ),\ - mock.patch.object(edge_utils, "update_firewall") as update_fw,\ - mock.patch.object(edge_utils, 'get_router_edge_id', - return_value='edge-1'): - self.firewall.create_firewall_group('nsx', apply_list, firewall) - # expecting 2 block rules for the logical port (egress & ingress) - # and last default allow all rule - expected_rules = [ - {'name': "Block port ingress", - 'action': edge_firewall_driver.FWAAS_DENY, - 'destination_vnic_groups': ['vnic-index-1'], - 'logged': False}, - {'name': "Block port egress", - 'action': 
edge_firewall_driver.FWAAS_DENY, - 'source_vnic_groups': ['vnic-index-1'], - 'logged': False}] - update_fw.assert_called_once_with( - self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, - {'firewall_rule_list': expected_rules}) - - def _setup_firewall_with_rules(self, func, is_ingress=True, - is_conflict=False, cidr='10.24.4.0/24'): - apply_list = self._fake_apply_list() - rule_list = self._fake_rules_v4(is_ingress=is_ingress, - is_conflict=is_conflict, cidr=cidr) - firewall = self._fake_firewall_group(rule_list, is_ingress=is_ingress) - with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', - return_value=firewall),\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_port_firewall_group_id', - return_value=FAKE_FW_ID),\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_fw_group_from_plugin', - return_value=firewall),\ - mock.patch("vmware_nsx.db.nsxv_db.get_edge_vnic_binding", - return_value=FAKE_DB_OBJ),\ - mock.patch.object(edge_utils, "update_firewall") as update_fw,\ - mock.patch.object(edge_utils, 'get_router_edge_id', - return_value='edge-1'): - func('nsx', apply_list, firewall) - expected_rules = self._fake_translated_rules( - rule_list, - 'vnic-index-1', is_ingress=is_ingress) + [ - {'name': "Block port ingress", - 'action': edge_firewall_driver.FWAAS_DENY, - 'destination_vnic_groups': ['vnic-index-1'], - 'logged': False}, - {'name': "Block port egress", - 'action': edge_firewall_driver.FWAAS_DENY, - 'source_vnic_groups': ['vnic-index-1'], - 'logged': False}] - - update_fw.assert_called_once_with( - self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, - {'firewall_rule_list': expected_rules}) - - def test_create_firewall_with_ingress_rules(self): - self._setup_firewall_with_rules(self.firewall.create_firewall_group) - - def test_update_firewall_with_ingress_rules(self): - self._setup_firewall_with_rules(self.firewall.update_firewall_group) - - def test_create_firewall_with_egress_rules(self): - self._setup_firewall_with_rules(self.firewall.create_firewall_group, - is_ingress=False) - - def test_create_firewall_with_illegal_cidr(self): - self._setup_firewall_with_rules(self.firewall.create_firewall_group, - cidr='0.0.0.0/24') - - def test_update_firewall_with_egress_rules(self): - self._setup_firewall_with_rules(self.firewall.update_firewall_group, - is_ingress=False) - - def test_update_firewall_with_egress_conflicting_rules(self): - self._setup_firewall_with_rules(self.firewall.update_firewall_group, - is_ingress=False, is_conflict=True) - - def test_update_firewall_with_ingress_conflicting_rules(self): - self._setup_firewall_with_rules(self.firewall.update_firewall_group, - is_ingress=True, is_conflict=True) - - def test_delete_firewall(self): - apply_list = self._fake_apply_list() - firewall = self._fake_empty_firewall_group() - with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', - return_value=None),\ - mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", - return_value=('vnic-index-1', 0)),\ - mock.patch.object(edge_utils, "update_firewall") as update_fw,\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_port_firewall_group_id', - return_value=None),\ - mock.patch.object(edge_utils, 'get_router_edge_id', - return_value='edge-1'): - self.firewall.delete_firewall_group('nsx', apply_list, firewall) - update_fw.assert_called_once_with( - self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, - {'firewall_rule_list': []}) - - def test_create_firewall_with_admin_down(self): - apply_list = self._fake_apply_list() - rule_list = self._fake_rules_v4() - 
firewall = self._fake_firewall_group_with_admin_down(rule_list) - with mock.patch.object(edge_utils, "update_firewall") as update_fw,\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_port_firewall_group_id', - return_value=None),\ - mock.patch.object(edge_utils, 'get_router_edge_id', - return_value='edge-1'): - self.firewall.create_firewall_group('nsx', apply_list, firewall) - update_fw.assert_called_once_with( - self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, - {'firewall_rule_list': []}) - - def _setup_dist_router_firewall_with_rules(self, func, is_ingress=True, - is_conflict=False, - cidr='10.24.4.0/24'): - apply_list = self._fake_distributed_apply_list() - rule_list = self._fake_rules_v4(is_ingress=is_ingress, - is_conflict=is_conflict, cidr=cidr) - firewall = self._fake_firewall_group(rule_list, is_ingress=is_ingress) - with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', - return_value=firewall),\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_port_firewall_group_id', - return_value=FAKE_FW_ID),\ - mock.patch.object(self.plugin.fwaas_callbacks, - '_get_fw_group_from_plugin', - return_value=firewall),\ - mock.patch.object(edge_utils, "update_firewall") as update_fw,\ - mock.patch.object(edge_utils, 'get_router_edge_id', - return_value='edge-1'),\ - mock.patch.object(self.plugin.edge_manager, 'get_plr_by_tlr_id', - return_value=FAKE_ROUTER_ID),\ - mock.patch.object(self.plugin, '_get_router', - return_value=self.distributed_router),\ - mock.patch.object(self.plugin, 'get_router', - return_value=self.distributed_router): - func('nsx', apply_list, firewall) - expected_rules = self._fake_translated_rules( - rule_list, None, is_ingress=is_ingress, fwg_id=FAKE_FW_ID) + [ - {'name': "Block port ingress", - 'action': edge_firewall_driver.FWAAS_DENY, - 'logged': False}, - {'name': "Block port egress", - 'action': edge_firewall_driver.FWAAS_DENY, - 'logged': False}] - - update_fw.assert_called_once_with( - self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, - {'firewall_rule_list': expected_rules}) - - def test_create_dist_router_firewall_with_ingress_rules(self): - self._setup_dist_router_firewall_with_rules( - self.firewall.create_firewall_group) - - def test_update_dist_router_firewall_with_ingress_rules(self): - self._setup_dist_router_firewall_with_rules( - self.firewall.update_firewall_group) - - def test_create_dist_router_firewall_with_egress_rules(self): - self._setup_dist_router_firewall_with_rules( - self.firewall.create_firewall_group, - is_ingress=False) - - def test_create_dist_router_firewall_with_illegal_cidr(self): - self._setup_dist_router_firewall_with_rules( - self.firewall.create_firewall_group, - cidr='0.0.0.0/24') - - def test_update_dist_router_firewall_with_egress_rules(self): - self._setup_dist_router_firewall_with_rules( - self.firewall.update_firewall_group, - is_ingress=False) - - def test_update_dist_router_firewall_with_egress_conflicting_rules(self): - self._setup_dist_router_firewall_with_rules( - self.firewall.update_firewall_group, - is_ingress=False, is_conflict=True) - - def test_update_dist_router_firewall_with_ingress_conflicting_rules(self): - self._setup_dist_router_firewall_with_rules( - self.firewall.update_firewall_group, - is_ingress=True, is_conflict=True) diff --git a/vmware_nsx/tests/unit/nsx_v/test_lbaas_common.py b/vmware_nsx/tests/unit/nsx_v/test_lbaas_common.py deleted file mode 100644 index c4b6db1fa6..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/test_lbaas_common.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2015 
VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -from neutron.tests import base - -from vmware_nsx.plugins.nsx_v.vshield import vcns_driver -from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common - - -EDGE_ID = 'edge-x' -POOL_ID = 'b3dfb476-6fdf-4ddd-b6bd-e86ae78dc30b' - - -def firewall_section_maker(if_ip_list, vip_ip_list): - return ( - '
<section name="LBaaS FW Rules"><rule><name>' + POOL_ID + - '</name><action>allow</action><sources><source>' - '<type>Ipv4Address</type><value>' + ','.join(if_ip_list) + - '</value></source></sources><destinations>' - '<destination><type>Ipv4Address</type><value>' + - ','.join(vip_ip_list) + '</value></destination></destinations></rule>' - '</section>
') - - -def if_maker(ip_list): - intf = { - 'index': 1, 'name': 'internal1', 'addressGroups': { - 'addressGroups': [ - {'subnetPrefixLength': '24', - 'secondaryAddresses': { - 'ipAddress': ip_list, - 'type': 'secondary_addresses'}, - 'primaryAddress': '10.0.0.1', - 'subnetMask': '255.255.255.0'}]}, - 'portgroupName': 'pg1234', 'label': 'vNic_1', - 'type': 'internal', 'portgroupId': 'virtualwire-31'} - return intf - - -def if_list_maker(ip_list): - if_list = { - 'vnics': [ - {'index': 0, 'name': 'external', 'addressGroups': { - 'addressGroups': [ - {'subnetMask': '255.255.255.0', - 'primaryAddress': '172.24.4.2', - 'subnetPrefixLength': '24'}]}, - 'portgroupName': 'VM Network', 'label': 'vNic_0', - 'type': 'uplink', 'portgroupId': 'network-13'}, - {'index': 1, 'name': 'internal1', 'addressGroups': { - 'addressGroups': [ - {'subnetPrefixLength': '24', - 'secondaryAddresses': { - 'ipAddress': ip_list, - 'type': 'secondary_addresses'}, - 'primaryAddress': '10.0.0.1', - 'subnetMask': '255.255.255.0'}]}, - 'portgroupName': 'pg1234', - 'label': 'vNic_1', 'type': 'internal', - 'portgroupId': 'virtualwire-31'}, - {'index': 2, 'name': 'vnic2', - 'addressGroups': {'addressGroups': []}, - 'label': 'vNic_2', 'type': 'internal'}, - {'index': 3, 'name': 'vnic3', - 'addressGroups': {'addressGroups': []}, - 'label': 'vNic_3', 'type': 'internal'}]} - return if_list - - -class TestLbaasCommon(base.BaseTestCase): - def setUp(self): - super(TestLbaasCommon, self).setUp() - callbacks = mock.Mock() - callbacks.plugin = mock.Mock() - self.edge_driver = vcns_driver.VcnsDriver(callbacks) - self.edge_driver._lb_driver_prop = mock.Mock() - - def _mock_edge_driver_vcns(self, attr): - return mock.patch.object(self.edge_driver.vcns, attr) - - def test_add_vip_as_secondary_ip(self): - update_if = if_maker(['10.0.0.6', '10.0.0.8']) - - with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\ - self._mock_edge_driver_vcns( - 'update_interface') as mock_update_if: - - mock_get_if.return_value = (None, if_list_maker(['10.0.0.6'])) - - lb_common.add_vip_as_secondary_ip( - self.edge_driver.vcns, EDGE_ID, '10.0.0.8') - mock_update_if.assert_called_with(EDGE_ID, update_if) - - def test_del_vip_as_secondary_ip(self): - update_if = if_maker(['10.0.0.6']) - - with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\ - self._mock_edge_driver_vcns( - 'update_interface') as mock_update_if: - - mock_get_if.return_value = (None, if_list_maker(['10.0.0.6', - '10.0.0.8'])) - - lb_common.del_vip_as_secondary_ip( - self.edge_driver.vcns, EDGE_ID, '10.0.0.8') - mock_update_if.assert_called_with(EDGE_ID, update_if) - - def test_get_edge_ip_addresses(self): - get_if_list = if_list_maker(['10.0.0.6']) - - with mock.patch.object(self.edge_driver.vcns, 'get_interfaces', - return_value=(None, get_if_list)): - ip_list = lb_common.get_edge_ip_addresses(self.edge_driver.vcns, - EDGE_ID) - self.assertEqual(['172.24.4.2', '10.0.0.1'], ip_list) diff --git a/vmware_nsx/tests/unit/nsx_v/test_misc.py b/vmware_nsx/tests/unit/nsx_v/test_misc.py deleted file mode 100644 index e5763fb9a7..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/test_misc.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2014 VMware. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from neutron.tests import base - -from vmware_nsx.plugins.nsx_v.vshield.common import exceptions -from vmware_nsx.plugins.nsx_v.vshield import vcns - - -def raise_until_attempt(attempt, exception): - def raises_until(): - if raises_until.current_attempt < attempt: - raises_until.current_attempt += 1 - raise exception - else: - return raises_until.current_attempt - raises_until.current_attempt = 1 - return raises_until - - -class TestMisc(base.BaseTestCase): - response = """ -
<error><details>Dummy</details><errorCode>1</errorCode> - <moduleName>core-services</moduleName></error>
- """ - - def test_retry_on_exception_one_attempt(self): - success_on_first_attempt = raise_until_attempt( - 1, exceptions.RequestBad(uri='', response='')) - should_return_one = vcns.retry_upon_exception( - exceptions.RequestBad, - max_attempts=1)(success_on_first_attempt) - self.assertEqual(1, should_return_one()) - - def test_retry_on_exception_five_attempts(self): - success_on_fifth_attempt = raise_until_attempt( - 5, exceptions.RequestBad(uri='', response='')) - should_return_five = vcns.retry_upon_exception( - exceptions.RequestBad, - max_attempts=10)(success_on_fifth_attempt) - self.assertEqual(5, should_return_five()) - - def test_retry_on_exception_exceed_attempts(self): - success_on_fifth_attempt = raise_until_attempt( - 5, exceptions.RequestBad(uri='', response='')) - should_raise = vcns.retry_upon_exception( - exceptions.RequestBad, - max_attempts=4)(success_on_fifth_attempt) - self.assertRaises(exceptions.RequestBad, should_raise) - - def test_retry_on_exception_exclude_error_codes_retry(self): - success_on_fifth_attempt = raise_until_attempt( - 5, exceptions.RequestBad(uri='', response=self.response)) - # excluding another error code, so should retry - should_return_five = vcns.retry_upon_exception_exclude_error_codes( - exceptions.RequestBad, [2], - max_attempts=10)(success_on_fifth_attempt) - self.assertEqual(5, should_return_five()) - - def test_retry_on_exception_exclude_error_codes_raise(self): - success_on_fifth_attempt = raise_until_attempt( - 5, exceptions.RequestBad(uri='', response=self.response)) - # excluding the returned error code, so no retries are expected - should_raise = vcns.retry_upon_exception_exclude_error_codes( - exceptions.RequestBad, [1], - max_attempts=10)(success_on_fifth_attempt) - self.assertRaises(exceptions.RequestBad, should_raise) diff --git a/vmware_nsx/tests/unit/nsx_v/test_nsxv_loadbalancer.py b/vmware_nsx/tests/unit/nsx_v/test_nsxv_loadbalancer.py deleted file mode 100644 index 96da8807c1..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/test_nsxv_loadbalancer.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2014 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from unittest import mock - -from neutron.tests import base -from oslo_serialization import jsonutils - -from vmware_nsx.plugins.nsx_v.vshield import nsxv_loadbalancer -from vmware_nsx.plugins.nsx_v.vshield import vcns - - -class NsxvLoadbalancerTestCase(base.BaseTestCase): - - EDGE_OBJ_JSON = ( - '{"accelerationEnabled":false,"applicationProfile":[{' - '"applicationProfileId":"applicationProfile-1","insertXForwardedFor":' - 'false,"name":"MDSrvProxy","persistence":{"cookieMode":"insert",' - '"cookieName":"JSESSIONID","expire":"30","method":"cookie"},' - '"serverSslEnabled":false,"sslPassthrough":false,"template":"HTTP"}],' - '"applicationRule":[],"enableServiceInsertion":false,"enabled":true,' - '"featureType":"loadbalancer_4.0","logging":{"enable":false,' - '"logLevel":"info"},"monitor":[{"interval":10,"maxRetries":3,"method":' - '"GET","monitorId":"monitor-1","name":"MDSrvMon","timeout":15,"type":' - '"http","url":"/"}],"pool":[{"algorithm":"round-robin",' - '"applicationRuleId":[],"member":[{"condition":"enabled","ipAddress":' - '"192.168.0.39","maxConn":0,"memberId":"member-1","minConn":0,' - '"monitorPort":8775,"name":"Member-1","port":8775,"weight":1}],' - '"monitorId":["monitor-1"],"name":"MDSrvPool","poolId":"pool-1",' - '"transparent":false}],"version":6,"virtualServer":[{' - '"accelerationEnabled":false,"applicationProfileId":' - '"applicationProfile-1","applicationRuleId":[],"connectionLimit":0,' - '"defaultPoolId":"pool-1","enableServiceInsertion":false,' - '"enabled":true,"ipAddress":"169.254.0.3","name":"MdSrv",' - '"port":"8775","protocol":"http","virtualServerId":' - '"virtualServer-1"}]}') - - OUT_OBJ_JSON = ( - '{"accelerationEnabled": false, "applicationProfile": [{' - '"applicationProfileId": "applicationProfile-1", ' - '"insertXForwardedFor": false, "name": "MDSrvProxy", "persistence": ' - '{"expire": "30", "method": "cookie"}, "serverSslEnabled": false, ' - '"sslPassthrough": false, "template": "HTTP"}],' - ' "enableServiceInsertion": false, "enabled": true, "featureType": ' - '"loadbalancer_4.0", "monitor": [{"interval": 10, "maxRetries": 3, ' - '"method": "GET", "monitorId": "monitor-1", "name": "MDSrvMon", ' - '"timeout": 15, "type": "http", "url": "/"}], "pool": [{"algorithm":' - ' "round-robin", "member": [{"condition": "enabled", "ipAddress": ' - '"192.168.0.39", "maxConn": 0, "memberId": "member-1", "minConn": 0, ' - '"monitorPort": 8775, "name": "Member-1", "port": 8775, "weight": 1}],' - ' "monitorId": ["monitor-1"], "name": "MDSrvPool", "poolId": "pool-1",' - ' "transparent": false}], "virtualServer": [{"accelerationEnabled": ' - 'false, "applicationProfileId": "applicationProfile-1", ' - '"connectionLimit": 0, "defaultPoolId": "pool-1", ' - '"enableServiceInsertion": false, "enabled": true, "ipAddress": ' - '"169.254.0.3", "name": "MdSrv", "port": "8775", "protocol": ' - '"http", "virtualServerId": "virtualServer-1"}]}') - - LB_URI = '/api/4.0/edges/%s/loadbalancer/config' - EDGE_1 = 'edge-x' - EDGE_2 = 'edge-y' - - def setUp(self): - super(NsxvLoadbalancerTestCase, self).setUp() - self._lb = nsxv_loadbalancer.NsxvLoadbalancer() - self._vcns = vcns.Vcns(None, None, None, None, True) - - def test_get_edge_loadbalancer(self): - h = None - v = jsonutils.loads(self.EDGE_OBJ_JSON) - - with mock.patch.object(self._vcns, 'do_request', - return_value=(h, v)) as mock_do_request: - lb = nsxv_loadbalancer.NsxvLoadbalancer.get_loadbalancer( - self._vcns, self.EDGE_1) - lb.submit_to_backend(self._vcns, self.EDGE_2) - - mock_do_request.assert_called_with( - 
vcns.HTTP_PUT, - self.LB_URI % self.EDGE_2, - self.OUT_OBJ_JSON, - format='json', - encode=False) diff --git a/vmware_nsx/tests/unit/nsx_v/test_plugin.py b/vmware_nsx/tests/unit/nsx_v/test_plugin.py deleted file mode 100644 index 0271822481..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/test_plugin.py +++ /dev/null @@ -1,6341 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import contextlib -import copy -import re -from unittest import mock - -import decorator - -from eventlet import greenthread -import netaddr -from neutron.db import securitygroups_db as sg_db -from neutron.extensions import address_scope -from neutron.extensions import l3 -from neutron.extensions import securitygroup as secgrp -from neutron.tests.unit import _test_extension_portbindings as test_bindings -import neutron.tests.unit.db.test_allowedaddresspairs_db as test_addr_pair -import neutron.tests.unit.db.test_db_base_plugin_v2 as test_plugin -from neutron.tests.unit.extensions import base as extension -from neutron.tests.unit.extensions import test_address_scope -from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts -import neutron.tests.unit.extensions.test_l3 as test_l3_plugin -import neutron.tests.unit.extensions.test_l3_ext_gw_mode as test_ext_gw_mode -import neutron.tests.unit.extensions.test_portsecurity as test_psec -import neutron.tests.unit.extensions.test_securitygroup as ext_sg -from neutron.tests.unit import testlib_api -from neutron_lib.api.definitions import allowedaddresspairs as addrp_apidef -from neutron_lib.api.definitions import dvr as dvr_apidef -from neutron_lib.api.definitions import external_net as extnet_apidef -from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext -from neutron_lib.api.definitions import l3 as l3_apidef -from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef -from neutron_lib.api.definitions import l3_flavors as l3fav_apidef -from neutron_lib.api.definitions import port_security as psec -from neutron_lib.api.definitions import portbindings -from neutron_lib.api.definitions import provider_net as pnet -from neutron_lib.api.definitions import router_availability_zone as raz_apidef -from neutron_lib.api import validators -from neutron_lib.callbacks import events -from neutron_lib.callbacks import registry -from neutron_lib.callbacks import resources -from neutron_lib import constants -from neutron_lib import context -from neutron_lib.db import resource_extend -from neutron_lib import exceptions as n_exc -from neutron_lib.plugins import constants as plugin_const -from neutron_lib.plugins import directory -from neutron_lib.plugins import utils -from neutron_lib.services.qos import constants as qos_consts -from neutron_lib.utils import helpers -from oslo_config import cfg -from oslo_utils import uuidutils -from testtools import matchers -import webob.exc - -from vmware_nsx._i18n import _ -from vmware_nsx.common import config -from vmware_nsx.common import exceptions as nsxv_exc 
-from vmware_nsx.common import nsx_constants -from vmware_nsx.common import nsxv_constants -from vmware_nsx.common import utils as c_utils -from vmware_nsx.db import nsxv_db -from vmware_nsx.dvs import dvs -from vmware_nsx.dvs import dvs_utils -from vmware_nsx.extensions import projectpluginmap -from vmware_nsx.extensions import routersize as router_size -from vmware_nsx.extensions import routertype as router_type -from vmware_nsx.extensions import vnicindex as ext_vnic_idx -from vmware_nsx.plugins.common import plugin as com_plugin -from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az -from vmware_nsx.plugins.nsx_v.drivers import ( - distributed_router_driver as dist_router_driver) -from vmware_nsx.plugins.nsx_v.drivers import ( - exclusive_router_driver as ex_router_driver) -from vmware_nsx.plugins.nsx_v.drivers import ( - shared_router_driver as router_driver) -from vmware_nsx.plugins.nsx_v import md_proxy -from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const -from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc -from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver -from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver -from vmware_nsx.plugins.nsx_v.vshield import edge_utils -from vmware_nsx.services.qos.nsx_v import utils as qos_utils -from vmware_nsx.tests import unit as vmware -from vmware_nsx.tests.unit.extensions import test_vnic_index -from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns -from vmware_nsx.tests.unit import test_utils - -PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' - -_uuid = uuidutils.generate_uuid - - -def set_az_in_config(name, resource_pool_id="respool-7", - datastore_id="datastore-7", - edge_ha=False, ha_datastore_id=None): - group_name = 'az:%s' % name - cfg.CONF.set_override('availability_zones', [name], group="nsxv") - config.register_nsxv_azs(cfg.CONF, [name]) - cfg.CONF.set_override("resource_pool_id", resource_pool_id, - group=group_name) - cfg.CONF.set_override("datastore_id", datastore_id, - group=group_name) - cfg.CONF.set_override("edge_ha", edge_ha, - group=group_name) - cfg.CONF.set_override("ha_datastore_id", ha_datastore_id, - group=group_name) - - -# Override subnet creation in some tests to create a subnet with dhcp -# disabled -@decorator.decorator -def with_no_dhcp_subnet(f, *args, **kwargs): - obj = args[0] - obj.subnet = obj.no_dhcp_subnet - result = f(*args, **kwargs) - obj.subnet = obj.original_subnet - return result - - -class NsxVPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): - - def _create_network(self, fmt, name, admin_state_up, - arg_list=None, providernet_args=None, - set_context=False, tenant_id=None, - **kwargs): - tenant_id = tenant_id or self._tenant_id - data = {'network': {'name': name, - 'admin_state_up': admin_state_up, - 'tenant_id': tenant_id}} - # Fix to allow the router:external attribute and any other - # attributes containing a colon to be passed with - # a double underscore instead - kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) - if extnet_apidef.EXTERNAL in kwargs: - arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ()) - - attrs = kwargs - if providernet_args: - attrs.update(providernet_args) - for arg in (('admin_state_up', 'tenant_id', 'shared') + - (arg_list or ())): - # Arg must be present and not empty - if arg in kwargs: - data['network'][arg] = kwargs[arg] - network_req = self.new_create_request('networks', data, fmt) - if set_context and tenant_id: - # create a specific auth context 
for this request - network_req.environ['neutron.context'] = context.Context( - '', tenant_id) - return network_req.get_response(self.api) - - @contextlib.contextmanager - def subnet(self, network=None, **kwargs): - # Override the subnet method to automatically disable dhcp on external - # subnets or ipv6 subnets, unless specified. - set_context = kwargs.get('set_context', False) - with test_plugin.optional_ctx( - network, self.network, - set_context=set_context, - tenant_id=kwargs.get('tenant_id')) as network_to_use: - if 'enable_dhcp' not in kwargs: - if kwargs.get('ip_version') == 6: - kwargs['enable_dhcp'] = False - else: - # Read the network itself, as the network in the args - # does not content this value - net = self._show('networks', - network_to_use['network']['id']) - if net['network']['router:external']: - kwargs['enable_dhcp'] = False - subnet = self._make_subnet(self.fmt, - network_to_use, - kwargs.get( - 'gateway_ip', - constants.ATTR_NOT_SPECIFIED), - kwargs.get('cidr', '10.0.0.0/24'), - kwargs.get('subnetpool_id'), - kwargs.get('allocation_pools'), - kwargs.get('ip_version', 4), - kwargs.get('enable_dhcp', True), - kwargs.get('dns_nameservers'), - kwargs.get('host_routes'), - segment_id=kwargs.get('segment_id'), - shared=kwargs.get('shared'), - ipv6_ra_mode=kwargs.get('ipv6_ra_mode'), - ipv6_address_mode=kwargs.get( - 'ipv6_address_mode'), - tenant_id=kwargs.get('tenant_id'), - set_context=set_context) - yield subnet - - @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') - def setUp(self, mock_deploy_edge, - plugin=PLUGIN_NAME, - ext_mgr=None, - service_plugins=None, - with_md_proxy=True, - with_octavia=False, - **kwargs): - test_utils.override_nsx_ini_test() - mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) - mock_vcns_instance = mock_vcns.start() - self.fc2 = fake_vcns.FakeVcns() - mock_vcns_instance.return_value = self.fc2 - edge_utils.query_dhcp_service_config = mock.Mock(return_value=[]) - self.mock_create_dhcp_service = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service')) - self.mock_create_dhcp_service.start() - mock_update_dhcp_service = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service')) - mock_update_dhcp_service.start() - mock_delete_dhcp_service = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service')) - mock_delete_dhcp_service.start() - mock_check_backup_edge_pools = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, '_check_backup_edge_pools')) - mock_check_backup_edge_pools.start() - mock_deploy_backup_edges_at_backend = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, '_deploy_backup_edges_at_backend')) - mock_deploy_backup_edges_at_backend.start() - mock.patch( - 'neutron_lib.rpc.Connection.consume_in_threads', - return_value=[]).start() - - self.default_res_pool = 'respool-28' - cfg.CONF.set_override("resource_pool_id", self.default_res_pool, - group="nsxv") - set_az_in_config('az7') - - # Add the metadata configuration - self.with_md_proxy = with_md_proxy - if self.with_md_proxy: - cfg.CONF.set_override('mgt_net_moid', 'net-1', group="nsxv") - cfg.CONF.set_override('mgt_net_proxy_ips', ['2.2.2.2'], - group="nsxv") - cfg.CONF.set_override('mgt_net_proxy_netmask', '255.255.255.0', - group="nsxv") - cfg.CONF.set_override('mgt_net_default_gateway', '1.1.1.1', - group="nsxv") - cfg.CONF.set_override('nova_metadata_ips', ['3.3.3.3'], - group="nsxv") - - # Add some mocks required for the md code - mock.patch.object(edge_utils, "update_internal_interface").start() - - # 
Skip Octavia init because of RPC conflicts - if not with_octavia: - mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." - "NSXOctaviaListener.__init__", - return_value=None).start() - mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." - "NSXOctaviaStatisticsCollector.__init__", - return_value=None).start() - - if service_plugins is not None: - # override the service plugins only if specified directly - super(NsxVPluginV2TestCase, self).setUp( - plugin=plugin, - service_plugins=service_plugins, - ext_mgr=ext_mgr) - else: - super(NsxVPluginV2TestCase, self).setUp( - plugin=plugin, - ext_mgr=ext_mgr) - self.addCleanup(self.fc2.reset_all) - plugin_instance = directory.get_plugin() - # handle TVD plugin case - if plugin_instance.is_tvd_plugin(): - plugin_instance = plugin_instance.get_plugin_by_type( - projectpluginmap.NsxPlugins.NSX_V) - plugin_instance.real_get_edge = plugin_instance._get_edge_id_by_rtr_id - plugin_instance._get_edge_id_by_rtr_id = mock.Mock() - plugin_instance._get_edge_id_by_rtr_id.return_value = False - plugin_instance._get_edge_id_and_az_by_rtr_id = mock.Mock() - plugin_instance._get_edge_id_and_az_by_rtr_id.return_value = ( - False, False) - # call init_complete manually. The event is not called in unit tests - plugin_instance.init_complete(None, None, {}) - - self.context = context.get_admin_context() - self.original_subnet = self.subnet - - self.internal_net_id = None - if self.with_md_proxy: - self.internal_net_id = nsxv_db.get_nsxv_internal_network_for_az( - self.context.session, - vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, - 'default')['network_id'] - - def no_dhcp_subnet(self, *args, **kwargs): - if 'enable_dhcp' in kwargs: - return self.original_subnet(*args, **kwargs) - return self.original_subnet(*args, enable_dhcp=False, **kwargs) - - def _get_core_plugin_with_dvs(self): - # enable dvs features to allow policy with QOS - cfg.CONF.set_default('use_dvs_features', True, 'nsxv') - plugin = directory.get_plugin() - with mock.patch.object(dvs_utils, 'dvs_create_session'): - plugin._vcm = dvs.VCManager() - return plugin - - def _remove_md_proxy_from_list(self, items): - for r in items[:]: - if (r.get('tenant_id') == nsxv_constants.INTERNAL_TENANT_ID or - r.get('name') == 'inter-edge-net'): - items.remove(r) - - def deserialize(self, content_type, response): - """Override list actions to skip metadata internal objects - This will allow most tests to run with mdproxy - """ - ctype = 'application/%s' % content_type - data = self._deserializers[ctype].deserialize(response.body)['body'] - for resource in ['networks', 'subnets', 'ports']: - if data.get(resource): - self._remove_md_proxy_from_list(data[resource]) - return data - - def _list(self, resource, fmt=None, neutron_context=None, - query_params=None, expected_code=webob.exc.HTTPOk.code): - fmt = fmt or self.fmt - req = self.new_list_request(resource, fmt, query_params) - if neutron_context: - req.environ['neutron.context'] = neutron_context - res = req.get_response(self._api_for_resource(resource)) - self.assertEqual(expected_code, res.status_int) - if query_params and '_id=' in query_params: - # Do not remove objects if their id was requested specifically - return super(NsxVPluginV2TestCase, self).deserialize(fmt, res) - else: - return self.deserialize(fmt, res) - - def _test_list_with_pagination(self, resource, items, sort, - limit, expected_page_num, - resources=None, - query_params='', - verify_key='id'): - """Override list actions to skip metadata internal objects - This will 
allow most tests to run with mdproxy - """ - if not resources: - resources = '%ss' % resource - query_str = query_params + '&' if query_params else '' - query_str = query_str + ("limit=%s&sort_key=%s&" - "sort_dir=%s") % (limit, sort[0], sort[1]) - req = self.new_list_request(resources, params=query_str) - items_res = [] - page_num = 0 - api = self._api_for_resource(resources) - resource = resource.replace('-', '_') - resources = resources.replace('-', '_') - while req: - page_num = page_num + 1 - res = super(NsxVPluginV2TestCase, self).deserialize( - self.fmt, req.get_response(api)) - self.assertThat(len(res[resources]), - matchers.LessThan(limit + 1)) - items_res = items_res + res[resources] - req = None - if '%s_links' % resources in res: - for link in res['%s_links' % resources]: - if link['rel'] == 'next': - content_type = 'application/%s' % self.fmt - req = testlib_api.create_request(link['href'], - '', content_type) - self.assertEqual(len(res[resources]), - limit) - # skip md-proxy objects - orig_items_num = len(items_res) - self._remove_md_proxy_from_list(items_res) - # Test number of pages only if no mdproxy entries were removed - if orig_items_num == len(items_res): - self.assertEqual(expected_page_num, page_num) - self.assertEqual([item[resource][verify_key] for item in items], - [n[verify_key] for n in items_res]) - - def _test_list_with_pagination_reverse(self, resource, items, sort, - limit, expected_page_num, - resources=None, - query_params=''): - """Override list actions to skip metadata internal objects - This will allow most tests to run with mdproxy - """ - if not resources: - resources = '%ss' % resource - resource = resource.replace('-', '_') - api = self._api_for_resource(resources) - marker = items[-1][resource]['id'] - query_str = query_params + '&' if query_params else '' - query_str = query_str + ("limit=%s&page_reverse=True&" - "sort_key=%s&sort_dir=%s&" - "marker=%s") % (limit, sort[0], sort[1], - marker) - req = self.new_list_request(resources, params=query_str) - item_res = [items[-1][resource]] - page_num = 0 - resources = resources.replace('-', '_') - while req: - page_num = page_num + 1 - res = super(NsxVPluginV2TestCase, self).deserialize( - self.fmt, req.get_response(api)) - self.assertThat(len(res[resources]), - matchers.LessThan(limit + 1)) - res[resources].reverse() - item_res = item_res + res[resources] - req = None - if '%s_links' % resources in res: - for link in res['%s_links' % resources]: - if link['rel'] == 'previous': - content_type = 'application/%s' % self.fmt - req = testlib_api.create_request(link['href'], - '', content_type) - self.assertEqual(len(res[resources]), - limit) - # skip md-proxy objects - orig_items_num = len(item_res) - self._remove_md_proxy_from_list(item_res) - # Test number of pages only if no mdproxy entries were removed - if orig_items_num == len(item_res): - self.assertEqual(expected_page_num, page_num) - expected_res = [item[resource]['id'] for item in items] - expected_res.reverse() - self.assertEqual(expected_res, [n['id'] for n in item_res]) - - -class TestNetworksV2(test_plugin.TestNetworksV2, NsxVPluginV2TestCase): - - def _test_create_bridge_network(self, vlan_id=0): - net_type = vlan_id and 'vlan' or 'flat' - name = 'bridge_net' - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), ('shared', False), - (pnet.NETWORK_TYPE, net_type), - (pnet.PHYSICAL_NETWORK, 'tzuuid'), - (pnet.SEGMENTATION_ID, vlan_id)] - providernet_args = {pnet.NETWORK_TYPE: net_type, - 
pnet.PHYSICAL_NETWORK: 'tzuuid'} - if vlan_id: - providernet_args[pnet.SEGMENTATION_ID] = vlan_id - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK, - pnet.SEGMENTATION_ID)) as net: - for k, v in expected: - self.assertEqual(net['network'][k], v) - - def test_create_bridge_network(self): - self._test_create_bridge_network() - - def test_create_bridge_vlan_network(self): - self._test_create_bridge_network(vlan_id=123) - - def test_get_vlan_network_name(self): - p = directory.get_plugin() - net_id = uuidutils.generate_uuid() - dvs_id = 'dvs-10' - net = {'name': '', - 'id': net_id} - # Empty net['name'] should yield dvs_id-net_id as a name for the - # port group. - expected = '%s-%s' % (dvs_id, net_id) - self.assertEqual(expected, - p._get_vlan_network_name(net, dvs_id)) - # If network name is provided then it should yield - # dvs_id-net_name-net_id as a name for the port group. - net = {'name': 'pele', - 'id': net_id} - expected = '%s-%s-%s' % (dvs_id, 'pele', net_id) - self.assertEqual(expected, - p._get_vlan_network_name(net, dvs_id)) - name = 'X' * 500 - net = {'name': name, - 'id': net_id} - expected = '%s-%s-%s' % (dvs_id, name[:36], net_id) - self.assertEqual(expected, - p._get_vlan_network_name(net, dvs_id)) - - def test_get_vlan_network_name_with_net_name_missing(self): - p = directory.get_plugin() - net_id = uuidutils.generate_uuid() - dvs_id = 'dvs-10' - net = {'id': net_id} - # Missing net['name'] should yield dvs_id-net_id as a name for the - # port group. - expected = '%s-%s' % (dvs_id, net_id) - self.assertEqual(expected, - p._get_vlan_network_name(net, dvs_id)) - - def _test_generate_tag(self, vlan_id): - net_type = 'vlan' - name = 'bridge_net' - plugin = directory.get_plugin() - plugin._network_vlans = utils.parse_network_vlan_ranges( - cfg.CONF.nsxv.network_vlan_ranges) - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), ('shared', False), - (pnet.NETWORK_TYPE, net_type), - (pnet.PHYSICAL_NETWORK, 'dvs-70'), - (pnet.SEGMENTATION_ID, vlan_id)] - providernet_args = {pnet.NETWORK_TYPE: net_type, - pnet.PHYSICAL_NETWORK: 'dvs-70'} - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK)) as net: - for k, v in expected: - self.assertEqual(net['network'][k], v) - - def test_create_bridge_vlan_generate(self): - cfg.CONF.set_default('network_vlan_ranges', 'dvs-70', 'nsxv') - self._test_generate_tag(1) - - def test_create_bridge_vlan_generate_range(self): - cfg.CONF.set_default('network_vlan_ranges', 'dvs-70:100:110', 'nsxv') - self._test_generate_tag(100) - - def test_create_bridge_vlan_network_outofrange_returns_400(self): - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - self._test_create_bridge_network(vlan_id=5000) - self.assertEqual(ctx_manager.exception.code, 400) - - def test_create_external_portgroup_network(self): - name = 'ext_net' - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), ('shared', False), - (extnet_apidef.EXTERNAL, True), - (pnet.NETWORK_TYPE, 'portgroup'), - (pnet.PHYSICAL_NETWORK, 'tzuuid')] - providernet_args = {pnet.NETWORK_TYPE: 'portgroup', - pnet.PHYSICAL_NETWORK: 'tzuuid', - extnet_apidef.EXTERNAL: True} - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK, - extnet_apidef.EXTERNAL)) as net: - for k, v in expected: - 
self.assertEqual(net['network'][k], v) - - def test_create_portgroup_network(self): - name = 'pg_net' - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), ('shared', False), - (pnet.NETWORK_TYPE, 'portgroup'), - (pnet.PHYSICAL_NETWORK, 'tzuuid')] - providernet_args = {pnet.NETWORK_TYPE: 'portgroup', - pnet.PHYSICAL_NETWORK: 'tzuuid'} - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK)) as net: - for k, v in expected: - self.assertEqual(net['network'][k], v) - - # try to create another one on the same physical net will failure - res = self._create_network( - self.fmt, name, True, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK)) - data = self.deserialize(self.fmt, res) - self.assertIn('NeutronError', data) - - def test_delete_network_after_removing_subnet(self): - gateway_ip = '10.0.0.1' - cidr = '10.0.0.0/24' - fmt = 'json' - # Create new network - res = self._create_network(fmt=fmt, name='net', - admin_state_up=True) - network = self.deserialize(fmt, res) - subnet = self._make_subnet(fmt, network, gateway_ip, - cidr, ip_version=4) - req = self.new_delete_request('subnets', subnet['subnet']['id']) - sub_del_res = req.get_response(self.api) - self.assertEqual(sub_del_res.status_int, 204) - req = self.new_delete_request('networks', network['network']['id']) - net_del_res = req.get_response(self.api) - self.assertEqual(net_del_res.status_int, 204) - - def test_list_networks_with_shared(self): - with self.network(name='net1'): - with self.network(name='net2', shared=True): - req = self.new_list_request('networks') - res = self.deserialize('json', req.get_response(self.api)) - ###self._remove_md_proxy_from_list(res['networks']) - self.assertEqual(len(res['networks']), 2) - req_2 = self.new_list_request('networks') - req_2.environ['neutron.context'] = context.Context('', - 'somebody') - res = self.deserialize('json', req_2.get_response(self.api)) - ###self._remove_md_proxy_from_list(res['networks']) - # tenant must see a single network - self.assertEqual(len(res['networks']), 1) - - def test_create_network_name_exceeds_40_chars(self): - name = 'this_is_a_network_whose_name_is_longer_than_40_chars' - with self.network(name=name) as net: - # Assert neutron name is not truncated - self.assertEqual(net['network']['name'], name) - - def test_create_update_network_allow_multiple_addresses_spoofguard(self): - # allow_multiple_addresses flag is True, first step is to check that - # when port-security-allowed is false - spoofguard policy is not - # created. - # next step is to update port-security-allowed to true - spoofguard - # policy is now created for this network. 
- q_context = context.Context('', 'tenant_1') - providernet_args = {psec.PORTSECURITY: False} - cfg.CONF.set_default('allow_multiple_ip_addresses', True, 'nsxv') - res = self._create_network(fmt='json', name='net-1', - admin_state_up=True, - providernet_args=providernet_args, - arg_list=(psec.PORTSECURITY,)) - network1 = self.deserialize(self.fmt, res) - net1_id = network1['network']['id'] - # not creating spoofguard policy - self.assertIsNone(nsxv_db.get_spoofguard_policy_id(q_context.session, - net1_id)) - args = {'network': {psec.PORTSECURITY: True}} - req = self.new_update_request('networks', args, - network1['network']['id'], fmt='json') - res = self.deserialize('json', req.get_response(self.api)) - net1_id = res['network']['id'] - # creating spoofguard policy - self.assertIsNotNone(nsxv_db.get_spoofguard_policy_id( - q_context.session, net1_id)) - - def test_update_network_with_admin_false(self): - data = {'network': {'admin_state_up': False}} - with self.network() as net: - plugin = directory.get_plugin() - self.assertRaises(NotImplementedError, - plugin.update_network, - context.get_admin_context(), - net['network']['id'], data) - - def test_create_extend_dvs_provider_network(self): - name = 'provider_net' - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), ('shared', False), - (pnet.NETWORK_TYPE, 'flat'), - (pnet.PHYSICAL_NETWORK, 'dvs-uuid')] - providernet_args = {pnet.NETWORK_TYPE: 'flat', - pnet.PHYSICAL_NETWORK: 'dvs-uuid'} - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK)) as net: - for k, v in expected: - self.assertEqual(net['network'][k], v) - - def test_create_same_vlan_network_with_different_dvs(self): - name = 'dvs-provider-net' - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), ('shared', False), - (pnet.NETWORK_TYPE, 'vlan'), - (pnet.SEGMENTATION_ID, 43), - (pnet.PHYSICAL_NETWORK, 'dvs-uuid-1')] - providernet_args = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 43, - pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'} - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)) as net: - for k, v in expected: - self.assertEqual(net['network'][k], v) - - expected_same_vlan = [(pnet.NETWORK_TYPE, 'vlan'), - (pnet.SEGMENTATION_ID, 43), - (pnet.PHYSICAL_NETWORK, 'dvs-uuid-2')] - providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 43, - pnet.PHYSICAL_NETWORK: 'dvs-uuid-2'} - with self.network(name=name, - providernet_args=providernet_args_1, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)) as net1: - for k, v in expected_same_vlan: - self.assertEqual(net1['network'][k], v) - - def test_create_vlan_network_with_multiple_dvs(self): - name = 'multi-dvs-vlan-net' - providernet_args = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 100, - pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} - p = directory.get_plugin() - with mock.patch.object( - p, '_create_vlan_network_at_backend', - # Return three netmorefs as side effect - side_effect=[_uuid(), _uuid(), _uuid()]) as vlan_net_call: - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)): - # _create_vlan_network_at_backend is expected to be called - # three times since we have three DVS IDs in the physical - # network attribute. 
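The multi-DVS tests around here describe a simple fan-out with rollback: one backend port group is created per DVS ID listed in provider:physical_network, and everything created so far is deleted if a later backend call fails. A rough, self-contained sketch of that pattern (the function names are placeholders, not the plugin's real helpers):

    def create_port_groups(dvs_ids, create_backend_net, delete_backend_net):
        # One backend call per DVS ID; roll back partial work on failure.
        created = []
        try:
            for dvs_id in dvs_ids:
                created.append(create_backend_net(dvs_id))
        except Exception:
            for moref in created:
                delete_backend_net(moref)
            raise
        return created

    morefs = create_port_groups(['dvs-1', 'dvs-2', 'dvs-3'],
                                lambda dvs: 'moref-' + dvs,
                                lambda moref: None)
    assert len(morefs) == 3   # three DVS IDs -> three backend port groups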
- self.assertEqual(3, vlan_net_call.call_count) - - def test_create_vlan_network_with_multiple_dvs_backend_failure(self): - net_data = {'name': 'vlan-net', - 'tenant_id': self._tenant_id, - pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 100, - pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} - network = {'network': net_data} - p = directory.get_plugin() - with mock.patch.object( - p, '_create_vlan_network_at_backend', - # Return two successful netmorefs and fail on the backend - # for the third netmoref creation as side effect. - side_effect=[_uuid(), _uuid(), - nsxv_exc.NsxPluginException(err_msg='')]): - with mock.patch.object( - p, '_delete_backend_network') as delete_net_call: - self.assertRaises(nsxv_exc.NsxPluginException, - p.create_network, - context.get_admin_context(), - network) - # Two successfully created port groups should be rolled back - # on the failure of third port group creation. - self.assertEqual(2, delete_net_call.call_count) - - def test_create_vlan_network_with_multiple_dvs_not_found_failure(self): - net_data = {'name': 'vlan-net', - 'tenant_id': self._tenant_id, - pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 100, - pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} - network = {'network': net_data} - p = directory.get_plugin() - with mock.patch.object( - p, '_validate_provider_create', - side_effect=[nsxv_exc.NsxResourceNotFound(res_id='dvs-2', - res_name='dvs_id')]): - with mock.patch.object( - p, '_create_vlan_network_at_backend') as create_net_call: - self.assertRaises(nsxv_exc.NsxResourceNotFound, - p.create_network, - context.get_admin_context(), - network) - # Verify no port group is created on the backend. - self.assertEqual(0, create_net_call.call_count) - - def test_create_vlan_network_with_multiple_dvs_ignore_duplicate_dvs(self): - name = 'multi-dvs-vlan-net' - providernet_args = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 100, - pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-1'} - p = directory.get_plugin() - with mock.patch.object( - p, '_create_vlan_network_at_backend', - # Return two netmorefs as side effect - side_effect=[_uuid(), _uuid()]) as vlan_net_call: - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)): - # _create_vlan_network_at_backend is expected to be called - # two times since we have only two unique DVS IDs in the - # physical network attribute. - self.assertEqual(2, vlan_net_call.call_count) - - def test_update_vlan_network_add_dvs(self): - name = 'multi-dvs-vlan-net' - providernet_args = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 100, - pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2'} - p = directory.get_plugin() - with mock.patch.object( - p, '_create_vlan_network_at_backend', - # Return 3 netmorefs as side effect - side_effect=[_uuid(), _uuid(), _uuid()]) as vlan_net_call: - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)) as net: - # _create_vlan_network_at_backend is expected to be called - # 2 times since we have 2 DVS IDs in the physical - # network attribute. 
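test_get_dvs_ids_for_multiple_dvs_vlan_network, further down, spells out how the comma-separated physical_network string is expected to be handled: whitespace and empty entries are ignored, duplicates collapse, and the globally configured DVS is used when nothing is specified. An illustrative stand-in for that parsing (None stands in for ATTR_NOT_SPECIFIED; this is not the plugin's _get_dvs_ids()):

    def parse_dvs_ids(physical_network, default_dvs):
        # Only models the behaviour the removed test expects.
        if not physical_network:
            return [default_dvs]
        seen = []
        for item in physical_network.split(','):
            dvs_id = item.strip()
            if dvs_id and dvs_id not in seen:
                seen.append(dvs_id)
        return seen

    assert parse_dvs_ids(None, 'fake_dvs_id') == ['fake_dvs_id']
    assert parse_dvs_ids('dvs-1,dvs-2, dvs-3', 'x') == ['dvs-1', 'dvs-2', 'dvs-3']
    assert parse_dvs_ids(',,,dvs-1,dvs-2,, dvs-2,', 'x') == ['dvs-1', 'dvs-2']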
-                self.assertEqual(2, vlan_net_call.call_count)
-                self.assertEqual('dvs-1, dvs-2',
-                                 net['network'][pnet.PHYSICAL_NETWORK])
-                # Add another dvs
-                data = {'network': {pnet.PHYSICAL_NETWORK:
-                                    'dvs-1, dvs-2, dvs-3'}}
-                req = self.new_update_request('networks', data,
-                                              net['network']['id'])
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual(3, vlan_net_call.call_count)
-                self.assertEqual('dvs-1, dvs-2, dvs-3',
-                                 res['network'][pnet.PHYSICAL_NETWORK])
-
-                # make sure it is updated in the DB as well
-                req = self.new_show_request('networks', net['network']['id'])
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual('dvs-1, dvs-2, dvs-3',
-                                 res['network'][pnet.PHYSICAL_NETWORK])
-
-                # update again - with no real change
-                req = self.new_update_request('networks', data,
-                                              net['network']['id'])
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual(3, vlan_net_call.call_count)
-                self.assertEqual('dvs-1, dvs-2, dvs-3',
-                                 res['network'][pnet.PHYSICAL_NETWORK])
-
-    def test_update_vlan_network_remove_dvs(self):
-        name = 'multi-dvs-vlan-net'
-        providernet_args = {pnet.NETWORK_TYPE: 'vlan',
-                            pnet.SEGMENTATION_ID: 100,
-                            pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2'}
-        p = directory.get_plugin()
-        with mock.patch.object(
-            p, '_create_vlan_network_at_backend',
-            # Return 2 netmorefs as side effect
-            side_effect=[_uuid(), _uuid()]) as vlan_net_call,\
-            mock.patch.object(
-                p, '_delete_backend_network') as del_net:
-            with self.network(name=name,
-                              providernet_args=providernet_args,
-                              arg_list=(pnet.NETWORK_TYPE,
-                                        pnet.SEGMENTATION_ID,
-                                        pnet.PHYSICAL_NETWORK)) as net:
-                # _create_vlan_network_at_backend is expected to be called
-                # 2 times since we have 2 DVS IDs in the physical
-                # network attribute.
-                self.assertEqual(2, vlan_net_call.call_count)
-                self.assertEqual('dvs-1, dvs-2',
-                                 net['network'][pnet.PHYSICAL_NETWORK])
-                # Keep only dvs-1 (Remove dvs-2)
-                data = {'network': {pnet.PHYSICAL_NETWORK: 'dvs-1'}}
-                req = self.new_update_request('networks', data,
-                                              net['network']['id'])
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual(2, vlan_net_call.call_count)
-                del_net.assert_called_once()
-                self.assertEqual('dvs-1',
-                                 res['network'][pnet.PHYSICAL_NETWORK])
-
-                # make sure it is updated in the DB as well
-                req = self.new_show_request('networks', net['network']['id'])
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual('dvs-1',
-                                 res['network'][pnet.PHYSICAL_NETWORK])
-
-    def test_get_dvs_ids_for_multiple_dvs_vlan_network(self):
-        p = directory.get_plugin()
-        default_dvs = 'fake_dvs_id'
-        # If no DVS-ID is provided as part of physical network, return
-        # global DVS-ID configured in nsx.ini
-        physical_network = constants.ATTR_NOT_SPECIFIED
-        self.assertEqual(['fake_dvs_id'], p._get_dvs_ids(
-            physical_network, default_dvs))
-        # If DVS-IDs are provided as part of physical network as a comma
-        # separated string, return them as a list of DVS-IDs.
-        physical_network = 'dvs-1,dvs-2, dvs-3'
-        expected_dvs_ids = ['dvs-1', 'dvs-2', 'dvs-3']
-        self.assertEqual(expected_dvs_ids,
-                         sorted(p._get_dvs_ids(physical_network, default_dvs)))
-        # Ignore extra commas ',' in the physical_network attribute.
-        physical_network = ',,,dvs-1,dvs-2,, dvs-3,'
-        expected_dvs_ids = ['dvs-1', 'dvs-2', 'dvs-3']
-        self.assertEqual(expected_dvs_ids,
-                         sorted(p._get_dvs_ids(physical_network, default_dvs)))
-        # Ignore duplicate DVS-IDs in the physical_network attribute.
- physical_network = ',,,dvs-1,dvs-2,, dvs-2,' - expected_dvs_ids = ['dvs-1', 'dvs-2'] - self.assertEqual(expected_dvs_ids, - sorted(p._get_dvs_ids(physical_network, default_dvs))) - - def test_create_vxlan_with_tz_provider_network(self): - name = 'provider_net_vxlan' - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), ('shared', False), - (pnet.NETWORK_TYPE, 'vxlan'), - (pnet.PHYSICAL_NETWORK, 'vdnscope-2')] - providernet_args = {pnet.NETWORK_TYPE: 'vxlan', - pnet.PHYSICAL_NETWORK: 'vdnscope-2'} - with self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK)) as net: - for k, v in expected: - self.assertEqual(net['network'][k], v) - - def test_create_vxlan_with_tz_provider_network_not_found_fail(self): - name = 'provider_net_vxlan' - data = {'network': { - 'name': name, - 'tenant_id': self._tenant_id, - pnet.SEGMENTATION_ID: constants.ATTR_NOT_SPECIFIED, - pnet.NETWORK_TYPE: 'vxlan', - pnet.PHYSICAL_NETWORK: 'vdnscope-2'}} - p = directory.get_plugin() - with mock.patch.object(p.nsx_v.vcns, 'validate_vdn_scope', - side_effect=[False]): - self.assertRaises(nsxv_exc.NsxResourceNotFound, - p.create_network, - context.get_admin_context(), - data) - - def test_create_network_with_qos_no_dvs_fail(self): - # network creation should fail if the qos policy parameter exists, - # and no use_dvs_features configured - data = {'network': { - 'name': 'test-qos', - 'tenant_id': self._tenant_id, - 'qos_policy_id': _uuid()}} - plugin = directory.get_plugin() - with mock.patch.object(plugin, '_validate_qos_policy_id'): - self.assertRaises(n_exc.InvalidInput, - plugin.create_network, - context.get_admin_context(), - data) - - def test_update_network_with_qos_no_dvs_fail(self): - # network update should fail if the qos policy parameter exists, - # and no use_dvs_features configured - data = {'network': {'qos_policy_id': _uuid()}} - with self.network() as net: - plugin = directory.get_plugin() - self.assertRaises(n_exc.InvalidInput, - plugin.update_network, - context.get_admin_context(), - net['network']['id'], data) - - @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') - @mock.patch.object(qos_utils.NsxVQosRule, '_init_from_policy_id') - def test_create_network_with_qos_policy(self, - fake_init_from_policy, - fake_dvs_update): - # enable dvs features to allow policy with QOS - plugin = self._get_core_plugin_with_dvs() - ctx = context.get_admin_context() - - # Mark init as complete, as otherwise QoS won't be called - plugin.init_is_complete = True - - # fake policy id - policy_id = _uuid() - data = {'network': { - 'name': 'test-qos', - 'tenant_id': self._tenant_id, - 'qos_policy_id': policy_id, - 'port_security_enabled': False, - 'admin_state_up': False, - 'shared': False - }} - with mock.patch('vmware_nsx.services.qos.common.utils.' 
- 'get_network_policy_id', - return_value=policy_id),\ - mock.patch.object(self.plugin, '_validate_qos_policy_id'): - # create the network - should succeed and translate the policy id - net = plugin.create_network(ctx, data) - self.assertEqual(policy_id, net[qos_consts.QOS_POLICY_ID]) - fake_init_from_policy.assert_called_once_with(ctx, policy_id) - self.assertTrue(fake_dvs_update.called) - - # Get network should also return the qos policy id - net2 = plugin.get_network(ctx, net['id']) - self.assertEqual(policy_id, net2[qos_consts.QOS_POLICY_ID]) - - @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') - @mock.patch.object(qos_utils.NsxVQosRule, '_init_from_policy_id') - def test_update_network_with_qos_policy(self, - fake_init_from_policy, - fake_dvs_update): - # enable dvs features to allow policy with QOS - plugin = self._get_core_plugin_with_dvs() - ctx = context.get_admin_context() - - # create the network without qos policy - data = {'network': { - 'name': 'test-qos', - 'tenant_id': self._tenant_id, - 'port_security_enabled': False, - 'admin_state_up': True, - 'shared': False - }} - net = plugin.create_network(ctx, data) - - # fake policy id - policy_id = _uuid() - data['network']['qos_policy_id'] = policy_id - # update the network - should succeed and translate the policy id - with mock.patch('vmware_nsx.services.qos.common.utils.' - 'get_network_policy_id', - return_value=policy_id),\ - mock.patch.object(self.plugin, '_validate_qos_policy_id'): - res = plugin.update_network(ctx, net['id'], data) - self.assertEqual(policy_id, res[qos_consts.QOS_POLICY_ID]) - fake_init_from_policy.assert_called_once_with(ctx, policy_id) - self.assertTrue(fake_dvs_update.called) - - # Get network should also return the qos policy id - net2 = plugin.get_network(ctx, net['id']) - self.assertEqual(policy_id, net2[qos_consts.QOS_POLICY_ID]) - - def test_create_network_with_bad_az_hint(self): - p = directory.get_plugin() - ctx = context.get_admin_context() - data = {'network': { - 'name': 'test-qos', - 'tenant_id': self._tenant_id, - 'port_security_enabled': False, - 'admin_state_up': True, - 'shared': False, - 'availability_zone_hints': ['bad_hint'] - }} - self.assertRaises(n_exc.NeutronException, - p.create_network, - ctx, data) - - def test_create_network_with_az_hint(self): - az_name = 'az7' - set_az_in_config(az_name) - p = directory.get_plugin() - p._availability_zones_data = nsx_az.NsxVAvailabilityZones() - ctx = context.get_admin_context() - - data = {'network': { - 'name': 'test-qos', - 'tenant_id': self._tenant_id, - 'port_security_enabled': False, - 'admin_state_up': True, - 'shared': False, - 'availability_zone_hints': [az_name] - }} - - # network creation should succeed - net = p.create_network(ctx, data) - self.assertEqual([az_name], - net['availability_zone_hints']) - # the availability zone is still empty until subnet creation - self.assertEqual([], - net['availability_zones']) - - def test_list_networks_with_fields(self): - with self.network(name='net1'): - req = self.new_list_request('networks', - params='fields=name') - res = self.deserialize(self.fmt, req.get_response(self.api)) - self._remove_md_proxy_from_list(res['networks']) - self.assertEqual(1, len(res['networks'])) - net = res['networks'][0] - self.assertEqual('net1', net['name']) - self.assertNotIn('id', net) - self.assertNotIn('tenant_id', net) - self.assertNotIn('project_id', net) - - def test_list_networks_without_pk_in_fields_pagination_native(self): - self.skipTest("The test is not suitable for the metadata test 
case") - - def test_cannot_delete_md_net(self): - if self.internal_net_id: - req = self.new_delete_request('networks', self.internal_net_id) - net_del_res = req.get_response(self.api) - self.assertEqual(net_del_res.status_int, 400) - - -class TestVnicIndex(NsxVPluginV2TestCase, - test_vnic_index.VnicIndexDbTestCase): - def test_update_port_twice_with_the_same_index(self): - """Tests that updates which does not modify the port vnic - index association do not produce any errors - """ - with self.subnet() as subnet: - with self.port(subnet=subnet) as port: - res = self._port_index_update(port['port']['id'], 2) - self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX]) - res = self._port_index_update(port['port']['id'], 2) - self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX]) - - -class TestPortsV2(NsxVPluginV2TestCase, - test_plugin.TestPortsV2, - test_bindings.PortBindingsTestCase, - test_bindings.PortBindingsHostTestCaseMixin, - test_bindings.PortBindingsVnicTestCaseMixin): - - VIF_TYPE = nsx_constants.VIF_TYPE_DVS - HAS_PORT_FILTER = True - - def test_is_mac_in_use(self): - ctx = context.get_admin_context() - with self.port() as port: - net_id = port['port']['network_id'] - mac = port['port']['mac_address'] - self.assertTrue(self.plugin._is_mac_in_use(ctx, net_id, mac)) - mac2 = '00:22:00:44:00:66' # other mac, same network - self.assertFalse(self.plugin._is_mac_in_use(ctx, net_id, mac2)) - net_id2 = port['port']['id'] # other net uuid, same mac - self.assertTrue(self.plugin._is_mac_in_use(ctx, net_id2, mac)) - - @with_no_dhcp_subnet - def test_duplicate_mac_generation(self): - self.skipTest('Skip need to address in future - started fail Aug 2021') - - def test_get_ports_count(self): - with self.port(), self.port(), self.port(), self.port() as p: - tenid = p['port']['tenant_id'] - ctx = context.Context(user_id=None, tenant_id=tenid, - is_admin=False) - pl = directory.get_plugin() - count = pl.get_ports_count(ctx, filters={'tenant_id': [tenid]}) - # Each port above has subnet => we have an additional port - # for DHCP - self.assertEqual(8, count) - - @with_no_dhcp_subnet - def test_requested_ips_only(self): - return super(TestPortsV2, self).test_requested_ips_only() - - def test_delete_network_port_exists_owned_by_network_race(self): - self.skipTest('Skip need to address in future') - - def test_create_port_with_too_many_fixed_ips(self): - self.skipTest('DHCP only supports one binding') - - def test_create_port_invalid_fixed_ip_address_v6_pd_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_invalid_fixed_ip_address_v6_pd_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_invalid_subnet_v6_pd_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_mac_v6_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_invalid_fixed_ip_address_v6_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self): - self.skipTest('No DHCP v6 Support yet') - - def test_requested_subnet_id_v6_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self): - self.skipTest('No DHCP v6 Support yet') - - def test_requested_fixed_ip_address_v6_slaac_router_iface(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self): - self.skipTest('No DHCP v6 Support yet') - - def 
test_requested_invalid_fixed_ip_address_v6_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_delete_port_with_ipv6_slaac_address(self): - self.skipTest('No DHCP v6 Support yet') - - def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self): - self.skipTest('No DHCP v6 Support yet') - - def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode, - ipv6_pd=False): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_with_new_ipv6_slaac_subnet_in_fixed_ips(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_port_anticipating_allocation(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - @with_no_dhcp_subnet - def test_list_ports(self): - return super(TestPortsV2, self).test_list_ports() - - @with_no_dhcp_subnet - def test_list_ports_public_network(self): - return super(TestPortsV2, self).test_list_ports_public_network() - - @with_no_dhcp_subnet - def test_list_ports_with_pagination_emulated(self): - return super(TestPortsV2, - self).test_list_ports_with_pagination_emulated() - - @with_no_dhcp_subnet - def test_list_ports_with_pagination_native(self): - return super(TestPortsV2, - self).test_list_ports_with_pagination_native() - - @with_no_dhcp_subnet - def test_list_ports_with_sort_emulated(self): - return super(TestPortsV2, self).test_list_ports_with_sort_emulated() - - @with_no_dhcp_subnet - def test_list_ports_with_sort_native(self): - return super(TestPortsV2, self).test_list_ports_with_sort_native() - - def test_list_ports_filtered_by_security_groups(self): - ctx = context.get_admin_context() - with self.port() as port1, self.port() as port2: - query_params = "security_groups=%s" % ( - port1['port']['security_groups'][0]) - ports_data = self._list('ports', query_params=query_params) - self.assertEqual(set([port1['port']['id'], port2['port']['id']]), - set([port['id'] for port in ports_data['ports']])) - query_params = "security_groups=%s&id=%s" % ( - port1['port']['security_groups'][0], - port1['port']['id']) - ports_data = self._list('ports', query_params=query_params) - self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) - self.assertEqual(1, len(ports_data['ports'])) - temp_sg = {'security_group': {'tenant_id': 'some_tenant', - 'name': '', 'description': 's'}} - sg_dbMixin = sg_db.SecurityGroupDbMixin() - sg = sg_dbMixin.create_security_group(ctx, temp_sg) - sg_dbMixin._delete_port_security_group_bindings( - ctx, port2['port']['id']) - sg_dbMixin._create_port_security_group_binding( - ctx, port2['port']['id'], sg['id']) - port2['port']['security_groups'][0] = sg['id'] - query_params = "security_groups=%s" % ( - port1['port']['security_groups'][0]) - ports_data = self._list('ports', query_params=query_params) - self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) - self.assertEqual(1, len(ports_data['ports'])) - query_params = "security_groups=%s" % ( - (port2['port']['security_groups'][0])) - ports_data = self._list('ports', query_params=query_params) - self.assertEqual(port2['port']['id'], ports_data['ports'][0]['id']) - - def test_update_port_delete_ip(self): - # This test case overrides the default because the nsx plugin - # implements port_security/security groups and it is not allowed - # to remove an ip address from a port unless the security group - # is first removed. 
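As the comment above explains, the NSX-V plugin only lets the fixed IP be dropped if the port's security groups are removed in the same request, because port security requires an IP. A sketch of the update body the overridden test sends (plain JSON, no client library assumed):

    import json

    port_update = {
        'port': {
            'admin_state_up': False,
            'fixed_ips': [],          # drop the IP address...
            'security_groups': [],    # ...and the security groups with it
        }
    }
    # e.g. the body of PUT /v2.0/ports/<port-id>
    print(json.dumps(port_update, indent=2))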
- with self.subnet() as subnet: - with self.port(subnet=subnet) as port: - data = {'port': {'admin_state_up': False, - 'fixed_ips': [], - secgrp.SECURITYGROUPS: []}} - req = self.new_update_request('ports', - data, port['port']['id']) - res = self.deserialize('json', req.get_response(self.api)) - self.assertEqual(res['port']['admin_state_up'], - data['port']['admin_state_up']) - self.assertEqual(res['port']['fixed_ips'], - data['port']['fixed_ips']) - - def _update_port_index(self, port_id, device_id, index): - data = {'port': {'device_owner': 'compute:None', - 'device_id': device_id, - 'vnic_index': index}} - req = self.new_update_request('ports', - data, port_id) - res = self.deserialize('json', req.get_response(self.api)) - return res - - def _test_update_port_index_and_spoofguard( - self, ip_version, subnet_cidr, port_ip, port_mac, ipv6_lla): - q_context = context.Context('', 'tenant_1') - device_id = _uuid() - with self.subnet(ip_version=ip_version, - enable_dhcp=(False if ip_version == 6 else True), - cidr=subnet_cidr, - gateway_ip=None) as subnet, \ - mock.patch.object(edge_utils.EdgeManager, - 'delete_dhcp_binding') as delete_dhcp_binding: - fixed_ip_data = [{'ip_address': port_ip, - 'subnet_id': subnet['subnet']['id']}] - with self.port(subnet=subnet, - device_id=device_id, - mac_address=port_mac, - fixed_ips=fixed_ip_data) as port: - # set port as compute first - res = self._update_port_index( - port['port']['id'], device_id, None) - self.assertIsNone(res['port']['vnic_index']) - - self.fc2.approve_assigned_addresses = ( - mock.Mock().approve_assigned_addresses) - self.fc2.publish_assigned_addresses = ( - mock.Mock().publish_assigned_addresses) - self.fc2.inactivate_vnic_assigned_addresses = ( - mock.Mock().inactivate_vnic_assigned_addresses) - vnic_index = 3 - res = self._update_port_index( - port['port']['id'], device_id, vnic_index) - self.assertEqual(vnic_index, res['port']['vnic_index']) - - policy_id = nsxv_db.get_spoofguard_policy_id( - q_context.session, port['port']['network_id']) - vnic_id = '%s.%03d' % (device_id, vnic_index) - - # Verify that the spoofguard policy assigned and published - expected_ips = [port_ip] - if ipv6_lla: - expected_ips.append(ipv6_lla) - (self.fc2.approve_assigned_addresses. - assert_called_once_with(policy_id, vnic_id, port_mac, - expected_ips)) - (self.fc2.publish_assigned_addresses. - assert_called_once_with(policy_id, vnic_id)) - - # Updating the vnic_index to None implies the vnic does - # no longer obtain the addresses associated with this port, - # we need to inactivate previous addresses configurations for - # this vnic in the context of this network spoofguard policy. - res = self._update_port_index(port['port']['id'], '', None) - - (self.fc2.inactivate_vnic_assigned_addresses. 
- assert_called_once_with(policy_id, vnic_id)) - self.assertTrue(delete_dhcp_binding.called) - - def test_update_port_index(self): - ip_version = 4 - subnet_cidr = '10.0.0.0/24' - port_ip = '10.0.0.8' - port_mac = '00:00:00:00:00:02' - ipv6_lla = None - self._test_update_port_index_and_spoofguard( - ip_version, - subnet_cidr, - port_ip, - port_mac, - ipv6_lla) - - def test_update_port_index_ipv6(self): - ip_version = 6 - subnet_cidr = 'ae80::/64' - port_mac = '00:00:00:00:00:02' - ipv6_lla = 'fe80::200:ff:fe00:2' - port_ip = 'ae80::2' - self._test_update_port_index_and_spoofguard( - ip_version, - subnet_cidr, - port_ip, - port_mac, - ipv6_lla) - - def test_update_port_with_compute_device_owner(self): - """ - Test that DHCP binding is created when ports 'device_owner' - is updated to compute, for example when attaching an interface to a - instance with existing port. - """ - with self.port() as port: - with mock.patch(PLUGIN_NAME + '._create_dhcp_static_binding'): - update = {'port': {'device_owner'}} - self.new_update_request('ports', - update, port['port']['id']) - - @with_no_dhcp_subnet - def test_ports_vif_host(self): - return super(TestPortsV2, self).test_ports_vif_host() - - @with_no_dhcp_subnet - def test_ports_vif_host_update(self): - return super(TestPortsV2, self).test_ports_vif_host_update() - - @with_no_dhcp_subnet - def test_ports_vif_details(self): - return super(TestPortsV2, self).test_ports_vif_details() - - @with_no_dhcp_subnet - def test_ports_vnic_type(self): - return super(TestPortsV2, self).test_ports_vnic_type() - - @with_no_dhcp_subnet - def test_ports_vnic_type_list(self): - vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} - with self.subnet(enable_dhcp=False) as subnet,\ - self.port(subnet, name='name1', - arg_list=(portbindings.VNIC_TYPE,), - **vnic_arg) as port1,\ - self.port(subnet, name='name2') as port2,\ - self.port(subnet, name='name3', - arg_list=(portbindings.VNIC_TYPE,), - **vnic_arg) as port3: - - self._test_list_resources('port', (port1, port2, port3), - query_params='%s=%s' % ( - portbindings.VNIC_TYPE, - self.vnic_type)) - - def test_port_invalid_vnic_type(self): - with self._test_create_direct_network(vlan_id=7) as network: - kwargs = {portbindings.VNIC_TYPE: 'invalid', - psec.PORTSECURITY: False} - net_id = network['network']['id'] - res = self._create_port(self.fmt, net_id=net_id, - arg_list=(portbindings.VNIC_TYPE, - psec.PORTSECURITY), - **kwargs) - self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) - - def test_range_allocation(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_requested_subnet_id_v4_and_v6(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - @with_no_dhcp_subnet - def test_update_port_update_ip(self): - return super(TestPortsV2, self).test_update_port_update_ip() - - @with_no_dhcp_subnet - def test_update_port_update_ips(self): - return super(TestPortsV2, self).test_update_port_update_ips() - - def test_update_port_update_ip_dhcp(self): - #Test updating a port IP when the device owner is DHCP - with self.subnet(enable_dhcp=False) as subnet: - with self.port(subnet=subnet, - device_owner=constants.DEVICE_OWNER_DHCP) as port: - data = {'port': {'fixed_ips': [{'subnet_id': - subnet['subnet']['id'], - 'ip_address': "10.0.0.10"}]}} - plugin = directory.get_plugin() - ctx = context.get_admin_context() - with mock.patch.object( - plugin.edge_manager, - 'update_dhcp_edge_service') as update_dhcp: - plugin.update_port(ctx, port['port']['id'], data) - 
self.assertTrue(update_dhcp.called) - - def test_update_port_update_ip_compute(self): - #Test that updating a port IP succeed if the device owner starts - #with compute. - owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'xxx' - with self.subnet(enable_dhcp=False) as subnet: - with self.port(subnet=subnet, device_id=_uuid(), - device_owner=owner) as port: - data = {'port': {'fixed_ips': [{'subnet_id': - subnet['subnet']['id'], - 'ip_address': "10.0.0.10"}]}} - plugin = directory.get_plugin() - with mock.patch.object( - plugin.edge_manager, - 'delete_dhcp_binding') as delete_dhcp: - with mock.patch.object( - plugin.edge_manager, - 'create_static_binding') as create_static: - with mock.patch.object( - plugin.edge_manager, - 'create_dhcp_bindings') as create_dhcp: - plugin.update_port(context.get_admin_context(), - port['port']['id'], data) - self.assertTrue(delete_dhcp.called) - self.assertTrue(create_static.called) - self.assertTrue(create_dhcp.called) - - def test_update_port_update_ip_and_owner_fail(self): - #Test that updating a port IP and device owner at the same - #transaction fails - with self.subnet(enable_dhcp=False) as subnet: - with self.port(subnet=subnet, - device_owner='aaa') as port: - data = {'port': {'device_owner': 'bbb', - 'fixed_ips': [{'subnet_id': - subnet['subnet']['id'], - 'ip_address': "10.0.0.10"}]}} - plugin = directory.get_plugin() - self.assertRaises(n_exc.BadRequest, - plugin.update_port, - context.get_admin_context(), - port['port']['id'], data) - - def test_update_port_update_ip_router(self): - #Test that updating a port IP succeed if the device owner is a router - owner = constants.DEVICE_OWNER_ROUTER_GW - router_id = _uuid() - old_ip = '10.0.0.3' - new_ip = '10.0.0.10' - with self.subnet(enable_dhcp=False) as subnet: - with self.port(subnet=subnet, device_id=router_id, - device_owner=owner, - fixed_ips=[{'ip_address': old_ip}]) as port: - data = {'port': {'fixed_ips': [{'subnet_id': - subnet['subnet']['id'], - 'ip_address': new_ip}]}} - plugin = directory.get_plugin() - ctx = context.get_admin_context() - router_obj = router_driver.RouterSharedDriver(plugin) - with mock.patch.object(plugin, '_find_router_driver', - return_value=router_obj): - with mock.patch.object( - router_obj, - 'update_router_interface_ip') as update_router: - port_id = port['port']['id'] - plugin.update_port(ctx, port_id, data) - net_id = port['port']['network_id'] - update_router.assert_called_once_with( - ctx, - router_id, - port_id, - net_id, - old_ip, - new_ip, "255.255.255.0") - - def test_update_port_update_ip_unattached_router(self): - #Test that updating a port IP succeed if the device owner is a router - #and the shared router is not attached to any edge yet - owner = constants.DEVICE_OWNER_ROUTER_GW - router_id = _uuid() - old_ip = '10.0.0.3' - new_ip = '10.0.0.10' - with self.subnet(enable_dhcp=False) as subnet: - with self.port(subnet=subnet, device_id=router_id, - device_owner=owner, - fixed_ips=[{'ip_address': old_ip}]) as port: - data = {'port': {'fixed_ips': [{'subnet_id': - subnet['subnet']['id'], - 'ip_address': new_ip}]}} - plugin = directory.get_plugin() - ctx = context.get_admin_context() - router_obj = router_driver.RouterSharedDriver(plugin) - with mock.patch.object(plugin, '_find_router_driver', - return_value=router_obj): - # make sure the router will not be attached to an edge - with mock.patch.object( - edge_utils, 'get_router_edge_id', - return_value=None): - port_id = port['port']['id'] - # The actual test here is that this call does not - # raise an 
exception - new_port = plugin.update_port(ctx, port_id, data) - ips = new_port['fixed_ips'] - self.assertEqual(len(ips), 1) - self.assertEqual(ips[0]['ip_address'], new_ip) - self.assertEqual(ips[0]['subnet_id'], - subnet['subnet']['id']) - - def test_update_port_delete_ip_router(self): - #Test that deleting a port IP succeed if the device owner is a router - owner = constants.DEVICE_OWNER_ROUTER_GW - router_id = _uuid() - old_ip = '10.0.0.3' - with self.subnet(enable_dhcp=False) as subnet: - with self.port(subnet=subnet, device_id=router_id, - device_owner=owner, - fixed_ips=[{'ip_address': old_ip}]) as port: - data = {'port': {'fixed_ips': []}} - plugin = directory.get_plugin() - ctx = context.get_admin_context() - router_obj = router_driver.RouterSharedDriver(plugin) - with mock.patch.object(plugin, '_find_router_driver', - return_value=router_obj): - with mock.patch.object( - router_obj, - 'update_router_interface_ip') as update_router: - port_id = port['port']['id'] - plugin.update_port(ctx, port_id, data) - net_id = port['port']['network_id'] - update_router.assert_called_once_with( - ctx, - router_id, - port_id, - net_id, - old_ip, - None, None) - - def test_update_port_add_additional_ip(self): - """Test update of port with additional IP fails.""" - with self.subnet() as subnet: - with self.port(subnet=subnet) as port: - data = {'port': {'admin_state_up': False, - 'fixed_ips': [{'subnet_id': - subnet['subnet']['id']}, - {'subnet_id': - subnet['subnet']['id']}]}} - req = self.new_update_request('ports', data, - port['port']['id']) - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPBadRequest.code, - res.status_int) - - def test_create_port_additional_ip(self): - """Test that creation of port with additional IP fails.""" - with self.subnet() as subnet: - data = {'port': {'network_id': subnet['subnet']['network_id'], - 'tenant_id': subnet['subnet']['tenant_id'], - 'fixed_ips': [{'subnet_id': - subnet['subnet']['id']}, - {'subnet_id': - subnet['subnet']['id']}]}} - port_req = self.new_create_request('ports', data) - res = port_req.get_response(self.api) - self.assertEqual(webob.exc.HTTPBadRequest.code, - res.status_int) - - def test_update_port_update_ip_address_only(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_requested_invalid_fixed_ips(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_requested_subnet_id_v4_and_v6_slaac(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_update_dhcp_port_with_exceeding_fixed_ips(self): - self.skipTest('Updating dhcp port IP is not supported') - - def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): - # This test should fail as the NSX-v plugin should cause Neutron to - # return a 400 status code - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - super(TestPortsV2, self).\ - test_create_port_with_multiple_ipv4_and_ipv6_subnets() - self.assertEqual(ctx_manager.exception.code, 400) - - @with_no_dhcp_subnet - def test_list_ports_for_network_owner(self): - return super(TestPortsV2, self).test_list_ports_for_network_owner() - - def test_mac_duplication(self): - # create 2 networks - res = self._create_network(fmt=self.fmt, name='net1', - admin_state_up=True) - network1 = self.deserialize(self.fmt, res) - net1_id = network1['network']['id'] - - res = 
self._create_network(fmt=self.fmt, name='net2', - admin_state_up=True) - network2 = self.deserialize(self.fmt, res) - net2_id = network2['network']['id'] - - # create a port on the first network - mac = '33:00:00:00:00:01' - res = self._create_port(self.fmt, net_id=net1_id, - arg_list=('mac_address',), - mac_address=mac) - port1 = self.deserialize('json', res) - self.assertEqual(mac, port1['port']['mac_address']) - - # creating another port on a different network with the same mac - # should fail - res = self._create_port(self.fmt, net_id=net2_id, - arg_list=('mac_address',), - mac_address=mac) - port2 = self.deserialize('json', res) - self.assertEqual("MacAddressInUse", port2['NeutronError']['type']) - - def _test_create_direct_network(self, vlan_id=0): - net_type = vlan_id and 'vlan' or 'flat' - name = 'direct_net' - providernet_args = {pnet.NETWORK_TYPE: net_type, - pnet.PHYSICAL_NETWORK: 'tzuuid'} - if vlan_id: - providernet_args[pnet.SEGMENTATION_ID] = vlan_id - return self.network(name=name, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK, - pnet.SEGMENTATION_ID)) - - def test_create_port_vnic_direct(self): - with self._test_create_direct_network(vlan_id=7) as network: - # Check that port security conflicts - kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, - psec.PORTSECURITY: True} - net_id = network['network']['id'] - res = self._create_port(self.fmt, net_id=net_id, - arg_list=(portbindings.VNIC_TYPE, - psec.PORTSECURITY), - **kwargs) - self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) - - # Check that security group conflicts - kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, - 'security_groups': [ - '4cd70774-cc67-4a87-9b39-7d1db38eb087'], - psec.PORTSECURITY: False} - net_id = network['network']['id'] - res = self._create_port(self.fmt, net_id=net_id, - arg_list=(portbindings.VNIC_TYPE, - psec.PORTSECURITY), - **kwargs) - self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) - - # All is kosher so we can create the port - kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT} - net_id = network['network']['id'] - res = self._create_port(self.fmt, net_id=net_id, - arg_list=(portbindings.VNIC_TYPE,), - **kwargs) - port = self.deserialize('json', res) - self.assertEqual("direct", port['port'][portbindings.VNIC_TYPE]) - - def test_create_port_vnic_direct_invalid_network(self): - with self.network(name='not vlan/flat') as net: - kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, - psec.PORTSECURITY: False} - net_id = net['network']['id'] - res = self._create_port(self.fmt, net_id=net_id, - arg_list=(portbindings.VNIC_TYPE, - psec.PORTSECURITY), - **kwargs) - self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) - - def test_update_vnic_direct(self): - with self._test_create_direct_network(vlan_id=7) as network: - with self.subnet(network=network) as subnet: - with self.port(subnet=subnet) as port: - # need to do two updates as the update for port security - # disabled requires that it can only change 2 items - data = {'port': {psec.PORTSECURITY: False, - 'security_groups': []}} - req = self.new_update_request('ports', - data, port['port']['id']) - res = self.deserialize('json', req.get_response(self.api)) - self.assertEqual(portbindings.VNIC_NORMAL, - res['port'][portbindings.VNIC_TYPE]) - - data = {'port': {portbindings.VNIC_TYPE: - portbindings.VNIC_DIRECT}} - - req = self.new_update_request('ports', - data, port['port']['id']) - res = self.deserialize('json', 
-                                           req.get_response(self.api))
-                    self.assertEqual(portbindings.VNIC_DIRECT,
-                                     res['port'][portbindings.VNIC_TYPE])
-
-    def test_delete_network_port_exists_owned_by_network_port_not_found(self):
-        """Tests that we continue to gracefully delete the network even if
-        a neutron:dhcp-owned port was deleted concurrently.
-        """
-        res = self._create_network(fmt=self.fmt, name='net',
-                                   admin_state_up=True)
-        network = self.deserialize(self.fmt, res)
-        network_id = network['network']['id']
-        self._create_port(self.fmt, network_id,
-                          device_owner=constants.DEVICE_OWNER_DHCP)
-        # Raise PortNotFound when trying to delete the port to simulate a
-        # concurrent delete race; note that we actually have to delete the port
-        # "out of band" otherwise deleting the network will fail because of
-        # constraints in the data model.
-        plugin = directory.get_plugin()
-        orig_delete = plugin.delete_port
-
-        def fake_delete_port(context, id, force_delete_dhcp=False):
-            # Delete the port for real from the database and then raise
-            # PortNotFound to simulate the race.
-            self.assertIsNone(orig_delete(
-                context, id,
-                force_delete_dhcp=force_delete_dhcp))
-            raise n_exc.PortNotFound(port_id=id)
-
-        p = mock.patch.object(plugin, 'delete_port')
-        mock_del_port = p.start()
-        mock_del_port.side_effect = fake_delete_port
-        req = self.new_delete_request('networks', network_id)
-        res = req.get_response(self.api)
-        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
-
-    def test_create_port_sec_disabled_and_provider_rule(self):
-        with self.network() as network:
-            kwargs = {'provider_security_groups': [uuidutils.generate_uuid()],
-                      'port_security_enabled': False}
-            res = self._create_port(self.fmt,
-                                    network['network']['id'],
-                                    arg_list=('provider_security_groups',
-                                              'port_security_enabled'),
-                                    **kwargs)
-            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)
-
-    def test_update_port_sec_disabled_and_provider_rule(self):
-        with self.port() as port:
-            with mock.patch(
-                PLUGIN_NAME + '._get_provider_security_groups_on_port'):
-                data = {'port': {'port_security_enabled': False}}
-                req = self.new_update_request('ports',
-                                              data, port['port']['id'])
-                res = self.deserialize('json', req.get_response(self.api))
-                self.assertEqual("PortSecurityAndIPRequiredForSecurityGroups",
-                                 res['NeutronError']['type'])
-
-    def test_port_add_to_spoofguard_allow_multiple_addresses(self):
-        # allow_multiple_addresses flag is True, first step is to check that
-        # when port-security-allowed is false - spoofguard policy is not
-        # created.
-        # next step is to update port-security-allowed to true - spoofguard
-        # policy is now created for this network.
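The spoofguard tests below identify the VM interface with a vnic id built from the instance uuid and a zero-padded vnic index, the same '%s.%03d' format asserted in the removed code. A tiny illustrative helper (not imported from the plugin):

    def make_vnic_id(device_id, vnic_index):
        # Zero-pads the vnic index to three digits, as NsxV expects.
        return '%s.%03d' % (device_id, vnic_index)

    assert make_vnic_id('11111111-2222-3333-4444-555555555555', 3) == (
        '11111111-2222-3333-4444-555555555555.003')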
-        providernet_args = {psec.PORTSECURITY: False}
-        cfg.CONF.set_default('allow_multiple_ip_addresses', True, 'nsxv')
-        res = self._create_network(fmt='json', name='net-1',
-                                   admin_state_up=True,
-                                   providernet_args=providernet_args,
-                                   arg_list=(psec.PORTSECURITY,))
-        network1 = self.deserialize(self.fmt, res)
-        net1_id = network1['network']['id']
-        with self.subnet(network=network1, cidr='10.0.0.0/24'):
-            # create a compute port with port security
-            address_pairs = [{'ip_address': '192.168.1.1'}]
-            device_id = _uuid()
-            vnic_index = 3
-            compute_port_create = self._create_port(
-                'json', net1_id,
-                arg_list=(
-                    'port_security_enabled',
-                    'device_id',
-                    'device_owner',
-                    'allowed_address_pairs',),
-                port_security_enabled=True,
-                device_id=device_id,
-                device_owner='compute:None',
-                allowed_address_pairs=address_pairs)
-            port = self.deserialize('json', compute_port_create)
-            port = self._update_port_index(
-                port['port']['id'], device_id, vnic_index)
-            # Verify the port is added to the spoofguard policy
-            with mock.patch.object(
-                self.plugin, '_update_vnic_assigned_addresses') as \
-                    update_approved_port:
-                args = {'network': {psec.PORTSECURITY: True}}
-                req = self.new_update_request('networks', args, net1_id,
-                                              fmt='json')
-                req.get_response(self.api)
-                # The expected vnic-id format by NsxV
-                update_approved_port.assert_called_once_with(
-                    mock.ANY, mock.ANY, '%s.%03d' % (device_id, vnic_index))
-
-    def test_port_add_to_spoofguard_allow_multiple_addresses_fail(self):
-        # allow_multiple_addresses flag is True, first step is to check that
-        # when port-security-allowed is false - spoofguard policy is not
-        # created.
-        # next step is to update port-security-allowed to true, but the port
-        # has a CIDR defined as an address pair - the action is aborted.
-        providernet_args = {psec.PORTSECURITY: False}
-        cfg.CONF.set_default('allow_multiple_ip_addresses', True, 'nsxv')
-        res = self._create_network(fmt='json', name='net-1',
-                                   admin_state_up=True,
-                                   providernet_args=providernet_args,
-                                   arg_list=(psec.PORTSECURITY,))
-        network1 = self.deserialize(self.fmt, res)
-        net1_id = network1['network']['id']
-        with self.subnet(network=network1, cidr='10.0.0.0/24'):
-            # create a compute port with port security
-            address_pairs = [{'ip_address': '192.168.1.0/24'}]
-            device_id = _uuid()
-            vnic_index = 3
-            compute_port_create = self._create_port(
-                'json', net1_id,
-                arg_list=(
-                    'port_security_enabled',
-                    'device_id',
-                    'device_owner',
-                    'allowed_address_pairs',),
-                port_security_enabled=True,
-                device_id=device_id,
-                device_owner='compute:None',
-                allowed_address_pairs=address_pairs)
-            port = self.deserialize('json', compute_port_create)
-            port = self._update_port_index(
-                port['port']['id'], device_id, vnic_index)
-            # The action fails due to the CIDR defined in the port.
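The failure case above hinges on the allowed address pair being a CIDR rather than a single host address. A self-contained way to make that distinction with the standard library (the plugin's own validation may differ):

    import ipaddress

    def pair_is_cidr(ip_value):
        # A /32 (or /128) host entry covers one address; anything wider is a
        # CIDR, which the spoofguard update cannot approve.
        return ipaddress.ip_network(ip_value, strict=False).num_addresses > 1

    assert pair_is_cidr('192.168.1.1') is False     # single IP: allowed
    assert pair_is_cidr('192.168.1.0/24') is True   # CIDR: update aborted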
- args = {'network': {psec.PORTSECURITY: True}} - plugin = directory.get_plugin() - self.assertRaises(n_exc.BadRequest, - plugin.update_network, - context.get_admin_context(), - net1_id, args) - - -class TestSubnetsV2(NsxVPluginV2TestCase, - test_plugin.TestSubnetsV2): - def setUp(self, - plugin=PLUGIN_NAME, - ext_mgr=None, - service_plugins=None): - super(TestSubnetsV2, self).setUp() - self.context = context.get_admin_context() - - def _test_subnet_update_ipv4_and_ipv6_pd_subnets(self, ra_addr_mode): - self.skipTest('No DHCP v6 Support yet') - - def test__subnet_ipv6_not_supported(self): - with self.network() as network: - data = {'subnet': {'network_id': network['network']['id'], - 'gateway': 'fe80::1', - 'cidr': '2607:f0d0:1002:51::/64', - 'ip_version': '6', - 'tenant_id': network['network']['tenant_id']}} - subnet_req = self.new_create_request('subnets', data) - res = subnet_req.get_response(self.api) - self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) - - def test_create_subnet_ipv6_gw_is_nw_start_addr(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_gw_is_nw_start_addr_canonicalize(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_gw_is_nw_end_addr(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_first_ip_owned_by_router(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_first_ip_owned_by_non_router(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_out_of_cidr_global(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_pd_gw_values(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_slaac_with_port_on_network(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): - self.skipTest('No DHCP v6 Support yet') - - def test_delete_subnet_ipv6_slaac_port_exists(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_out_of_cidr_lla(self): - self.skipTest('No DHCP v6 Support yet') - - def test_xxxa(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_only_ip_version_v6(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_subnet_ipv6_address_mode_fails(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_with_v6_allocation_pool(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_with_v6_pd_allocation_pool(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_subnet_ipv6_ra_mode_fails(self): - self.skipTest('No DHCP v6 Support yet') - - def test_delete_subnet_ipv6_slaac_router_port_exists(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_subnet_inconsistent_ipv6_gatewayv4(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_subnet_ipv6_attributes_fails(self): - self.skipTest('No DHCP v6 Support 
yet') - - def test_update_subnet_ipv6_cannot_disable_dhcp(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_V6_pd_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_V6_pd_stateless(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_V6_pd_statefull(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_V6_pd_no_mode(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnets_bulk_native_ipv6(self): - self.skipTest('No DHCP v6 Support yet') - - def _create_subnet_bulk(self, fmt, number, net_id, name, - ip_version=4, **kwargs): - base_data = {'subnet': {'network_id': net_id, - 'ip_version': ip_version, - 'enable_dhcp': False, - 'tenant_id': self._tenant_id}} - - if 'ipv6_mode' in kwargs: - base_data['subnet']['ipv6_ra_mode'] = kwargs['ipv6_mode'] - base_data['subnet']['ipv6_address_mode'] = kwargs['ipv6_mode'] - # auto-generate cidrs as they should not overlap - base_cidr = "10.0.%s.0/24" - if ip_version == constants.IP_VERSION_6: - base_cidr = "fd%s::/64" - - # auto-generate cidrs as they should not overlap - overrides = dict((k, v) - for (k, v) in zip(range(number), - [{'cidr': base_cidr % num} - for num in range(number)])) - kwargs.update({'override': overrides}) - return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs) - - @with_no_dhcp_subnet - def test_create_subnet_nonzero_cidr(self): - return super(TestSubnetsV2, self).test_create_subnet_nonzero_cidr() - - def test_create_subnet_ipv6_attributes(self): - # Expected to fail for now as we don't support IPv6 for NSXv - cidr = "fe80::/80" - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - self._test_create_subnet(cidr=cidr) - self.assertEqual(ctx_manager.exception.code, 400) - - def test_create_subnet_with_different_dhcp_server(self): - self.mock_create_dhcp_service.stop() - name = 'dvs-provider-net' - providernet_args = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 43, - pnet.PHYSICAL_NETWORK: 'dvs-uuid'} - with self.network(name=name, do_delete=False, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)) as net: - self._test_create_subnet(network=net, cidr='10.0.0.0/24') - dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX + - net['network']['id'])[:36] - dhcp_server_id = nsxv_db.get_nsxv_router_binding( - self.context.session, dhcp_router_id)['edge_id'] - providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 43, - pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'} - with self.network(name=name, do_delete=False, - providernet_args=providernet_args_1, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)) as net1: - self._test_create_subnet(network=net1, cidr='10.0.1.0/24') - router_id = (vcns_const.DHCP_EDGE_PREFIX + - net1['network']['id'])[:36] - dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding( - self.context.session, router_id)['edge_id'] - self.assertNotEqual(dhcp_server_id, dhcp_server_id_1) - - def test_create_subnet_with_different_dhcp_by_flat_net(self): - self.mock_create_dhcp_service.stop() - name = 'flat-net' - providernet_args = {pnet.NETWORK_TYPE: 'flat', - pnet.PHYSICAL_NETWORK: 'dvs-uuid'} - with self.network(name=name, 
do_delete=False, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK)) as net: - self._test_create_subnet(network=net, cidr='10.0.0.0/24') - dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX + - net['network']['id'])[:36] - dhcp_server_id = nsxv_db.get_nsxv_router_binding( - self.context.session, dhcp_router_id)['edge_id'] - providernet_args_1 = {pnet.NETWORK_TYPE: 'flat', - pnet.PHYSICAL_NETWORK: 'dvs-uuid'} - with self.network(name=name, do_delete=False, - providernet_args=providernet_args_1, - arg_list=(pnet.NETWORK_TYPE, - pnet.PHYSICAL_NETWORK)) as net1: - self._test_create_subnet(network=net1, cidr='10.0.1.0/24') - router_id = (vcns_const.DHCP_EDGE_PREFIX + - net1['network']['id'])[:36] - dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding( - self.context.session, router_id)['edge_id'] - self.assertNotEqual(dhcp_server_id, dhcp_server_id_1) - - def test_create_subnets_with_different_tenants_non_shared(self): - cfg.CONF.set_override('share_edges_between_tenants', False, - group="nsxv") - self.mock_create_dhcp_service.stop() - # create 2 networks with different tenants - with self.network(name='net1', tenant_id='fake1') as net1,\ - self.network(name='net2', tenant_id='fake2') as net2: - # create 2 non-overlapping subnets - self._test_create_subnet(network=net1, cidr='10.0.0.0/24') - router_id1 = (vcns_const.DHCP_EDGE_PREFIX + - net1['network']['id'])[:36] - edge1 = nsxv_db.get_nsxv_router_binding( - self.context.session, router_id1)['edge_id'] - - self._test_create_subnet(network=net2, cidr='20.0.0.0/24') - router_id2 = (vcns_const.DHCP_EDGE_PREFIX + - net2['network']['id'])[:36] - edge2 = nsxv_db.get_nsxv_router_binding( - self.context.session, router_id2)['edge_id'] - # make sure we have 2 separate dhcp edges - self.assertNotEqual(edge1, edge2) - - def test_create_subnets_with_different_tenants_shared(self): - cfg.CONF.set_override('share_edges_between_tenants', True, - group="nsxv") - self.mock_create_dhcp_service.stop() - # create 2 networks with different tenants - with self.network(name='net1', tenant_id='fake1') as net1,\ - self.network(name='net2', tenant_id='fake2') as net2: - # create 2 non-overlapping subnets - self._test_create_subnet(network=net1, cidr='10.0.0.0/24') - router_id1 = (vcns_const.DHCP_EDGE_PREFIX + - net1['network']['id'])[:36] - edge1 = nsxv_db.get_nsxv_router_binding( - self.context.session, router_id1)['edge_id'] - - self._test_create_subnet(network=net2, cidr='20.0.0.0/24') - router_id2 = (vcns_const.DHCP_EDGE_PREFIX + - net2['network']['id'])[:36] - edge2 = nsxv_db.get_nsxv_router_binding( - self.context.session, router_id2)['edge_id'] - # make sure we have both networks on the same dhcp edges - self.assertEqual(edge1, edge2) - - def test_create_subnet_ipv6_slaac_with_db_reference_error(self): - self.skipTest('Currently not supported') - - def test_create_subnet_ipv6_slaac_with_port_not_found(self): - self.skipTest('Currently not supported') - - def test_bulk_create_subnet_ipv6_auto_addr_with_port_on_network(self): - self.skipTest('Currently not supported') - - def test_create_subnet_ipv6_gw_values(self): - # This test should fail with response code 400 as IPv6 subnets with - # DHCP are not supported by this plugin - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - super(TestSubnetsV2, self).test_create_subnet_ipv6_gw_values() - self.assertEqual(ctx_manager.exception.code, 400) - - def test_create_subnet_only_ip_version_v6_old(self): - self.skipTest('Currently not supported') - - def 
test_create_subnet_reserved_network(self): - self.mock_create_dhcp_service.stop() - name = 'overlap-reserved-net' - providernet_args = {pnet.NETWORK_TYPE: 'flat', - pnet.PHYSICAL_NETWORK: 'dvs-uuid'} - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - with self.network(name=name, do_delete=False, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)) as net: - self._test_create_subnet(network=net, - cidr='169.254.128.128/25') - self.assertEqual(ctx_manager.exception.code, 400) - - def test_cannot_delete_md_subnet(self): - if self.internal_net_id: - query_params = "network_id=%s" % self.internal_net_id - res = self._list('subnets', - neutron_context=self.context, - query_params=query_params) - internal_sub = res['subnets'][0]['id'] - req = self.new_delete_request('subnets', internal_sub) - net_del_res = req.get_response(self.api) - self.assertEqual(net_del_res.status_int, 400) - - -class TestSubnetPoolsV2(NsxVPluginV2TestCase, test_plugin.TestSubnetsV2): - def setUp(self, - plugin=PLUGIN_NAME, - ext_mgr=None, - service_plugins=None): - super(TestSubnetPoolsV2, self).setUp() - self.context = context.get_admin_context() - - def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): - self.skipTest('No DHCP v6 Support yet') - - def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_gw_is_nw_start_addr(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_gw_is_nw_start_addr_canonicalize(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_gw_is_nw_end_addr(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_first_ip_owned_by_router(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_first_ip_owned_by_non_router(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_out_of_cidr_global(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_V6_pd_stateless(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_V6_pd_slaac(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_gw_values(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_out_of_cidr_lla(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_pd_gw_values(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_slaac_with_db_reference_error(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_slaac_with_port_not_found(self): - self.skipTest('Not supported') - - def test_bulk_create_subnet_ipv6_auto_addr_with_port_on_network(self): - self.skipTest('Currently not supported') - - def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_slaac_with_port_on_network(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): - self.skipTest('Not supported') - - def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): - 
self.skipTest('Not supported') - - def test_create_subnet_only_ip_version_v6(self): - self.skipTest('Not supported') - - def test_create_subnet_with_v6_allocation_pool(self): - self.skipTest('Not supported') - - def test_create_subnet_with_v6_pd_allocation_pool(self): - self.skipTest('Not supported') - - def test_delete_subnet_ipv6_slaac_port_exists(self): - self.skipTest('Not supported') - - def test_delete_subnet_ipv6_slaac_router_port_exists(self): - self.skipTest('Not supported') - - def test_update_subnet_inconsistent_ipv6_gatewayv4(self): - self.skipTest('Not supported') - - def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): - self.skipTest('Not supported') - - def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): - self.skipTest('Not supported') - - def test_update_subnet_ipv6_address_mode_fails(self): - self.skipTest('Not supported') - - def test_update_subnet_ipv6_attributes_fails(self): - self.skipTest('Not supported') - - def test_update_subnet_ipv6_cannot_disable_dhcp(self): - self.skipTest('Not supported') - - def test_update_subnet_ipv6_ra_mode_fails(self): - self.skipTest('Not supported') - - def test_create_subnet_only_ip_version_v6_old(self): - self.skipTest('Currently not supported') - - def test_create_subnets_bulk_native_ipv6(self): - self.skipTest('No DHCP v6 Support yet') - - -class TestBasicGet(test_plugin.TestBasicGet, NsxVPluginV2TestCase): - pass - - -class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxVPluginV2TestCase): - pass - - -class TestL3ExtensionManager(object): - - def get_resources(self): - # Simulate extension of L3 attribute map - l3.L3().update_attributes_map( - l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP) - l3.L3().update_attributes_map( - dvr_apidef.RESOURCE_ATTRIBUTE_MAP) - l3.L3().update_attributes_map( - router_type.EXTENDED_ATTRIBUTES_2_0) - l3.L3().update_attributes_map( - router_size.EXTENDED_ATTRIBUTES_2_0) - l3.L3().update_attributes_map( - raz_apidef.RESOURCE_ATTRIBUTE_MAP) - l3.L3().update_attributes_map( - l3fav_apidef.RESOURCE_ATTRIBUTE_MAP) - return (l3.L3.get_resources() + - address_scope.Address_scope.get_resources()) - - def get_actions(self): - return [] - - def get_request_extensions(self): - return [] - - -class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxVPluginV2TestCase): - - def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): - cfg.CONF.set_override('task_status_check_interval', 200, group="nsxv") - - cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) - ext_mgr = ext_mgr or TestL3ExtensionManager() - super(L3NatTest, self).setUp( - plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) - self.plugin_instance = directory.get_plugin() - self._plugin_name = "%s.%s" % ( - self.plugin_instance.__module__, - self.plugin_instance.__class__.__name__) - self._plugin_class = self.plugin_instance.__class__ - - def tearDown(self): - plugin = directory.get_plugin() - _manager = plugin.nsx_v.task_manager - # wait max ~10 seconds for all tasks to be finished - for i in range(100): - if not _manager.has_pending_task(): - break - greenthread.sleep(0.1) - if _manager.has_pending_task(): - _manager.show_pending_tasks() - raise Exception(_("Tasks not completed")) - _manager.stop() - # Ensure the manager thread has been stopped - self.assertIsNone(_manager._thread) - super(L3NatTest, self).tearDown() - - def _create_l3_ext_network(self, vlan_id=None): - name = 'l3_ext_net' - return self.network(name=name, - router__external=True) - - def _create_router(self, fmt, tenant_id, 
name=None, - admin_state_up=None, set_context=False, - arg_list=None, **kwargs): - tenant_id = tenant_id or _uuid() - data = {'router': {'tenant_id': tenant_id}} - if name: - data['router']['name'] = name - if admin_state_up: - data['router']['admin_state_up'] = admin_state_up - for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): - # Arg must be present and not empty - if kwargs.get(arg): - data['router'][arg] = kwargs[arg] - router_req = self.new_create_request('routers', data, fmt) - if set_context and tenant_id: - # create a specific auth context for this request - router_req.environ['neutron.context'] = context.Context( - '', tenant_id) - - return router_req.get_response(self.ext_api) - - def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None, - external_gateway_info=None, set_context=False, - arg_list=None, **kwargs): - if external_gateway_info: - arg_list = ('external_gateway_info', ) + (arg_list or ()) - res = self._create_router(fmt, tenant_id, name, - admin_state_up, set_context, - arg_list=arg_list, - external_gateway_info=external_gateway_info, - **kwargs) - return self.deserialize(fmt, res) - - @contextlib.contextmanager - def router(self, name=None, admin_state_up=True, - fmt=None, tenant_id=None, - external_gateway_info=None, set_context=False, - **kwargs): - # avoid name duplication of edge - if not name: - name = _uuid() - router = self._make_router(fmt or self.fmt, tenant_id, name, - admin_state_up, external_gateway_info, - set_context, **kwargs) - yield router - - def _recursive_sort_list(self, lst): - sorted_list = [] - for ele in lst: - if isinstance(ele, list): - sorted_list.append(self._recursive_sort_list(ele)) - elif isinstance(ele, dict): - sorted_list.append(self._recursive_sort_dict(ele)) - else: - sorted_list.append(ele) - return sorted(sorted_list, key=helpers.safe_sort_key) - - def _recursive_sort_dict(self, dct): - sorted_dict = {} - for k, v in dct.items(): - if isinstance(v, list): - sorted_dict[k] = self._recursive_sort_list(v) - elif isinstance(v, dict): - sorted_dict[k] = self._recursive_sort_dict(v) - else: - sorted_dict[k] = v - return sorted_dict - - def _update_router_enable_snat(self, router_id, network_id, enable_snat): - return self._update('routers', router_id, - {'router': {'external_gateway_info': - {'network_id': network_id, - 'enable_snat': enable_snat}}}) - - def test_floatingip_association_on_unowned_router(self): - self.skipTest("Currently no support in plugin for this") - - def test_router_add_gateway_no_subnet(self): - self.skipTest('No support for no subnet gateway set') - - def test_floatingip_create_different_fixed_ip_same_port(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_router_add_interface_multiple_ipv6_subnet_port(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_floatingip_update_different_fixed_ip_same_port(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def test_create_multiple_floatingips_same_fixed_ip_same_port(self): - self.skipTest('Multiple fixed ips on a port are not supported') - - def _set_net_external(self, net_id): - self._update('networks', net_id, - {'network': {extnet_apidef.EXTERNAL: True}}) - - def _add_external_gateway_to_router(self, router_id, network_id, - expected_code=webob.exc.HTTPOk.code, - neutron_context=None, ext_ips=None): - 
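# This helper updates the router's external_gateway_info; the request body
# it builds takes the shape
#     {'router': {'external_gateway_info': {'network_id': <network_id>,
#                                           'external_fixed_ips': <ext_ips>}}}
# with 'external_fixed_ips' included only when ext_ips is supplied.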
ext_ips = ext_ips or [] - body = {'router': - {'external_gateway_info': {'network_id': network_id}}} - if ext_ips: - body['router']['external_gateway_info'][ - 'external_fixed_ips'] = ext_ips - return self._update('routers', router_id, body, - expected_code=expected_code, - neutron_context=neutron_context) - - def test_router_add_gateway_no_subnet_forbidden(self): - with self.router() as r: - with self.network() as n: - self._set_net_external(n['network']['id']) - self._add_external_gateway_to_router( - r['router']['id'], n['network']['id'], - expected_code=webob.exc.HTTPBadRequest.code) - - -class L3NatTestCaseBase(test_l3_plugin.L3NatTestCaseMixin): - - def setUp(self, **kwargs): - super(L3NatTestCaseBase, self).setUp(**kwargs) - mock.patch.object(self.plugin, '_get_firewall_icmpv6_rules', - return_value=[]).start() - - def test_create_floatingip_with_specific_ip(self): - with self.subnet(cidr='10.0.0.0/24', - enable_dhcp=False) as s: - network_id = s['subnet']['network_id'] - self._set_net_external(network_id) - fp = self._make_floatingip(self.fmt, network_id, - floating_ip='10.0.0.10') - self.assertEqual('10.0.0.10', - fp['floatingip']['floating_ip_address']) - - def test_floatingip_same_external_and_internal(self): - # Select router with subnet's gateway_ip for floatingip when - # routers connected to same subnet and external network. - with self.subnet(cidr="10.0.0.0/24", enable_dhcp=False) as exs,\ - self.subnet(cidr="12.0.0.0/24", - gateway_ip="12.0.0.50", - enable_dhcp=False) as ins: - network_ex_id = exs['subnet']['network_id'] - self._set_net_external(network_ex_id) - - r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] - with self.router() as r1,\ - self.router() as r2,\ - self.port(subnet=ins, - fixed_ips=r2i_fixed_ips) as r2i_port: - self._add_external_gateway_to_router( - r1['router']['id'], - network_ex_id) - self._router_interface_action('add', r2['router']['id'], - None, - r2i_port['port']['id']) - self._router_interface_action('add', r1['router']['id'], - ins['subnet']['id'], - None) - self._add_external_gateway_to_router( - r2['router']['id'], - network_ex_id) - - with self.port(subnet=ins, - fixed_ips=[{'ip_address': '12.0.0.8'}] - ) as private_port: - - fp = self._make_floatingip(self.fmt, network_ex_id, - private_port['port']['id']) - self.assertEqual(r1['router']['id'], - fp['floatingip']['router_id']) - - def test_floatingip_multi_external_one_internal(self): - with self.subnet(cidr="10.0.0.0/24", enable_dhcp=False) as exs1,\ - self.subnet(cidr="11.0.0.0/24", enable_dhcp=False) as exs2,\ - self.subnet(cidr="12.0.0.0/24", enable_dhcp=False) as ins1: - network_ex_id1 = exs1['subnet']['network_id'] - network_ex_id2 = exs2['subnet']['network_id'] - self._set_net_external(network_ex_id1) - self._set_net_external(network_ex_id2) - - r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] - with self.router() as r1,\ - self.router() as r2,\ - self.port(subnet=ins1, - fixed_ips=r2i_fixed_ips) as r2i_port: - self._add_external_gateway_to_router( - r1['router']['id'], - network_ex_id1) - self._router_interface_action('add', r1['router']['id'], - ins1['subnet']['id'], - None) - self._add_external_gateway_to_router( - r2['router']['id'], - network_ex_id2) - self._router_interface_action('add', r2['router']['id'], - None, - r2i_port['port']['id']) - - with self.port(subnet=ins1, - fixed_ips=[{'ip_address': '12.0.0.3'}] - ) as private_port: - - fp1 = self._make_floatingip(self.fmt, network_ex_id1, - private_port['port']['id']) - fp2 = self._make_floatingip(self.fmt, network_ex_id2, - 
private_port['port']['id']) - self.assertEqual(fp1['floatingip']['router_id'], - r1['router']['id']) - self.assertEqual(fp2['floatingip']['router_id'], - r2['router']['id']) - - def _get_md_proxy_fw_rules(self): - if not self.with_md_proxy: - return [] - return md_proxy.get_router_fw_rules() - - @mock.patch.object(edge_utils, "update_firewall") - def test_router_set_gateway_with_nosnat(self, mock): - expected_fw = [{'action': 'allow', - 'enabled': True, - 'name': 'Subnet Rule', - 'source_ip_address': [], - 'destination_ip_address': []} - ] + self._get_md_proxy_fw_rules() - nosnat_fw = [{'action': 'allow', - 'enabled': True, - 'name': 'No SNAT Rule', - 'source_vnic_groups': ["external"], - 'destination_ip_address': []}] - - with self.router() as r1,\ - self.subnet() as ext_subnet,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='12.0.0.0/24') as s2: - self._set_net_external(ext_subnet['subnet']['network_id']) - - self._router_interface_action( - 'add', r1['router']['id'], - s1['subnet']['id'], None) - expected_fw[0]['source_ip_address'] = ['11.0.0.0/24'] - expected_fw[0]['destination_ip_address'] = ['11.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual(self._recursive_sort_list(expected_fw), - self._recursive_sort_list(fw_rules)) - self._add_external_gateway_to_router( - r1['router']['id'], - ext_subnet['subnet']['network_id']) - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual(self._recursive_sort_list(expected_fw), - self._recursive_sort_list(fw_rules)) - self._update_router_enable_snat( - r1['router']['id'], - ext_subnet['subnet']['network_id'], - False) - nosnat_fw[0]['destination_ip_address'] = ['11.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list(expected_fw + nosnat_fw), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('add', - r1['router']['id'], - s2['subnet']['id'], - None) - expected_fw[0]['source_ip_address'] = ['12.0.0.0/24', - '11.0.0.0/24'] - expected_fw[0]['destination_ip_address'] = ['12.0.0.0/24', - '11.0.0.0/24'] - nosnat_fw[0]['destination_ip_address'] = ['11.0.0.0/24', - '12.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list(expected_fw + nosnat_fw), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('remove', - r1['router']['id'], - s1['subnet']['id'], - None) - expected_fw[0]['source_ip_address'] = ['12.0.0.0/24'] - expected_fw[0]['destination_ip_address'] = ['12.0.0.0/24'] - nosnat_fw[0]['destination_ip_address'] = ['12.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list(expected_fw + nosnat_fw), - self._recursive_sort_list(fw_rules)) - self._update_router_enable_snat( - r1['router']['id'], - ext_subnet['subnet']['network_id'], - True) - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list(expected_fw), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('remove', - r1['router']['id'], - s2['subnet']['id'], - None) - self._remove_external_gateway_from_router( - r1['router']['id'], - ext_subnet['subnet']['network_id']) - - def test_router_add_interface_port_bad_tenant_returns_404(self): - self.skipTest('TBD') - - def test_router_add_interface_subnet_with_bad_tenant_returns_404(self): - self.skipTest('TBD') - - def test__notify_gateway_port_ip_changed(self): - self.skipTest('not supported') - - def 
test_router_add_interface_multiple_ipv6_subnets_same_net(self): - """Test router-interface-add for multiple ipv6 subnets on a network. - - Verify that adding multiple ipv6 subnets from the same network - to a router places them all on the same router interface. - """ - with self.router() as r, self.network() as n: - with self.subnet( - network=n, cidr='fd00::1/64', - enable_dhcp=False, ip_version=6) as s1, self.subnet( - network=n, cidr='fd01::1/64', - ip_version=6, enable_dhcp=False) as s2: - - body = self._router_interface_action('add', - r['router']['id'], - s1['subnet']['id'], - None) - pid1 = body['port_id'] - body = self._router_interface_action('add', - r['router']['id'], - s2['subnet']['id'], - None) - pid2 = body['port_id'] - self.assertEqual(pid1, pid2) - port = self._show('ports', pid1) - self.assertEqual(2, len(port['port']['fixed_ips'])) - port_subnet_ids = [fip['subnet_id'] for fip in - port['port']['fixed_ips']] - self.assertIn(s1['subnet']['id'], port_subnet_ids) - self.assertIn(s2['subnet']['id'], port_subnet_ids) - self._router_interface_action('remove', r['router']['id'], - s1['subnet']['id'], None) - self._router_interface_action('remove', r['router']['id'], - s2['subnet']['id'], None) - - def test_router_add_interface_ipv6_port_existing_network_returns_400(self): - """Ensure unique IPv6 router ports per network id. - Adding a router port containing one or more IPv6 subnets with the same - network id as an existing router port should fail. This is so - there is no ambiguity regarding on which port to add an IPv6 subnet - when executing router-interface-add with a subnet and no port. - """ - with self.network() as n, self.router() as r: - with self.subnet(network=n, cidr='fd00::/64', - ip_version=6, enable_dhcp=False) as s1, ( - self.subnet(network=n, cidr='fd01::/64', - ip_version=6, enable_dhcp=False)) as s2: - with self.port(subnet=s1) as p: - self._router_interface_action('add', - r['router']['id'], - s2['subnet']['id'], - None) - exp_code = webob.exc.HTTPBadRequest.code - self._router_interface_action('add', - r['router']['id'], - None, - p['port']['id'], - expected_code=exp_code) - self._router_interface_action('remove', - r['router']['id'], - s2['subnet']['id'], - None) - - def test_subnet_dhcp_metadata_with_update(self): - self.plugin_instance.metadata_proxy_handler = mock.Mock() - with self.subnet(cidr="10.0.0.0/24", enable_dhcp=True) as s1: - subnet_id = s1['subnet']['id'] - is_dhcp_meta = self.plugin_instance.is_dhcp_metadata( - context.get_admin_context(), subnet_id) - self.assertTrue(is_dhcp_meta) - port_data = {'port': {'tenant_id': s1['subnet']['tenant_id'], - 'network_id': s1['subnet']['network_id'], - 'device_owner': 'compute:None'}} - req = self.new_create_request( - 'ports', port_data).get_response(self.api) - port_req = self.deserialize(self.fmt, req) - subnet_data = {'subnet': {'enable_dhcp': False}} - self.new_update_request( - 'subnets', subnet_data, - s1['subnet']['id']).get_response(self.api) - is_dhcp_meta = self.plugin_instance.is_dhcp_metadata( - context.get_admin_context(), subnet_id) - self.assertFalse(is_dhcp_meta) - self.new_delete_request('ports', port_req['port']['id']) - - def test_router_add_gateway_notifications(self): - with self.router() as r,\ - self._create_l3_ext_network() as ext_net,\ - self.subnet(network=ext_net): - with mock.patch.object(registry, 'publish') as publish: - self._add_external_gateway_to_router( - r['router']['id'], ext_net['network']['id']) - expected = [mock.call( - resources.ROUTER_GATEWAY, - 
events.AFTER_CREATE, mock.ANY, - payload=mock.ANY)] - publish.assert_has_calls(expected) - - def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self): - self.skipTest('No DHCP v6 Support yet') - - def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self): - self.skipTest('No DHCP v6 Support yet') - - def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): - self.skipTest('No DHCP v6 Support yet') - - def test_router_remove_ipv6_subnet_from_interface(self): - self.skipTest('No DHCP v6 Support yet') - - def test_router_update_gateway_add_multiple_prefixes_ipv6(self): - self.skipTest('No DHCP v6 Support yet') - - def test_router_concurrent_delete_upon_subnet_create(self): - self.skipTest('No DHCP v6 Support yet') - - def test_router_update_gateway_upon_subnet_create_ipv6(self): - self.skipTest('No DHCP v6 Support yet') - - def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self): - self.skipTest('No DHCP v6 Support yet') - - def test_floatingip_via_router_interface_returns_201(self): - self.skipTest('not supported') - - def test_floatingip_via_router_interface_returns_404(self): - self.skipTest('not supported') - - def test_floatingip_update_subnet_gateway_disabled(self): - self.skipTest('not supported') - - -class IPv6ExpectedFailuresTestMixin(object): - - def test_router_add_interface_ipv6_subnet(self): - self.skipTest('Not supported') - - def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): - # This returns a 400 too, but as an exception is raised the response - # code need to be asserted differently - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - super(IPv6ExpectedFailuresTestMixin, self).\ - test_router_add_iface_ipv6_ext_ra_subnet_returns_400() - self.assertEqual(ctx_manager.exception.code, 400) - - def test_router_add_gateway_multiple_subnets_ipv6(self): - self.skipTest('not supported') - - -class TestExclusiveRouterTestCase(L3NatTest, L3NatTestCaseBase, - test_l3_plugin.L3NatDBIntTestCase, - IPv6ExpectedFailuresTestMixin, - NsxVPluginV2TestCase, - test_address_scope.AddressScopeTestCase): - - def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): - super(TestExclusiveRouterTestCase, self).setUp( - plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) - self.plugin_instance.nsx_v.is_subnet_in_use = mock.Mock() - self.plugin_instance.nsx_v.is_subnet_in_use.return_value = False - self._default_tenant_id = self._tenant_id - self._router_tenant_id = 'test-router-tenant' - - def _create_router(self, fmt, tenant_id, name=None, - admin_state_up=None, set_context=False, - arg_list=None, **kwargs): - tenant_id = tenant_id or _uuid() - data = {'router': {'tenant_id': tenant_id}} - if name: - data['router']['name'] = name - if admin_state_up: - data['router']['admin_state_up'] = admin_state_up - for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): - # Arg must be present and not empty - if arg in kwargs and kwargs[arg]: - data['router'][arg] = kwargs[arg] - - data['router']['router_type'] = kwargs.get('router_type', 'exclusive') - - router_req = self.new_create_request('routers', data, fmt) - if set_context and tenant_id: - # create a specific auth context for this request - router_req.environ['neutron.context'] = context.Context( - '', tenant_id) - - return router_req.get_response(self.ext_api) - - def _test_create_l3_ext_network(self, vlan_id=0): - name = 'l3_ext_net' - expected = [('subnets', []), ('name', name), ('admin_state_up', True), - ('status', 'ACTIVE'), 
('shared', False), - (extnet_apidef.EXTERNAL, True)] - with self._create_l3_ext_network(vlan_id) as net: - for k, v in expected: - self.assertEqual(net['network'][k], v) - - def test_create_router_fail_at_the_backend(self): - p = directory.get_plugin() - edge_manager = p.edge_manager - with mock.patch.object(edge_manager, 'create_lrouter', - side_effect=[n_exc.NeutronException]): - router = {'router': {'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'fake_tenant', - 'router_type': 'exclusive'}} - self.assertRaises(n_exc.NeutronException, - p.create_router, - context.get_admin_context(), - router) - self._test_list_resources('router', ()) - - def test_create_l3_ext_network_with_dhcp(self): - with self._create_l3_ext_network() as net: - with testlib_api.ExpectedException( - webob.exc.HTTPClientError) as ctx_manager: - with self.subnet(network=net, enable_dhcp=True): - self.assertEqual(ctx_manager.exception.code, 400) - - def test_create_l3_ext_network_without_vlan(self): - self._test_create_l3_ext_network() - - def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None, - validate_ext_gw=False, - router_ctx=None): - tenant_id = self._router_tenant_id if router_ctx else self._tenant_id - with self._create_l3_ext_network(vlan_id) as net: - with self.subnet(network=net, enable_dhcp=False) as s: - data = {'router': {'tenant_id': tenant_id}} - data['router']['name'] = 'router1' - data['router']['external_gateway_info'] = { - 'network_id': s['subnet']['network_id']} - router_req = self.new_create_request( - 'routers', data, self.fmt, context=router_ctx) - res = router_req.get_response(self.ext_api) - router = self.deserialize(self.fmt, res) - self.assertEqual( - s['subnet']['network_id'], - (router['router']['external_gateway_info'] - ['network_id'])) - if validate_ext_gw: - pass - - def test_router_create_with_gwinfo_and_l3_ext_net(self): - self._test_router_create_with_gwinfo_and_l3_ext_net() - - def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self): - self._test_router_create_with_gwinfo_and_l3_ext_net(444) - - def test_router_create_with_gwinfo_and_l3_ext_net_with_non_admin(self): - ctx = context.Context(user_id=None, - tenant_id=self._router_tenant_id, - is_admin=False) - self._test_router_create_with_gwinfo_and_l3_ext_net(router_ctx=ctx) - - def test_router_create_with_different_sizes(self): - data = {'router': { - 'tenant_id': 'whatever', - 'name': 'test_router', - 'router_type': 'exclusive'}} - for size in ['compact', 'large', 'xlarge', 'quadlarge']: - data['router']['router_size'] = size - router_req = self.new_create_request('routers', data, self.fmt) - res = router_req.get_response(self.ext_api) - router = self.deserialize(self.fmt, res) - self.assertEqual(size, router['router']['router_size']) - - def test_router_create_overriding_default_edge_size(self): - data = {'router': { - 'tenant_id': 'whatever', - 'name': 'test_router', - 'router_type': 'exclusive'}} - cfg.CONF.set_override('exclusive_router_appliance_size', - 'xlarge', group='nsxv') - router_req = self.new_create_request('routers', data, self.fmt) - res = router_req.get_response(self.ext_api) - router = self.deserialize(self.fmt, res) - self.assertEqual('xlarge', router['router']['router_size']) - - def test_router_add_gateway_invalid_network_returns_404(self): - # NOTE(salv-orlando): This unit test has been overridden - # as the nsx plugin support the ext_gw_mode extension - # which mandates an uuid for the external network identifier - with self.router() as r: - 
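# A random but well-formed UUID is used so the request passes the
# ext_gw_mode identifier validation noted above and fails only on the
# network lookup, which is what produces the expected 404.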
self._add_external_gateway_to_router( - r['router']['id'], - uuidutils.generate_uuid(), - expected_code=webob.exc.HTTPNotFound.code) - - def test_router_rename(self): - with self.router(name='old_name') as r: - with mock.patch.object(edge_appliance_driver.EdgeApplianceDriver, - 'rename_edge') as edge_rename: - new_name = 'new_name' - router_id = r['router']['id'] - # get the edge of this router - plugin = directory.get_plugin() - router_obj = ex_router_driver.RouterExclusiveDriver(plugin) - ctx = context.get_admin_context() - edge_id = router_obj._get_edge_id_or_raise(ctx, router_id) - - # update the name - - body = self._update('routers', router_id, - {'router': {'name': new_name}}) - self.assertEqual(new_name, body['router']['name']) - edge_rename.assert_called_once_with( - edge_id, - new_name + '-' + router_id) - - def test_router_resize(self): - with self.router() as r: - with mock.patch.object(edge_appliance_driver.EdgeApplianceDriver, - 'resize_edge') as edge_resize: - new_size = 'large' - router_id = r['router']['id'] - # get the edge of this router - plugin = directory.get_plugin() - router_obj = ex_router_driver.RouterExclusiveDriver(plugin) - ctx = context.get_admin_context() - edge_id = router_obj._get_edge_id_or_raise(ctx, router_id) - - # update the router size - body = self._update('routers', router_id, - {'router': {'router_size': new_size}}) - self.assertEqual(new_size, body['router']['router_size']) - edge_resize.assert_called_once_with(edge_id, new_size) - - def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None, - validate_ext_gw=False, - distributed=False, - router_ctx=None): - if router_ctx: - self._tenant_id = self._router_tenant_id - with self.router( - arg_list=('distributed',), distributed=distributed, - set_context=True, - tenant_id=self._tenant_id) as r: - self._tenant_id = self._default_tenant_id - with self.subnet() as s1: - with self._create_l3_ext_network(vlan_id) as net: - with self.subnet(network=net, enable_dhcp=False) as s2: - self._set_net_external(s1['subnet']['network_id']) - try: - self._add_external_gateway_to_router( - r['router']['id'], - s1['subnet']['network_id'], - neutron_context=router_ctx) - body = self._show('routers', r['router']['id']) - net_id = (body['router'] - ['external_gateway_info']['network_id']) - self.assertEqual(net_id, - s1['subnet']['network_id']) - # Plug network with external mapping - self._set_net_external(s2['subnet']['network_id']) - self._add_external_gateway_to_router( - r['router']['id'], - s2['subnet']['network_id'], - neutron_context=router_ctx) - body = self._show('routers', r['router']['id']) - net_id = (body['router'] - ['external_gateway_info']['network_id']) - self.assertEqual(net_id, - s2['subnet']['network_id']) - if validate_ext_gw: - pass - finally: - # Cleanup - self._remove_external_gateway_from_router( - r['router']['id'], - s2['subnet']['network_id']) - - def test_router_update_gateway_on_l3_ext_net(self): - self._test_router_update_gateway_on_l3_ext_net() - - def test_router_update_gateway_on_l3_ext_net_with_non_admin(self): - ctx = context.Context(user_id=None, - tenant_id=self._router_tenant_id, - is_admin=False) - self._test_router_update_gateway_on_l3_ext_net(router_ctx=ctx) - - def test_router_update_gateway_on_l3_ext_net_with_vlan(self): - self._test_router_update_gateway_on_l3_ext_net(444) - - def test_router_update_gateway_with_existing_floatingip(self): - with self._create_l3_ext_network() as net: - with self.subnet(network=net, enable_dhcp=False) as subnet: - with 
self.floatingip_with_assoc() as fip: - self._add_external_gateway_to_router( - fip['floatingip']['router_id'], - subnet['subnet']['network_id'], - expected_code=webob.exc.HTTPConflict.code) - - def test_router_list_by_tenant_id(self): - with self.router(), self.router(): - with self.router(tenant_id='custom') as router: - self._test_list_resources('router', [router], - query_params="tenant_id=custom") - - def test_create_l3_ext_network_with_vlan(self): - self._test_create_l3_ext_network(666) - - def test_floatingip_with_assoc_fails(self): - self._test_floatingip_with_assoc_fails( - self._plugin_name + '._check_and_get_fip_assoc') - - def test_floatingip_with_invalid_create_port(self): - self._test_floatingip_with_invalid_create_port(self._plugin_name) - - def test_floatingip_update(self): - super(TestExclusiveRouterTestCase, self).test_floatingip_update( - constants.FLOATINGIP_STATUS_DOWN) - - def test_floating_ip_no_snat(self): - """Cannot add floating ips to a router with disabled snat""" - with self.router() as r1,\ - self.subnet() as ext_subnet,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.port(subnet=s1) as private_port: - # Add interfaces to the router - self._router_interface_action( - 'add', r1['router']['id'], - s1['subnet']['id'], None) - self._set_net_external(ext_subnet['subnet']['network_id']) - self._add_external_gateway_to_router( - r1['router']['id'], - ext_subnet['subnet']['network_id']) - # disable snat - self._update_router_enable_snat( - r1['router']['id'], - ext_subnet['subnet']['network_id'], - False) - # create a floating ip and associate it to the router should fail - self.assertRaises( - object, - self._make_floatingip, - self.fmt, ext_subnet['subnet']['network_id'], - private_port['port']['id']) - - # now enable snat and try again - self._update_router_enable_snat( - r1['router']['id'], - ext_subnet['subnet']['network_id'], - True) - self._make_floatingip( - self.fmt, ext_subnet['subnet']['network_id'], - private_port['port']['id']) - - # now shouldn't be able to disable snat - self.assertRaises( - object, - self._update_router_enable_snat, - r1['router']['id'], - ext_subnet['subnet']['network_id'], - False) - - def test_floatingip_disassociate(self): - with self.port() as p: - private_sub = {'subnet': {'id': - p['port']['fixed_ips'][0]['subnet_id']}} - with self.floatingip_no_assoc(private_sub) as fip: - self.assertEqual(fip['floatingip']['status'], - constants.FLOATINGIP_STATUS_DOWN) - port_id = p['port']['id'] - body = self._update('floatingips', fip['floatingip']['id'], - {'floatingip': {'port_id': port_id}}) - self.assertEqual(body['floatingip']['port_id'], port_id) - self.assertEqual(body['floatingip']['status'], - constants.FLOATINGIP_STATUS_ACTIVE) - # Disassociate - body = self._update('floatingips', fip['floatingip']['id'], - {'floatingip': {'port_id': None}}) - body = self._show('floatingips', fip['floatingip']['id']) - self.assertIsNone(body['floatingip']['port_id']) - self.assertIsNone(body['floatingip']['fixed_ip_address']) - self.assertEqual(body['floatingip']['status'], - constants.FLOATINGIP_STATUS_DOWN) - - def test_update_floatingip_with_edge_router_update_failure(self): - p = directory.get_plugin() - with self.subnet() as subnet,\ - self.port(subnet=subnet) as p1,\ - self.port(subnet=subnet) as p2: - p1_id = p1['port']['id'] - p2_id = p2['port']['id'] - with self.floatingip_with_assoc(port_id=p1_id) as fip: - with self._mock_edge_router_update_with_exception(): - self.assertRaises(object, - p.update_floatingip, - 
context.get_admin_context(), - fip['floatingip']['id'], - floatingip={'floatingip': - {'port_id': p2_id}}) - res = self._list( - 'floatingips', query_params="port_id=%s" % p1_id) - self.assertEqual(len(res['floatingips']), 1) - res = self._list( - 'floatingips', query_params="port_id=%s" % p2_id) - self.assertEqual(len(res['floatingips']), 0) - - def test_create_floatingip_with_edge_router_update_failure(self): - p = directory.get_plugin() - with self.subnet(cidr='200.0.0.0/24') as public_sub: - public_network_id = public_sub['subnet']['network_id'] - self._set_net_external(public_network_id) - with self.port() as private_port: - port_id = private_port['port']['id'] - tenant_id = private_port['port']['tenant_id'] - subnet_id = private_port['port']['fixed_ips'][0]['subnet_id'] - with self.router() as r: - self._add_external_gateway_to_router( - r['router']['id'], - public_sub['subnet']['network_id']) - self._router_interface_action('add', - r['router']['id'], - subnet_id, - None) - floatingip = {'floatingip': { - 'tenant_id': tenant_id, - 'floating_network_id': public_network_id, - 'port_id': port_id}} - - with self._mock_edge_router_update_with_exception(): - self.assertRaises(object, - p.create_floatingip, - context.get_admin_context(), - floatingip=floatingip) - res = self._list( - 'floatingips', query_params="port_id=%s" % port_id) - self.assertEqual(len(res['floatingips']), 0) - # Cleanup - self._router_interface_action('remove', - r['router']['id'], - subnet_id, - None) - self._remove_external_gateway_from_router( - r['router']['id'], public_network_id) - - @contextlib.contextmanager - def _mock_edge_router_update_with_exception(self): - nsx_router_update = PLUGIN_NAME + '._update_edge_router' - with mock.patch(nsx_router_update) as update_edge: - update_edge.side_effect = object() - yield update_edge - - @mock.patch.object(edge_utils, "update_firewall") - def test_router_interfaces_with_update_firewall(self, mock): - s1_cidr = '10.0.0.0/24' - s2_cidr = '11.0.0.0/24' - with self.router() as r,\ - self.subnet(cidr=s1_cidr) as s1,\ - self.subnet(cidr=s2_cidr) as s2: - - self._router_interface_action('add', - r['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r['router']['id'], - s2['subnet']['id'], - None) - expected_cidrs = [s1_cidr, s2_cidr] - expected_fw = [{'action': 'allow', - 'enabled': True, - 'name': 'Subnet Rule', - 'source_ip_address': expected_cidrs, - 'destination_ip_address': expected_cidrs} - ] + self._get_md_proxy_fw_rules() - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual(self._recursive_sort_list(expected_fw), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('remove', - r['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('remove', - r['router']['id'], - s2['subnet']['id'], - None) - - @mock.patch.object(edge_utils, "update_firewall") - def test_router_interfaces_with_update_firewall_metadata(self, mock): - self.plugin_instance.metadata_proxy_handler = mock.Mock() - s1_cidr = '10.0.0.0/24' - s2_cidr = '11.0.0.0/24' - with self.router() as r,\ - self.subnet(cidr=s1_cidr) as s1,\ - self.subnet(cidr=s2_cidr) as s2: - self._router_interface_action('add', - r['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r['router']['id'], - s2['subnet']['id'], - None) - # build the list of expected fw rules - expected_cidrs = [s1_cidr, s2_cidr] - fw_rule = {'action': 'allow', - 'enabled': True, - 'name': 'Subnet Rule', - 
'source_ip_address': expected_cidrs, - 'destination_ip_address': expected_cidrs} - vse_rule = {'action': 'allow', - 'enabled': True, - 'name': 'VSERule', - 'source_vnic_groups': ['vse'], - 'destination_vnic_groups': ['external']} - dest_intern = [md_proxy.INTERNAL_SUBNET] - md_inter = {'action': 'deny', - 'destination_ip_address': dest_intern, - 'enabled': True, - 'name': 'MDInterEdgeNet'} - dest_srvip = [md_proxy.METADATA_IP_ADDR] - vsmdienet = {'action': 'allow', - 'destination_ip_address': [md_proxy.INTERNAL_SUBNET], - 'enabled': True, - 'name': 'VSEMDInterEdgeNet', - 'source_vnic_groups': ['vse']} - md_srvip = {'action': 'allow', - 'destination_ip_address': dest_srvip, - 'destination_port': '80,443,8775', - 'enabled': True, - 'name': 'MDServiceIP', - 'protocol': 'tcp'} - expected_fw = [fw_rule, - vsmdienet, - vse_rule, - md_inter, - md_srvip] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual(self._recursive_sort_list(expected_fw), - self._recursive_sort_list(fw_rules)) - - # Also test the md_srvip conversion: - drv = edge_firewall_driver.EdgeFirewallDriver() - rule = drv._convert_firewall_rule(md_srvip) - exp_service = {'service': [{'port': [80, 443, 8775], - 'protocol': 'tcp'}]} - exp_rule = {'action': 'accept', - 'application': exp_service, - 'destination': {'ipAddress': dest_srvip}, - 'enabled': True, - 'name': 'MDServiceIP'} - self.assertEqual(exp_rule, rule) - - self._router_interface_action('remove', - r['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('remove', - r['router']['id'], - s2['subnet']['id'], - None) - - @mock.patch.object(edge_utils, "update_firewall") - def test_router_interfaces_with_update_firewall_metadata_conf(self, mock): - """Test the metadata proxy firewall rule with configured ports - """ - cfg.CONF.set_override('metadata_service_allowed_ports', - ['55', ' 66 ', '55', '77'], group='nsxv') - self.plugin_instance.metadata_proxy_handler = mock.Mock() - s1_cidr = '10.0.0.0/24' - with self.router() as r,\ - self.subnet(cidr=s1_cidr) as s1: - self._router_interface_action('add', - r['router']['id'], - s1['subnet']['id'], - None) - # build the expected fw rule - # at this stage the string of ports is not sorted/unique/validated - dest_srvip = [md_proxy.METADATA_IP_ADDR] - rule_name = 'MDServiceIP' - md_srvip = {'action': 'allow', - 'destination_ip_address': dest_srvip, - 'destination_port': '80,443,8775,55,66,55,77', - 'enabled': True, - 'name': rule_name, - 'protocol': 'tcp'} - # compare it to the rule with the same name - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - rule_found = False - for fw_rule in fw_rules: - if (validators.is_attr_set(fw_rule.get("name")) and - fw_rule['name'] == rule_name): - self.assertEqual(md_srvip, fw_rule) - rule_found = True - break - self.assertTrue(rule_found) - - # Also test the rule conversion - # Ports should be sorted & unique, and ignore non numeric values - drv = edge_firewall_driver.EdgeFirewallDriver() - rule = drv._convert_firewall_rule(md_srvip) - exp_service = {'service': [{'port': [55, 66, 77, 80, 443, 8775], - 'protocol': 'tcp'}]} - exp_rule = {'action': 'accept', - 'application': exp_service, - 'destination': {'ipAddress': dest_srvip}, - 'enabled': True, - 'name': 'MDServiceIP'} - self.assertEqual(exp_rule, rule) - - @mock.patch.object(edge_utils, "update_firewall") - def test_router_interfaces_different_tenants_update_firewall(self, mock): - tenant_id = _uuid() - other_tenant_id = _uuid() - s1_cidr = '10.0.0.0/24' - s2_cidr = '11.0.0.0/24' - with 
self.router(tenant_id=tenant_id) as r,\ - self.network(tenant_id=tenant_id) as n1,\ - self.network(tenant_id=other_tenant_id) as n2,\ - self.subnet(network=n1, cidr=s1_cidr) as s1,\ - self.subnet(network=n2, cidr=s2_cidr) as s2: - - self._router_interface_action('add', - r['router']['id'], - s2['subnet']['id'], - None) - self._router_interface_action('add', - r['router']['id'], - s1['subnet']['id'], - None, - tenant_id=tenant_id) - expected_cidrs = [s1_cidr, s2_cidr] - expected_fw = [{'action': 'allow', - 'enabled': True, - 'name': 'Subnet Rule', - 'source_ip_address': expected_cidrs, - 'destination_ip_address': expected_cidrs} - ] + self._get_md_proxy_fw_rules() - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual(self._recursive_sort_list(expected_fw), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('remove', - r['router']['id'], - s1['subnet']['id'], - None, - tenant_id=tenant_id) - - self._router_interface_action('remove', - r['router']['id'], - s2['subnet']['id'], - None) - expected_fw = self._get_md_proxy_fw_rules() - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual(expected_fw, fw_rules) - - def test_create_router_gateway_fails(self): - self.skipTest('not supported') - - def test_migrate_exclusive_router_to_shared(self): - with self._create_l3_ext_network() as net: - with self.subnet(network=net, enable_dhcp=False) as s: - data = {'router': {'tenant_id': 'whatever'}} - data['router']['name'] = 'router1' - data['router']['external_gateway_info'] = { - 'network_id': s['subnet']['network_id']} - data['router']['router_type'] = 'exclusive' - - router_req = self.new_create_request('routers', data, - self.fmt) - res = router_req.get_response(self.ext_api) - router = self.deserialize(self.fmt, res) - # update the router type: - router_id = router['router']['id'] - self._update('routers', router_id, - {'router': {'router_type': 'shared'}}) - - # get the updated router and check it's type - body = self._show('routers', router_id) - self.assertEqual('shared', body['router']['router_type']) - - @mock.patch.object(edge_utils.EdgeManager, - 'update_interface_addr') - def test_router_update_gateway_with_different_external_subnet(self, mock): - # This test calls the backend, so we need a mock for the edge_utils - super( - TestExclusiveRouterTestCase, - self).test_router_update_gateway_with_different_external_subnet() - - @mock.patch.object(edge_utils.EdgeManager, - 'update_interface_addr') - def test_router_add_interface_multiple_ipv6_subnets_same_net(self, mock): - # This test calls the backend, so we need a mock for the edge_utils - super( - TestExclusiveRouterTestCase, - self).test_router_add_interface_multiple_ipv6_subnets_same_net() - - def _fake_rename_edge(self, edge_id, name): - raise vcns_exc.VcnsApiException( - status=400, header={'status': 200}, uri='fake_url', response='') - - def test_create_router_with_update_error(self): - p = directory.get_plugin() - - # make sure there is an available edge so we will use backend update - available_edge = {'edge_id': 'edge-11', 'router_id': 'fake_id'} - nsxv_db.add_nsxv_router_binding( - context.get_admin_context().session, available_edge['router_id'], - available_edge['edge_id'], None, constants.ACTIVE) - with mock.patch.object(p.edge_manager, - '_get_available_router_binding', - return_value=available_edge): - # Mock for update_edge task failure - with mock.patch.object( - p.edge_manager.nsxv_manager, 'rename_edge', - side_effect=self._fake_rename_edge): - router = {'router': 
{'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'fake_tenant', - 'router_type': 'exclusive'}} - # router creation should succeed - returned_router = p.create_router(context.get_admin_context(), - router) - # router status should be 'error' - self.assertEqual(constants.ERROR, returned_router['status']) - - # check the same after get_router - new_router = p.get_router(context.get_admin_context(), - returned_router['id']) - self.assertEqual(constants.ERROR, new_router['status']) - - def test_create_router_with_bad_az_hint(self): - p = directory.get_plugin() - router = {'router': {'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'fake_tenant', - 'router_type': 'exclusive', - 'availability_zone_hints': ['bad_hint']}} - self.assertRaises(n_exc.NeutronException, - p.create_router, - context.get_admin_context(), - router) - - def test_create_router_with_az_hint(self): - az_name = 'az7' - set_az_in_config(az_name) - p = directory.get_plugin() - p._availability_zones_data = nsx_az.NsxVAvailabilityZones() - p._get_edge_id_by_rtr_id = p.real_get_edge - - router = {'router': {'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'fake_tenant', - 'router_type': 'exclusive', - 'availability_zone_hints': [az_name]}} - - # router creation should succeed - returned_router = p.create_router(context.get_admin_context(), - router) - self.assertEqual([az_name], - returned_router['availability_zone_hints']) - self.assertEqual([az_name], - returned_router['availability_zones']) - - def test_create_router_with_default_az(self): - az_name = 'az7' - set_az_in_config(az_name) - cfg.CONF.set_override('default_availability_zones', [az_name]) - p = directory.get_plugin() - p._availability_zones_data = nsx_az.NsxVAvailabilityZones() - p._get_edge_id_by_rtr_id = p.real_get_edge - - router = {'router': {'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'fake_tenant', - 'router_type': 'exclusive'}} - - # router creation should succeed - returned_router = p.create_router(context.get_admin_context(), - router) - # Neutron commit e6c3686cd8 changed create_router behaviour and - # availability zone hints are populated as well - self.assertEqual([az_name], - returned_router['availability_zone_hints']) - self.assertEqual([az_name], - returned_router['availability_zones']) - - def test_floatingip_update_to_same_port_id_twice(self): - self.skipTest('Plugin changes floating port status') - - def test_router_add_interface_ipv6_subnet(self): - self.skipTest('Not supported') - - def test_update_router_interface_port_ipv6_subnet_ext_ra(self): - self.skipTest('Not supported') - - def test_router_add_gateway_multiple_subnets_ipv6(self): - self.skipTest('not supported') - - def test_router_add_interface_by_port_other_tenant_address_out_of_pool( - self): - # multiple fixed ips per port are not supported - self.skipTest('not supported') - - def test_router_add_interface_by_port_other_tenant_address_in_pool(self): - # multiple fixed ips per port are not supported - self.skipTest('not supported') - - def test_router_add_interface_by_port_admin_address_out_of_pool(self): - # multiple fixed ips per port are not supported - self.skipTest('not supported') - - def test_update_subnet_gateway_for_external_net(self): - plugin = directory.get_plugin() - router_obj = ex_router_driver.RouterExclusiveDriver(plugin) - with mock.patch.object(plugin, '_find_router_driver', - return_value=router_obj): - 
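# The nested patch below spies on the exclusive-router driver's
# _update_nexthop, letting the test assert that changing the external
# subnet's gateway IP is pushed to the edge as a new nexthop.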
with mock.patch.object(router_obj, - '_update_nexthop') as update_nexthop: - super(TestExclusiveRouterTestCase, - self).test_update_subnet_gateway_for_external_net() - self.assertTrue(update_nexthop.called) - - def _test_create_subnetpool(self, prefixes, expected=None, - admin=False, **kwargs): - keys = kwargs.copy() - keys.setdefault('tenant_id', self._tenant_id) - with self.subnetpool(prefixes, admin, **keys) as subnetpool: - self._validate_resource(subnetpool, keys, 'subnetpool') - if expected: - self._compare_resource(subnetpool, expected, 'subnetpool') - return subnetpool - - def test_router_no_snat_with_different_address_scope(self): - """Test that if the router has no snat, you cannot add an interface - from a different address scope than the gateway. - """ - # create an external network on one address scope - with self.address_scope(name='as1') as addr_scope, \ - self.network() as ext_net: - self._set_net_external(ext_net['network']['id']) - as_id = addr_scope['address_scope']['id'] - subnet = netaddr.IPNetwork('10.10.10.0/24') - subnetpool = self._test_create_subnetpool( - [subnet.cidr], name='sp1', - min_prefixlen='24', address_scope_id=as_id) - subnetpool_id = subnetpool['subnetpool']['id'] - data = {'subnet': { - 'network_id': ext_net['network']['id'], - 'subnetpool_id': subnetpool_id, - 'ip_version': 4, - 'enable_dhcp': False, - 'tenant_id': ext_net['network']['tenant_id']}} - req = self.new_create_request('subnets', data) - ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) - - # create a regular network on another address scope - with self.address_scope(name='as2') as addr_scope2, \ - self.network() as net: - as_id2 = addr_scope2['address_scope']['id'] - subnet2 = netaddr.IPNetwork('20.10.10.0/24') - subnetpool2 = self._test_create_subnetpool( - [subnet2.cidr], name='sp2', - min_prefixlen='24', address_scope_id=as_id2) - subnetpool_id2 = subnetpool2['subnetpool']['id'] - data = {'subnet': { - 'network_id': net['network']['id'], - 'subnetpool_id': subnetpool_id2, - 'ip_version': 4, - 'tenant_id': net['network']['tenant_id']}} - req = self.new_create_request('subnets', data) - int_subnet = self.deserialize( - self.fmt, req.get_response(self.api)) - - # create a no snat router with this gateway - with self.router() as r: - self._add_external_gateway_to_router( - r['router']['id'], - ext_subnet['subnet']['network_id']) - self._update_router_enable_snat( - r['router']['id'], - ext_subnet['subnet']['network_id'], - False) - - # should fail adding the interface to the router - err_code = webob.exc.HTTPBadRequest.code - self._router_interface_action('add', - r['router']['id'], - int_subnet['subnet']['id'], - None, - err_code) - - def _create_subnet_and_add_to_router(self, subnetpool_id, router_id): - # create a regular network on the given subnet pool - with self.network() as net: - data = {'subnet': { - 'network_id': net['network']['id'], - 'subnetpool_id': subnetpool_id, - 'ip_version': 4, - 'tenant_id': net['network']['tenant_id']}} - req = self.new_create_request('subnets', data) - int_subnet = self.deserialize( - self.fmt, req.get_response(self.api)) - - # Add the interface to the router - self._router_interface_action( - 'add', - router_id, - int_subnet['subnet']['id'], - None) - return int_subnet - - def test_router_no_snat_with_same_address_scope(self): - """Test that if the router has no snat, you can add an interface - from the same address scope as the gateway. 
- """ - # create an external network on one address scope - with self.address_scope(name='as1') as addr_scope, \ - self.network() as ext_net: - self._set_net_external(ext_net['network']['id']) - as_id = addr_scope['address_scope']['id'] - subnet = netaddr.IPNetwork('10.10.10.0/21') - subnetpool = self._test_create_subnetpool( - [subnet.cidr], name='sp1', - min_prefixlen='24', address_scope_id=as_id) - subnetpool_id = subnetpool['subnetpool']['id'] - data = {'subnet': { - 'network_id': ext_net['network']['id'], - 'subnetpool_id': subnetpool_id, - 'ip_version': 4, - 'enable_dhcp': False, - 'tenant_id': ext_net['network']['tenant_id']}} - req = self.new_create_request('subnets', data) - ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) - - # create a regular network on the same address scope - # and create a no snat router with this gateway - with self.router() as r: - self._add_external_gateway_to_router( - r['router']['id'], - ext_subnet['subnet']['network_id']) - self._update_router_enable_snat( - r['router']['id'], - ext_subnet['subnet']['network_id'], - False) - - # should succeed adding the interface to the router - self._create_subnet_and_add_to_router( - subnetpool_id, r['router']['id']) - - def test_router_address_scope_snat_rules(self): - """Test that if the router interface had the same address scope - as the gateway - snat rule is not added, but firewall rule is. - """ - # create an external network on one address scope - with self.address_scope(name='as1') as addr_scope, \ - self.network() as ext_net: - self._set_net_external(ext_net['network']['id']) - as_id = addr_scope['address_scope']['id'] - subnet = netaddr.IPNetwork('10.10.10.0/21') - subnetpool = self._test_create_subnetpool( - [subnet.cidr], name='sp1', - min_prefixlen='24', address_scope_id=as_id) - subnetpool_id = subnetpool['subnetpool']['id'] - data = {'subnet': { - 'network_id': ext_net['network']['id'], - 'subnetpool_id': subnetpool_id, - 'ip_version': 4, - 'enable_dhcp': False, - 'tenant_id': ext_net['network']['tenant_id']}} - req = self.new_create_request('subnets', data) - ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) - - # create a regular network on the same address scope - # and create a router with this gateway - with self.router() as r: - self._add_external_gateway_to_router( - r['router']['id'], - ext_subnet['subnet']['network_id']) - - # Add the interface to the router - with mock.patch.object( - edge_utils, 'update_nat_rules') as update_nat,\ - mock.patch.object( - edge_utils, 'update_firewall') as update_fw: - - int_subnet = self._create_subnet_and_add_to_router( - subnetpool_id, r['router']['id']) - - # make sure snat rules are not added - update_nat.assert_called_once_with( - mock.ANY, mock.ANY, r['router']['id'], [], []) - - # check fw rules - fw_rules = update_fw.call_args[0][3][ - 'firewall_rule_list'] - exp_fw_len = 6 if self.with_md_proxy else 2 - pool_rule_ind = 5 if self.with_md_proxy else 1 - pool_rule = fw_rules[pool_rule_ind] - self.assertEqual(exp_fw_len, len(fw_rules)) - self.assertEqual('Allocation Pool Rule', - pool_rule['name']) - self.assertEqual('allow', pool_rule['action']) - self.assertEqual(int_subnet['subnet']['cidr'], - pool_rule['destination_ip_address'][0]) - self.assertEqual('external', - pool_rule['source_vnic_groups'][0]) - - def test_router_address_scope_fw_rules(self): - """Test that if the router interfaces has different address scope - there are separate fw rules - """ - # create a router, networks, and address scopes - with 
self.address_scope(name='as1') as addr_scope1, \ - self.address_scope(name='as2') as addr_scope2, \ - self.router() as r: - - as1_id = addr_scope1['address_scope']['id'] - as2_id = addr_scope2['address_scope']['id'] - pool1 = netaddr.IPNetwork('10.10.10.0/21') - subnetpool1 = self._test_create_subnetpool( - [pool1.cidr], name='sp1', - min_prefixlen='24', address_scope_id=as1_id) - pool2 = netaddr.IPNetwork('20.20.20.0/21') - subnetpool2 = self._test_create_subnetpool( - [pool2.cidr], name='sp2', - min_prefixlen='24', address_scope_id=as2_id) - subnetpool_id1 = subnetpool1['subnetpool']['id'] - subnetpool_id2 = subnetpool2['subnetpool']['id'] - - # Add the interfaces to the router - with mock.patch.object( - edge_utils, 'update_nat_rules'),\ - mock.patch.object(edge_utils, 'update_firewall') as update_fw: - # create subnets on the 2 subnet pools, and attach to router - subnet1 = self._create_subnet_and_add_to_router( - subnetpool_id1, r['router']['id']) - subnet2 = self._create_subnet_and_add_to_router( - subnetpool_id2, r['router']['id']) - subnet3 = self._create_subnet_and_add_to_router( - subnetpool_id2, r['router']['id']) - - expected_rules = [ - {'enabled': True, - 'destination_ip_address': [subnet1['subnet']['cidr']], - 'action': 'allow', - 'name': 'Subnet Rule', - 'source_ip_address': [subnet1['subnet']['cidr']]}, - {'enabled': True, - 'destination_ip_address': [subnet2['subnet']['cidr'], - subnet3['subnet']['cidr']], - 'action': 'allow', - 'name': 'Subnet Rule', - 'source_ip_address': [subnet2['subnet']['cidr'], - subnet3['subnet']['cidr']]} - ] + self._get_md_proxy_fw_rules() - - # check the final fw rules - fw_rules = update_fw.call_args[0][3][ - 'firewall_rule_list'] - self.assertEqual(len(expected_rules), len(fw_rules)) - self.assertEqual(self._recursive_sort_list(expected_rules), - self._recursive_sort_list(fw_rules)) - - def _prepare_external_subnet_on_address_scope(self, - ext_net, - address_scope): - - self._set_net_external(ext_net['network']['id']) - as_id = address_scope['address_scope']['id'] - subnet = netaddr.IPNetwork('10.10.10.0/21') - subnetpool = self._test_create_subnetpool( - [subnet.cidr], name='sp1', - min_prefixlen='24', address_scope_id=as_id) - subnetpool_id = subnetpool['subnetpool']['id'] - data = {'subnet': { - 'network_id': ext_net['network']['id'], - 'subnetpool_id': subnetpool_id, - 'ip_version': 4, - 'enable_dhcp': False, - 'tenant_id': ext_net['network']['tenant_id']}} - req = self.new_create_request('subnets', data) - ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) - return ext_subnet['subnet'] - - def _test_router_address_scope_change(self, change_gw=False): - """When subnetpool address scope changes, and router that was - originally under same address scope, results having different - address scopes, relevant snat rules are added. 
- """ - # create an external network on one address scope - with self.address_scope(name='as1') as addr_scope, \ - self.network() as ext_net: - ext_subnet = self._prepare_external_subnet_on_address_scope( - ext_net, addr_scope) - - # create a router with this gateway - with self.router() as r: - self._add_external_gateway_to_router( - r['router']['id'], - ext_subnet['network_id']) - - # create a regular network on same address scope - # and verify no snat change - as_id = addr_scope['address_scope']['id'] - subnet2 = netaddr.IPNetwork('40.10.10.0/24') - subnetpool2 = self._test_create_subnetpool( - [subnet2.cidr], name='sp2', - min_prefixlen='24', address_scope_id=as_id) - subnetpool2_id = subnetpool2['subnetpool']['id'] - - self._create_subnet_and_add_to_router( - subnetpool2_id, r['router']['id']) - - # change address scope of the first subnetpool - with self.address_scope(name='as2') as addr_scope2,\ - mock.patch.object(edge_utils, - 'update_nat_rules') as update_nat,\ - mock.patch.object(edge_utils, - 'update_firewall') as update_fw: - - as2_id = addr_scope2['address_scope']['id'] - data = {'subnetpool': { - 'address_scope_id': as2_id}} - - if change_gw: - subnetpool_to_update = ext_subnet['subnetpool_id'] - else: - subnetpool_to_update = subnetpool2_id - - req = self.new_update_request('subnetpools', data, - subnetpool_to_update) - req.get_response(self.api) - - # Verify that the snat & fw rule are being updated - update_nat.assert_called_once() - update_fw.assert_called_once() - - def test_router_address_scope_change(self): - self._test_router_address_scope_change() - - def test_router_address_scope_gw_change(self): - self._test_router_address_scope_change(change_gw=True) - - def test_router_add_interface_delete_port_after_failure(self): - with self.router() as r, self.subnet(enable_dhcp=False) as s: - plugin = directory.get_plugin() - # inject a failure in the update port that happens at the end - # to ensure the port gets deleted - with mock.patch.object( - plugin, 'update_port', - side_effect=n_exc.InvalidInput(error_message='x')): - self._router_interface_action('add', - r['router']['id'], - s['subnet']['id'], - None, - webob.exc.HTTPBadRequest.code) - exp_num_of_ports = 1 if self.with_md_proxy else 0 - ports = plugin.get_ports(context.get_admin_context()) - self.assertEqual(exp_num_of_ports, len(ports)) - - -class ExtGwModeTestCase(NsxVPluginV2TestCase, - test_ext_gw_mode.ExtGwModeIntTestCase): - def test_router_gateway_set_fail_after_port_create(self): - self.skipTest("TBD") - - -class NsxVSecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase): - def setUp(self, - plugin=PLUGIN_NAME, - ext_mgr=None, - service_plugins=None): - test_utils.override_nsx_ini_test() - mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) - mock_vcns_instance = mock_vcns.start() - self.fc2 = fake_vcns.FakeVcns() - mock_vcns_instance.return_value = self.fc2 - edge_utils.query_dhcp_service_config = mock.Mock(return_value=[]) - mock_create_dhcp_service = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service')) - mock_create_dhcp_service.start() - mock_update_dhcp_service = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service')) - mock_update_dhcp_service.start() - mock_delete_dhcp_service = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service')) - mock_delete_dhcp_service.start() - mock_check_backup_edge_pools = mock.patch("%s.%s" % ( - vmware.EDGE_MANAGE_NAME, '_check_backup_edge_pools')) - mock_check_backup_edge_pools.start() - - 
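# The spawn_n stub below simply invokes the target function inline
# (side_effect calls it directly), so work that would otherwise be
# dispatched asynchronously runs synchronously during these tests.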
c_utils.spawn_n = mock.Mock(side_effect=lambda f: f()) - super(NsxVSecurityGroupsTestCase, self).setUp(plugin=plugin, - ext_mgr=ext_mgr) - self.plugin = directory.get_plugin() - self.addCleanup(self.fc2.reset_all) - self.original_subnet = self.subnet - - def no_dhcp_subnet(self, *args, **kwargs): - if 'enable_dhcp' in kwargs: - return self.original_subnet(*args, **kwargs) - return self.original_subnet(*args, enable_dhcp=False, **kwargs) - - -class NsxVTestSecurityGroup(ext_sg.TestSecurityGroups, - NsxVSecurityGroupsTestCase): - - @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') - def setUp(self, mock_deploy, - plugin=PLUGIN_NAME, - ext_mgr=None, - service_plugins=None): - - super(NsxVTestSecurityGroup, self).setUp( - plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) - plugin_instance = directory.get_plugin() - plugin_instance._get_edge_id_by_rtr_id = mock.Mock() - plugin_instance._get_edge_id_by_rtr_id.return_value = False - plugin_instance._get_edge_id_and_az_by_rtr_id = mock.Mock() - plugin_instance._get_edge_id_and_az_by_rtr_id.return_value = ( - False, False) - - @with_no_dhcp_subnet - def test_list_ports_security_group(self): - return super(NsxVTestSecurityGroup, - self).test_list_ports_security_group() - - def test_vnic_security_group_membership(self): - p = directory.get_plugin() - self.fc2.add_member_to_security_group = ( - mock.Mock().add_member_to_security_group) - self.fc2.remove_member_from_security_group = ( - mock.Mock().remove_member_from_security_group) - nsx_sg_id = str(self.fc2._securitygroups['ids']) - device_id = _uuid() - port_index = 0 - # The expected vnic-id format by NsxV - vnic_id = '%s.%03d' % (device_id, port_index) - with self.port(device_id=device_id, - device_owner='compute:None') as port: - (self.fc2.add_member_to_security_group - .assert_called_once_with(p.sg_container_id, nsx_sg_id)) - self.fc2.add_member_to_security_group.reset_mock() - data = {'port': {'vnic_index': port_index}} - self.new_update_request('ports', data, - port['port']['id']).get_response(self.api) - # The vnic should be added as a member to the nsx-security-groups - # which match the port security-groups - (self.fc2.add_member_to_security_group - .assert_called_once_with(nsx_sg_id, vnic_id)) - - # The vnic should be removed from the nsx-security-groups which match - # the deleted port security-groups - #TODO(kobis): Port is not removed automatically - # (self.fc2.remove_member_from_security_group - # .assert_called_once_with(nsx_sg_id, vnic_id)) - - def test_create_secgroup_deleted_upon_fw_section_create_fail(self): - _context = context.Context('', 'tenant_id') - sg = {'security_group': {'name': 'default', - 'tenant_id': 'tenant_id', - 'description': ''}} - expected_id = str(self.fc2._securitygroups['ids']) - with mock.patch.object(self.fc2, - 'create_section') as create_section: - with mock.patch.object(self.fc2, - 'delete_security_group') as delete_sg: - - create_section.side_effect = webob.exc.HTTPInternalServerError - self.assertRaises(webob.exc.HTTPInternalServerError, - self.plugin.create_security_group, - _context.elevated(), sg, default_sg=True) - delete_sg.assert_called_once_with(expected_id) - - def test_create_security_group_rule_duplicate_rules(self): - name = 'webservers' - description = 'my webservers' - with mock.patch.object(self.plugin.nsx_v.vcns, - 'remove_rule_from_section') as rm_rule_mock: - with self.security_group(name, description) as sg: - rule = self._build_security_group_rule( - sg['security_group']['id'], 'ingress', - 
constants.PROTO_NAME_TCP, '22', '22') - self._create_security_group_rule(self.fmt, rule) - res = self._create_security_group_rule(self.fmt, rule) - self.deserialize(self.fmt, res) - self.assertEqual(webob.exc.HTTPConflict.code, res.status_int) - rm_rule_mock.assert_called_once_with(mock.ANY, mock.ANY) - - def test_create_security_group_rule_with_specific_id(self): - # This test is aimed to test the security-group db mixin - pass - - def _plugin_update_security_group(self, context, id, logging): - data = {'security_group': {'logging': logging}} - security_group = ( - self.plugin.update_security_group(context, id, data)) - return security_group - - def _plugin_create_security_group(self, context, logging=False): - data = {'security_group': {'name': 'SG', - 'tenant_id': 'tenant_id', - 'description': ''}} - if logging: - data['security_group']['logging'] = True - security_group = ( - self.plugin.create_security_group(context, data, False)) - return security_group - - def test_create_security_group_default_logging(self): - _context = context.get_admin_context() - sg = self._plugin_create_security_group(_context) - self.assertFalse(sg['logging']) - - def test_create_security_group_with_logging(self): - _context = context.get_admin_context() - sg = self._plugin_create_security_group(_context, logging=True) - self.assertTrue(sg['logging']) - - def test_update_security_group_with_logging(self): - _context = context.get_admin_context() - sg = self._plugin_create_security_group(_context) - sg = self._plugin_update_security_group(_context, sg['id'], True) - self.assertTrue(sg['logging']) - - def _create_default_sg(self, ctx): - self.plugin._ensure_default_security_group(ctx, 'tenant_id') - - def test_create_security_group_default_nsx_name(self): - _context = context.get_admin_context() - self._create_default_sg(_context) - with mock.patch.object(self.plugin.nsx_v.vcns, - 'create_security_group', - return_value=({}, '3')) as nsxv_create: - self._plugin_create_security_group(_context) - created_sg = nsxv_create.call_args[0] - created_name = created_sg[0]['securitygroup']['name'] - self.assertTrue(re.match(r'SG \(.*\)', created_name)) - - def test_create_security_group_non_default_nsx_name(self): - # Use non default nsx name format - cfg.CONF.set_override('nsx_sg_name_format', '%(name)s [%(id)s]', - group="nsxv") - - _context = context.get_admin_context() - self._create_default_sg(_context) - with mock.patch.object(self.plugin.nsx_v.vcns, - 'create_security_group', - return_value=({}, '3')) as nsxv_create: - self._plugin_create_security_group(_context) - created_sg = nsxv_create.call_args[0] - created_name = created_sg[0]['securitygroup']['name'] - self.assertTrue(re.match(r'SG \[.*\]', created_name)) - - def test_create_security_group_rule_bulk(self): - """Verify that bulk rule create updates the backend section once""" - fake_update_sect = self.fc2.update_section - - def mock_update_section(section_uri, request, h): - return fake_update_sect(section_uri, request, h) - plugin = directory.get_plugin() - with self.security_group() as sg,\ - mock.patch.object(plugin.nsx_v.vcns, 'update_section', - side_effect=mock_update_section) as update_sect: - rule1 = self._build_security_group_rule(sg['security_group']['id'], - 'ingress', - 'tcp', '22', - '22', '10.0.0.1/24') - rule2 = self._build_security_group_rule(sg['security_group']['id'], - 'ingress', - 'tcp', '23', - '23', '10.0.0.1/24') - rules = {'security_group_rules': [rule1['security_group_rule'], - rule2['security_group_rule']]} - res = 
self._create_security_group_rule(self.fmt, rules) - ret = self.deserialize(self.fmt, res) - self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) - self.assertEqual(2, len(ret['security_group_rules'])) - update_sect.assert_called_once() - - def test_create_security_group_rule_protocol_as_number_range(self): - self.skipTest('not supported') - - def test_create_security_group_rule_protocol_as_number_with_port(self): - self.skipTest('not supported') - - def test_create_security_group_rule_with_remote_group(self): - with self.security_group() as sg1, self.security_group() as sg2: - security_group_id = sg1['security_group']['id'] - direction = "ingress" - remote_group_id = sg2['security_group']['id'] - protocol = "tcp" - keys = [('remote_group_id', remote_group_id), - ('security_group_id', security_group_id), - ('direction', direction), - ('protocol', protocol)] - with self.security_group_rule( - security_group_id, direction=direction, protocol=protocol, - remote_group_id=remote_group_id) as rule: - for k, v, in keys: - self.assertEqual(rule['security_group_rule'][k], v) - - def test_delete_security_group_rule_with_remote_group(self): - com_plugin.subscribe() - with self.security_group() as sg1, self.security_group() as sg2: - security_group_id = sg1['security_group']['id'] - direction = "ingress" - remote_group_id = sg2['security_group']['id'] - protocol = "tcp" - with self.security_group_rule( - security_group_id, direction=direction, protocol=protocol, - remote_group_id=remote_group_id) as rule,\ - mock.patch.object( - self.plugin, "delete_security_group_rule") as del_rule: - # delete sg2 - self._delete('security-groups', remote_group_id, - webob.exc.HTTPNoContent.code) - # verify the rule was deleted - del_rule.assert_called_once_with( - mock.ANY, rule["security_group_rule"]["id"]) - - def test_create_security_group_rule_remote_address_group_id(self): - self.skipTest('No support for SG address groups') - - def test_delete_address_group_in_use(self): - self.skipTest('No support for SG address groups') - - def test_create_security_group_rule_multiple_remotes(self): - self.skipTest('No support for SG address groups') - - def test_list_security_groups_with_shared_filter_false(self): - self.skipTest('No support for SG shared field') - - -class TestVdrTestCase(L3NatTest, L3NatTestCaseBase, - test_l3_plugin.L3NatDBIntTestCase, - IPv6ExpectedFailuresTestMixin, - NsxVPluginV2TestCase): - - def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): - # init the availability zones in the configuration of the plugin - self.az_name = 'az7' - set_az_in_config(self.az_name) - super(TestVdrTestCase, self).setUp( - plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) - self.plugin_instance.nsx_v.is_subnet_in_use = mock.Mock() - self.plugin_instance.nsx_v.is_subnet_in_use.return_value = False - self._default_tenant_id = self._tenant_id - self._router_tenant_id = 'test-router-tenant' - - def _get_md_proxy_fw_rules(self): - return [] - - @mock.patch.object(edge_utils.EdgeManager, - 'update_interface_addr') - def test_router_update_gateway_with_different_external_subnet(self, mock): - # This test calls the backend, so we need a mock for the edge_utils - super( - TestVdrTestCase, - self).test_router_update_gateway_with_different_external_subnet() - - def test_floatingip_multi_external_one_internal(self): - self.skipTest('skipped') - - def test_router_add_gateway_multiple_subnets_ipv6(self): - self.skipTest('not supported') - - def test_router_add_interface_ipv6_subnet(self): - 
self.skipTest('Not supported') - - def test_update_router_interface_port_ipv6_subnet_ext_ra(self): - self.skipTest('Not supported') - - def test_router_add_interface_dup_subnet2_returns_400(self): - self.skipTest('skipped') - - def test_floatingip_same_external_and_internal(self): - self.skipTest('skipped') - - def test_router_add_interface_by_port_other_tenant_address_out_of_pool( - self): - # multiple fixed ips per port are not supported - self.skipTest('not supported') - - def test_router_add_interface_by_port_other_tenant_address_in_pool(self): - # multiple fixed ips per port are not supported - self.skipTest('not supported') - - def test_router_add_interface_by_subnet_other_tenant_subnet_returns_400( - self): - # distributes router creation by another tenant is blocked by policy - self.skipTest('not supported') - - def test_router_add_interface_by_port_admin_address_out_of_pool(self): - # multiple fixed ips per port are not supported - self.skipTest('not supported') - - def test_create_router_fail_at_the_backend(self): - p = directory.get_plugin() - edge_manager = p.edge_manager - with mock.patch.object(edge_manager, 'create_lrouter', - side_effect=[n_exc.NeutronException]): - router = {'router': {'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'fake_tenant', - 'distributed': True}} - self.assertRaises(n_exc.NeutronException, - p.create_router, - context.get_admin_context(), - router) - self._test_list_resources('router', ()) - - def test_update_port_device_id_to_different_tenants_router(self): - self.skipTest('TBD') - - def test_router_add_and_remove_gateway_tenant_ctx(self): - self.skipTest('TBD') - - def _create_router(self, fmt, tenant_id, name=None, - admin_state_up=None, set_context=False, - arg_list=None, **kwargs): - tenant_id = tenant_id or _uuid() - data = {'router': {'tenant_id': tenant_id}} - if name: - data['router']['name'] = name - if admin_state_up: - data['router']['admin_state_up'] = admin_state_up - for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): - # Arg must be present and not empty - if arg in kwargs and kwargs[arg]: - data['router'][arg] = kwargs[arg] - - if 'distributed' in kwargs: - data['router']['distributed'] = kwargs['distributed'] - else: - data['router']['distributed'] = True - if ('availability_zone_hints' in kwargs and - kwargs['availability_zone_hints'] is not None): - data['router']['availability_zone_hints'] = kwargs[ - 'availability_zone_hints'] - - if kwargs.get('router_type'): - data['router']['router_type'] = kwargs.get('router_type') - - router_req = self.new_create_request('routers', data, fmt) - if set_context and tenant_id: - # create a specific auth context for this request - router_req.environ['neutron.context'] = context.Context( - '', tenant_id) - - return router_req.get_response(self.ext_api) - - def _test_router_plr_binding(self, expected_size='compact', - availability_zone=None): - """Test PLR router bindings - - Create a distributed router with an external network and check - that the router was created as it should from the binding entry - """ - # create a distributed router - tenant_id = _uuid() - router_ctx = context.Context('', tenant_id) - az_hints = [availability_zone] if availability_zone else None - res = self._create_router(self.fmt, tenant_id, distributed=True, - availability_zone_hints=az_hints) - r = self.deserialize(self.fmt, res) - self.assertIn('router', r) - - with self._create_l3_ext_network() as net: - with self.subnet(network=net, enable_dhcp=False) as s2: - 
- # Plug network with external mapping - self._set_net_external(s2['subnet']['network_id']) - self._add_external_gateway_to_router( - r['router']['id'], - s2['subnet']['network_id'], - neutron_context=router_ctx) - body = self._show('routers', r['router']['id']) - net_id = (body['router'] - ['external_gateway_info']['network_id']) - self.assertEqual(net_id, - s2['subnet']['network_id']) - - # make sure the plr router was created, with the expected data - plr_id = self.plugin_instance.edge_manager.get_plr_by_tlr_id( - router_ctx, r['router']['id']) - binding = nsxv_db.get_nsxv_router_binding( - router_ctx.session, plr_id) - self.assertEqual(expected_size, binding['appliance_size']) - self.assertEqual('ACTIVE', binding['status']) - self.assertIsNotNone(binding['edge_id']) - self.assertEqual('service', binding['edge_type']) - self.assertTrue(binding['router_id'].startswith('plr')) - if availability_zone: - self.assertEqual( - availability_zone, binding['availability_zone']) - else: - self.assertEqual('default', binding['availability_zone']) - - # Cleanup - self._remove_external_gateway_from_router( - r['router']['id'], - s2['subnet']['network_id']) - - def test_router_plr_binding_default_size(self): - self._test_router_plr_binding() - - def test_router_plr_binding_configured_size(self): - cfg.CONF.set_override('exclusive_router_appliance_size', - 'large', group="nsxv") - self._test_router_plr_binding(expected_size='large') - - def test_router_plr_binding_default_az(self): - self._test_router_plr_binding(availability_zone='default') - - def test_router_plr_binding_with_az(self): - self._test_router_plr_binding(availability_zone=self.az_name) - - def test_router_binding_with_az(self): - """Check distributed router creation with an availability zone - """ - # create a distributed router - tenant_id = _uuid() - router_ctx = context.Context('', tenant_id) - res = self._create_router(self.fmt, tenant_id, distributed=True, - availability_zone_hints=[self.az_name]) - r = self.deserialize(self.fmt, res) - self.assertIn('router', r) - - # check that we have an edge for this router, with the correct - # availability zone - binding = nsxv_db.get_nsxv_router_binding( - router_ctx.session, r['router']['id']) - - self.assertEqual('compact', binding['appliance_size']) - self.assertEqual('ACTIVE', binding['status']) - self.assertIsNotNone(binding['edge_id']) - self.assertEqual('vdr', binding['edge_type']) - self.assertEqual(binding['router_id'], r['router']['id']) - self.assertEqual(self.az_name, binding['availability_zone']) - - def _test_router_create_with_distributed(self, dist_input, dist_expected, - return_code=201, **kwargs): - data = {'tenant_id': 'whatever'} - data['name'] = 'router1' - data['distributed'] = dist_input - for k, v in kwargs.items(): - data[k] = v - router_req = self.new_create_request( - 'routers', {'router': data}, self.fmt) - res = router_req.get_response(self.ext_api) - self.assertEqual(return_code, res.status_int) - if res.status_int == 201: - router = self.deserialize(self.fmt, res) - self.assertIn('distributed', router['router']) - if dist_input: - self.assertNotIn('router_type', router['router']) - self.assertEqual(dist_expected, - router['router']['distributed']) - - def test_create_router_fails_with_router_type(self): - self._test_router_create_with_distributed(True, True, - return_code=400, - router_type="shared") - - def test_router_create_distributed(self): - self._test_router_create_with_distributed(True, True) - - def test_router_create_not_distributed(self): - 
self._test_router_create_with_distributed(False, False) - - def test_router_create_distributed_unspecified(self): - self._test_router_create_with_distributed(None, False) - - def _test_create_router_with_az_hint(self, with_hint): - # init the availability zones in the plugin - az_name = 'az7' - set_az_in_config(az_name) - p = directory.get_plugin() - p._availability_zones_data = nsx_az.NsxVAvailabilityZones() - - # create a router with/without hints - router = {'router': {'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'FAKE_TENANT', - 'distributed': True}} - if with_hint: - router['router']['availability_zone_hints'] = [az_name] - returned_router = p.create_router(context.get_admin_context(), - router) - # availability zones is still empty because the router is not attached - if with_hint: - self.assertEqual([az_name], - returned_router['availability_zone_hints']) - else: - self.assertEqual([], - returned_router['availability_zone_hints']) - - edge_id = edge_utils.get_router_edge_id( - context.get_admin_context(), returned_router['id']) - res_az = nsxv_db.get_edge_availability_zone( - context.get_admin_context().session, edge_id) - expected_az = az_name if with_hint else 'default' - self.assertEqual(expected_az, res_az) - - def test_create_router_with_az_hint(self): - self._test_create_router_with_az_hint(True) - - def test_create_router_without_az_hint(self): - self._test_create_router_with_az_hint(False) - - def test_floatingip_with_assoc_fails(self): - self._test_floatingip_with_assoc_fails( - self._plugin_name + '._check_and_get_fip_assoc') - - def test_floatingip_update(self): - super(TestVdrTestCase, self).test_floatingip_update( - constants.FLOATINGIP_STATUS_DOWN) - - def test_floatingip_with_invalid_create_port(self): - self._test_floatingip_with_invalid_create_port(self._plugin_name) - - def test_router_add_gateway_invalid_network_returns_404(self): - with self.router() as r: - self._add_external_gateway_to_router( - r['router']['id'], - uuidutils.generate_uuid(), - expected_code=webob.exc.HTTPNotFound.code) - - def test_router_add_interfaces_with_multiple_subnets_on_same_network(self): - with self.router() as r,\ - self.network() as n,\ - self.subnet(network=n) as s1,\ - self.subnet(network=n, cidr='11.0.0.0/24') as s2: - self._router_interface_action('add', - r['router']['id'], - s1['subnet']['id'], - None) - err_code = webob.exc.HTTPBadRequest.code - self._router_interface_action('add', - r['router']['id'], - s2['subnet']['id'], - None, - err_code) - self._router_interface_action('remove', - r['router']['id'], - s1['subnet']['id'], - None) - - def test_router_add_interface_with_external_net_fail(self): - with self.router() as r,\ - self.network() as n,\ - self.subnet(network=n) as s: - # Set the network as an external net - net_id = n['network']['id'] - self._set_net_external(net_id) - err_code = webob.exc.HTTPBadRequest.code - self._router_interface_action('add', - r['router']['id'], - s['subnet']['id'], - None, - err_code) - - def test_different_type_routers_add_interfaces_on_same_network_pass(self): - with self.router() as dist, \ - self.router(distributed=False, router_type='shared') as shared, \ - self.router(distributed=False, router_type='exclusive') as excl: - with self.network() as n: - with self.subnet(network=n) as s1, \ - self.subnet(network=n, cidr='11.0.0.0/24') as s2, \ - self.subnet(network=n, cidr='12.0.0.0/24') as s3: - self._router_interface_action('add', - shared['router']['id'], - s1['subnet']['id'], - None) - 
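# Aside: a minimal sketch of the 'routers' request body shape assembled by
# the _create_router helpers in these tests; the tenant id and zone name
# below are illustrative values only.
def build_router_body(tenant_id, name=None, distributed=True,
                      router_type=None, availability_zone_hints=None):
    """Build a POST body like the ones these tests send to /routers."""
    router = {'tenant_id': tenant_id, 'distributed': distributed}
    if name:
        router['name'] = name
    if router_type:
        router['router_type'] = router_type
    if availability_zone_hints is not None:
        router['availability_zone_hints'] = availability_zone_hints
    return {'router': router}


# Example: a distributed router pinned to the 'az7' availability zone.
body = build_router_body('some-tenant', name='router1',
                         availability_zone_hints=['az7'])
assert body['router']['distributed'] is True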
self._router_interface_action('add', - excl['router']['id'], - s2['subnet']['id'], - None) - self._router_interface_action('add', - dist['router']['id'], - s3['subnet']['id'], - None) - self._router_interface_action('remove', - dist['router']['id'], - s3['subnet']['id'], - None) - self._router_interface_action('remove', - excl['router']['id'], - s2['subnet']['id'], - None) - self._router_interface_action('remove', - shared['router']['id'], - s1['subnet']['id'], - None) - - def test_router_update_type_fails(self): - """Check distributed router cannot change it's type - """ - # create a distributed router - tenant_id = _uuid() - res = self._create_router(self.fmt, tenant_id, distributed=True) - r = self.deserialize(self.fmt, res) - router_id = r['router']['id'] - - # make sure changing the type fails - self._update('routers', router_id, - {'router': {'router_type': 'shared'}}, - expected_code=400) - self._update('routers', router_id, - {'router': {'router_type': 'exclusive'}}, - expected_code=400) - self._update('routers', router_id, - {'router': {'distributed': False}}, - expected_code=400) - # make sure keeping the type is ok - self._update('routers', router_id, - {'router': {'distributed': True}}, - expected_code=200) - - def test_router_update_size_fails(self): - """Check distributed router cannot change it's type - """ - # create a distributed router - tenant_id = _uuid() - res = self._create_router(self.fmt, tenant_id, distributed=True) - r = self.deserialize(self.fmt, res) - router_id = r['router']['id'] - - # make sure changing the type fails - self._update('routers', router_id, - {'router': {'router_size': 'small'}}, - expected_code=400) - - def test_router_add_interface_multiple_ipv4_subnets(self): - self.skipTest('TBD') - - def test_router_remove_ipv6_subnet_from_interface(self): - self.skipTest('TBD') - - def test_router_add_interface_multiple_ipv6_subnets_same_net(self): - self.skipTest('TBD') - - def test_router_add_interface_multiple_ipv6_subnets_different_net(self): - self.skipTest('TBD') - - def test_create_router_gateway_fails(self): - self.skipTest('not supported') - - def test_floatingip_update_to_same_port_id_twice(self): - self.skipTest('Plugin changes floating port status') - - def test_update_subnet_gateway_for_external_net(self): - plugin = directory.get_plugin() - router_obj = dist_router_driver.RouterDistributedDriver(plugin) - with mock.patch.object(plugin, '_find_router_driver', - return_value=router_obj): - with mock.patch.object(router_obj, - '_update_nexthop') as update_nexthop: - super(TestVdrTestCase, - self).test_update_subnet_gateway_for_external_net() - self.assertTrue(update_nexthop.called) - - def test_router_add_interface_ipv6_port_existing_network_returns_400(self): - """Ensure unique IPv6 router ports per network id. - Adding a router port containing one or more IPv6 subnets with the same - network id as an existing router port should fail. This is so - there is no ambiguity regarding on which port to add an IPv6 subnet - when executing router-interface-add with a subnet and no port. 
- """ - with self.network() as n, self.router() as r: - with self.subnet(network=n, cidr='fd00::/64', - ip_version=6, enable_dhcp=False) as s1, ( - self.subnet(network=n, cidr='fd01::/64', - ip_version=6, enable_dhcp=False)) as s2: - with self.port(subnet=s1) as p: - exp_code = webob.exc.HTTPBadRequest.code - self._router_interface_action('add', - r['router']['id'], - s2['subnet']['id'], - None, - expected_code=exp_code) - self._router_interface_action('add', - r['router']['id'], - None, - p['port']['id'], - expected_code=exp_code) - - def test_router_update_with_size_fail(self): - """Distributed router currently does not support router size update - """ - with self.router() as r: - router_id = r['router']['id'] - body = self._show('routers', router_id) - body['router']['router_size'] = 'small' - self._update('routers', router_id, body, - expected_code=400, - neutron_context=context.get_admin_context()) - - -class TestNSXvAllowedAddressPairs(NsxVPluginV2TestCase, - test_addr_pair.TestAllowedAddressPairs): - - def setUp(self, plugin=PLUGIN_NAME): - super(TestNSXvAllowedAddressPairs, self).setUp(plugin=plugin) - - # NOTE: the tests below are skipped due to the fact that they update the - # mac address. The NSX|V does not support address pairs when a MAC address - # is configured. - def test_create_port_allowed_address_pairs(self): - pass - - def test_update_add_address_pairs(self): - pass - - def test_equal_to_max_allowed_address_pair(self): - pass - - def test_update_port_security_off_address_pairs(self): - pass - - def test_create_port_security_true_allowed_address_pairs(self): - pass - - def test_create_port_security_false_allowed_address_pairs(self): - pass - - def _test_create_port_remove_allowed_address_pairs(self, update_value): - pass - - def test_create_overlap_with_fixed_ip(self): - pass - - def test_create_port_with_cidr_address_pair(self): - with self.network() as net: - address_pairs = [{'mac_address': '00:00:00:00:00:01', - 'ip_address': '192.168.1.0/24'}] - self._create_port(self.fmt, net['network']['id'], - expected_res_status=webob.exc.HTTPBadRequest.code, - arg_list=(addrp_apidef.ADDRESS_PAIRS,), - allowed_address_pairs=address_pairs) - - def test_create_port_with_address_pair_existing_fixed_ip_fail(self): - address_pairs1 = [{'ip_address': '10.0.0.2'}] - with self.network() as network: - with self.subnet(network=network, cidr='10.0.0.0/24', - enable_dhcp=False) as subnet: - fixed_ips1 = [{'subnet_id': subnet['subnet']['id'], - 'ip_address': '10.0.0.4'}] - fixed_ips2 = [{'subnet_id': subnet['subnet']['id'], - 'ip_address': '10.0.0.5'}] - self._create_port(self.fmt, network['network']['id'], - arg_list=(addrp_apidef.ADDRESS_PAIRS, - 'fixed_ips'), - allowed_address_pairs=address_pairs1, - fixed_ips=fixed_ips1) - res = self._create_port(self.fmt, network['network']['id'], - arg_list=(addrp_apidef.ADDRESS_PAIRS, - 'fixed_ips'), - allowed_address_pairs=address_pairs1, - fixed_ips=fixed_ips2) - self.assertEqual(res.status_int, 400) - - -class TestNSXPortSecurity(test_psec.TestPortSecurity, - NsxVPluginV2TestCase): - def setUp(self, plugin=PLUGIN_NAME): - super(TestNSXPortSecurity, self).setUp(plugin=plugin) - - def test_create_port_fails_with_secgroup_and_port_security_false(self): - # Security Groups can be used even when port-security is disabled - pass - - def test_update_port_security_off_with_security_group(self): - # Security Groups can be used even when port-security is disabled - pass - - def test_create_port_with_security_group_and_net_sec_false(self): - pass - - def 
_create_compute_port(self, network_name, device_id, port_security): - # create a network without port security - res = self._create_network('json', network_name, True) - net = self.deserialize('json', res) - - # create a compute port with this network and a device - res = self._create_port('json', net['network']['id'], - arg_list=('port_security_enabled', - 'device_id', - 'device_owner',), - port_security_enabled=port_security, - device_id=device_id, - device_owner='compute:None') - return self.deserialize('json', res) - - def _add_vnic_to_port(self, port_id, add_exclude, vnic_index): - """Add vnic to a port and check if the device was added to the - exclude list - """ - plugin = self._get_core_plugin_with_dvs() - vm_moref = 'dummy_moref' - with mock.patch.object(plugin._vcm, 'get_vm_moref', - return_value=vm_moref): - with mock.patch.object( - plugin.nsx_v.vcns, - 'add_vm_to_exclude_list') as exclude_list_add: - data = {'port': {'vnic_index': vnic_index}} - self.new_update_request( - 'ports', data, port_id).get_response(self.api) - if add_exclude: - # make sure the vm was added to the exclude list - exclude_list_add.assert_called_once_with(vm_moref) - else: - self.assertFalse(exclude_list_add.called) - - def _del_vnic_from_port(self, port_id, del_exclude): - """Delete the vnic & device id from the port and check if - the device was removed from the exclude list - """ - plugin = self._get_core_plugin_with_dvs() - vm_moref = 'dummy_moref' - with mock.patch.object(plugin._vcm, 'get_vm_moref', - return_value=vm_moref): - with mock.patch.object( - plugin.nsx_v.vcns, - 'delete_vm_from_exclude_list') as exclude_list_del: - data = {'port': {'vnic_index': None, 'device_id': ''}} - self.new_update_request( - 'ports', data, port_id).get_response(self.api) - if del_exclude: - # make sure the vm was added to the exclude list - exclude_list_del.assert_called_once_with(vm_moref) - else: - self.assertFalse(exclude_list_del.called) - - def _del_port_with_vnic(self, port_id, del_exclude): - """Delete port with vnic, and check if the device was removed - from the exclude list - """ - plugin = self._get_core_plugin_with_dvs() - vm_moref = 'dummy_moref' - with mock.patch.object(plugin._vcm, 'get_vm_moref', - return_value=vm_moref): - with mock.patch.object( - plugin.nsx_v.vcns, - 'delete_vm_from_exclude_list') as exclude_list_del: - self.new_delete_request( - 'ports', port_id).get_response(self.api) - if del_exclude: - # make sure the vm was added to the exclude list - exclude_list_del.assert_called_once_with(vm_moref) - else: - self.assertFalse(exclude_list_del.called) - - def test_update_port_no_security_with_vnic(self): - device_id = _uuid() - # create a compute port without port security - port = self._create_compute_port('net1', device_id, False) - - # add vnic to the port - self._add_vnic_to_port(port['port']['id'], True, 3) - - # delete vnic from the port - self._del_vnic_from_port(port['port']['id'], True) - - def test_update_multiple_port_no_security_with_vnic(self): - device_id = _uuid() - # create a compute port without port security - port1 = self._create_compute_port('net1', device_id, False) - # add vnic to the port - self._add_vnic_to_port(port1['port']['id'], True, 3) - - # create another compute port without port security on the same device - port2 = self._create_compute_port('net2', device_id, False) - # add vnic to the port (no need to add to exclude list again) - self._add_vnic_to_port(port2['port']['id'], False, 4) - - # delete vnics from the port - 
self._del_vnic_from_port(port1['port']['id'], False) - self._del_vnic_from_port(port2['port']['id'], True) - - def test_update_mixed_port_no_security_with_vnic(self): - device_id = _uuid() - # create a compute port without port security - port1 = self._create_compute_port('net1', device_id, True) - # add vnic to the port - self._add_vnic_to_port(port1['port']['id'], False, 3) - - irrelevant_device_id = _uuid() - # create a compute port without port security for a different device - port2 = self._create_compute_port('net1', irrelevant_device_id, True) - # add vnic to the port - self._add_vnic_to_port(port2['port']['id'], False, 3) - - # create another compute port without port security on the same device - port3 = self._create_compute_port('net2', device_id, False) - # add vnic to the port (no need to add to exclude list again) - self._add_vnic_to_port(port3['port']['id'], True, 4) - - # delete vnics from the port - self._del_vnic_from_port(port1['port']['id'], False) - self._del_vnic_from_port(port3['port']['id'], True) - self._del_vnic_from_port(port2['port']['id'], False) - - def test_delete_port_no_security_with_vnic(self): - device_id = _uuid() - # create a compute port without port security - port = self._create_compute_port('net1', device_id, False) - - # add vnic to the port - self._add_vnic_to_port(port['port']['id'], True, 3) - - # delete port with the vnic - self._del_port_with_vnic(port['port']['id'], True) - - def test_delete_multiple_port_no_security_with_vnic(self): - device_id = _uuid() - # create a compute port without port security - port1 = self._create_compute_port('net1', device_id, False) - # add vnic to the port - self._add_vnic_to_port(port1['port']['id'], True, 3) - - # create another compute port without port security on the same device - port2 = self._create_compute_port('net2', device_id, False) - # add vnic to the port (no need to add to exclude list again) - self._add_vnic_to_port(port2['port']['id'], False, 4) - - # delete ports with the vnics - self._del_port_with_vnic(port2['port']['id'], False) - self._del_port_with_vnic(port1['port']['id'], True) - - def test_detach_port_no_sec(self): - device_id = _uuid() - # create a compute port without port security - port = self._create_compute_port('net1', device_id, False) - # add vnic to the port - self._add_vnic_to_port(port['port']['id'], True, 3) - - # detach the port - with mock.patch.object( - self.fc2, - 'inactivate_vnic_assigned_addresses') as mock_inactivte: - self._del_vnic_from_port(port['port']['id'], True) - # inactivate spoofguard should not be called - self.assertFalse(mock_inactivte.called) - - def test_detach_port_with_sec(self): - device_id = _uuid() - # create a compute port without port security - port = self._create_compute_port('net1', device_id, True) - # add vnic to the port - self._add_vnic_to_port(port['port']['id'], False, 3) - - # detach the port - with mock.patch.object( - self.fc2, - 'inactivate_vnic_assigned_addresses') as mock_inactivte: - self._del_vnic_from_port(port['port']['id'], False) - # inactivate spoofguard should be called - self.assertTrue(mock_inactivte.called) - - def _toggle_port_security(self, port_id, enable_port_security, - update_exclude): - """Enable/disable port security on a port, and verify that the exclude - list was updated as expected - """ - plugin = self._get_core_plugin_with_dvs() - vm_moref = 'dummy_moref' - data = {'port': {'port_security_enabled': enable_port_security}} - with mock.patch.object(plugin._vcm, 'get_vm_moref', - return_value=vm_moref): - if 
enable_port_security: - with mock.patch.object( - plugin.nsx_v.vcns, - 'delete_vm_from_exclude_list') as exclude_list_del: - self.new_update_request( - 'ports', data, port_id).get_response(self.api) - if update_exclude: - # make sure the vm was added to the exclude list - exclude_list_del.assert_called_once_with(vm_moref) - else: - self.assertFalse(exclude_list_del.called) - else: - with mock.patch.object( - plugin.nsx_v.vcns, - 'add_vm_to_exclude_list') as exclude_list_add: - self.new_update_request( - 'ports', data, port_id).get_response(self.api) - if update_exclude: - # make sure the vm was added to the exclude list - exclude_list_add.assert_called_once_with(vm_moref) - else: - self.assertFalse(exclude_list_add.called) - - def test_update_port_security_with_vnic(self): - device_id = _uuid() - # create a compute port without port security - port = self._create_compute_port('net1', device_id, False) - - # add vnic to the port - self._add_vnic_to_port(port['port']['id'], True, 3) - - # enable port security - self._toggle_port_security(port['port']['id'], True, True) - - # disable port security - self._toggle_port_security(port['port']['id'], False, True) - - # delete vnic from the port - self._del_vnic_from_port(port['port']['id'], True) - - def test_update_multiple_port_security_with_vnic(self): - device_id = _uuid() - # create a compute port without port security - port1 = self._create_compute_port('net1', device_id, False) - - # add vnic to the port - self._add_vnic_to_port(port1['port']['id'], True, 3) - - # create another compute port without port security - port2 = self._create_compute_port('net2', device_id, False) - - # add vnic to the port - self._add_vnic_to_port(port2['port']['id'], False, 4) - - # enable port security on both ports - self._toggle_port_security(port1['port']['id'], True, False) - self._toggle_port_security(port2['port']['id'], True, True) - - # disable port security on both ports - self._toggle_port_security(port1['port']['id'], False, True) - self._toggle_port_security(port2['port']['id'], False, False) - - def test_service_insertion(self): - # init the plugin mocks - p = directory.get_plugin() - self.fc2.add_member_to_security_group = ( - mock.Mock().add_member_to_security_group) - self.fc2.remove_member_from_security_group = ( - mock.Mock().remove_member_from_security_group) - - # mock the service insertion handler - p._si_handler = mock.Mock() - p._si_handler.enabled = True - p._si_handler.sg_id = '11' - - # create a compute port with port security - device_id = _uuid() - port = self._create_compute_port('net1', device_id, True) - - # add vnic to the port, and verify that the port was added to the - # service insertion security group - vnic_id = 3 - vnic_index = '%s.%03d' % (device_id, vnic_id) - self.fc2.add_member_to_security_group.reset_mock() - self._add_vnic_to_port(port['port']['id'], False, vnic_id) - self.fc2.add_member_to_security_group.assert_any_call( - p._si_handler.sg_id, vnic_index) - - # disable the port security and make sure it is removed from the - # security group - self.fc2.remove_member_from_security_group.reset_mock() - self._toggle_port_security(port['port']['id'], False, True) - self.fc2.remove_member_from_security_group.assert_any_call( - p._si_handler.sg_id, vnic_index) - - def test_service_insertion_notify(self): - # create a compute ports with/without port security - device_id = _uuid() - # create 2 compute ports with port security - port1 = self._create_compute_port('net1', device_id, True) - 
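# Aside: a small, self-contained illustration of the vnic-id string format
# these tests expect, '<device-id>.<zero-padded vnic index>'; the device id
# used below is a made-up example value.
def make_vnic_id(device_id, vnic_index):
    """Compose the vnic identifier the same way the tests above do."""
    return '%s.%03d' % (device_id, vnic_index)


assert make_vnic_id('vm-1234', 3) == 'vm-1234.003'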
self._add_vnic_to_port(port1['port']['id'], False, 1) - port2 = self._create_compute_port('net2', device_id, True) - self._add_vnic_to_port(port2['port']['id'], False, 2) - # create 1 compute port without port security - port3 = self._create_compute_port('net3', device_id, False) - self._add_vnic_to_port(port3['port']['id'], True, 3) - - # init the plugin mocks - p = directory.get_plugin() - self.fc2.add_member_to_security_group = ( - mock.Mock().add_member_to_security_group) - - # call the function (that should be called from the flow classifier - # driver) and verify it adds all relevant ports to the group - # Since it uses spawn_n, we should mock it. - orig_spawn = c_utils.spawn_n - c_utils.spawn_n = mock.Mock(side_effect=lambda f, x: f(x, None)) - p.add_vms_to_service_insertion(sg_id='aaa') - # back to normal - c_utils.spawn_n = orig_spawn - self.assertEqual(2, self.fc2.add_member_to_security_group.call_count) - - def test_toggle_non_compute_port_security(self): - # create a network without port security - res = self._create_network('json', 'net1', True) - net = self.deserialize('json', res) - - # create a port with this network and a device - res = self._create_port('json', net['network']['id'], - arg_list=('port_security_enabled',), - port_security_enabled=True) - port = self.deserialize('json', res) - port_id = port['port']['id'] - - # Disable port security - data = {'port': {'port_security_enabled': False}} - updated_port = self.deserialize( - 'json', - self.new_update_request('ports', data, - port_id).get_response(self.api)) - self.assertFalse(updated_port['port']['port_security_enabled']) - shown_port = self.deserialize( - 'json', - self.new_show_request('ports', - port_id).get_response(self.api)) - self.assertFalse(shown_port['port']['port_security_enabled']) - - # Enable port security - data = {'port': {'port_security_enabled': True}} - updated_port = self.deserialize( - 'json', - self.new_update_request('ports', data, - port_id).get_response(self.api)) - self.assertTrue(updated_port['port']['port_security_enabled']) - shown_port = self.deserialize( - 'json', - self.new_show_request('ports', - port_id).get_response(self.api)) - self.assertTrue(shown_port['port']['port_security_enabled']) - - -class TestSharedRouterTestCase(L3NatTest, L3NatTestCaseBase, - test_l3_plugin.L3NatTestCaseMixin, - NsxVPluginV2TestCase): - - def _create_router(self, fmt, tenant_id, name=None, - admin_state_up=None, set_context=False, - arg_list=None, **kwargs): - tenant_id = tenant_id or _uuid() - data = {'router': {'tenant_id': tenant_id}} - if name: - data['router']['name'] = name - if admin_state_up: - data['router']['admin_state_up'] = admin_state_up - for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): - # Arg must be present and not empty - if arg in kwargs and kwargs[arg]: - data['router'][arg] = kwargs[arg] - - data['router']['router_type'] = kwargs.get('router_type', 'shared') - - router_req = self.new_create_request('routers', data, fmt) - if set_context and tenant_id: - # create a specific auth context for this request - router_req.environ['neutron.context'] = context.Context( - '', tenant_id) - - return router_req.get_response(self.ext_api) - - @mock.patch.object(edge_utils.EdgeManager, - 'update_interface_addr') - def test_router_add_interface_multiple_ipv6_subnets_same_net(self, mock): - super(TestSharedRouterTestCase, - self).test_router_add_interface_multiple_ipv6_subnets_same_net() - - def test_router_create_with_no_edge(self): - name = 'router1' - tenant_id = _uuid() - 
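# Aside: a minimal sketch of the synchronous stand-in used above for the
# spawn-style helper, so the spawned callable runs inline and its effect can
# be asserted immediately; background_task and fake_spawn_n are illustrative.
from unittest import mock

results = []


def background_task(value, extra):
    results.append((value, extra))


# Call the callable inline instead of handing it to a green-thread spawner.
fake_spawn_n = mock.Mock(side_effect=lambda func, arg: func(arg, None))
fake_spawn_n(background_task, 'sg-id')
assert results == [('sg-id', None)]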
expected_value = [('name', name), ('tenant_id', tenant_id), - ('admin_state_up', True), ('status', 'ACTIVE'), - ('external_gateway_info', None)] - with self.router(name='router1', admin_state_up=True, - tenant_id=tenant_id) as router: - for k, v in expected_value: - self.assertEqual(router['router'][k], v) - self.assertEqual( - [], - self.plugin_instance.edge_manager.get_routers_on_same_edge( - context.get_admin_context(), router['router']['id'])) - - def test_router_create_with_size_fail_at_backend(self): - data = {'router': { - 'tenant_id': 'whatever', - 'router_type': 'shared', - 'router_size': 'large'}} - router_req = self.new_create_request('routers', data, self.fmt) - res = router_req.get_response(self.ext_api) - router = self.deserialize(self.fmt, res) - msg = ('Bad router request: ' - 'Cannot specify router-size for shared router.') - self.assertEqual("BadRequest", router['NeutronError']['type']) - self.assertEqual(msg, router['NeutronError']['message']) - - def test_router_create_with_gwinfo_with_no_edge(self): - with self._create_l3_ext_network() as net: - with self.subnet(network=net, enable_dhcp=False) as s: - data = {'router': {'tenant_id': 'whatever'}} - data['router']['name'] = 'router1' - data['router']['external_gateway_info'] = { - 'network_id': s['subnet']['network_id']} - router_req = self.new_create_request('routers', data, - self.fmt) - res = router_req.get_response(self.ext_api) - router = self.deserialize(self.fmt, res) - self.assertEqual( - s['subnet']['network_id'], - (router['router']['external_gateway_info'] - ['network_id'])) - self.assertEqual( - [], - self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - router['router']['id'])) - - def test_router_update_with_routes_fail(self): - """Shared router currently does not support static routes - """ - with self.router() as r: - router_id = r['router']['id'] - body = self._show('routers', router_id) - body['router']['routes'] = [{'destination': '5.5.5.5/32', - 'nexthop': '6.6.6.6'}] - self._update('routers', router_id, body, - expected_code=400, - neutron_context=context.get_admin_context()) - - def test_router_update_gateway_with_no_edge(self): - with self.router() as r: - with self.subnet() as s1: - with self._create_l3_ext_network() as net: - with self.subnet(network=net, enable_dhcp=False) as s2: - self._set_net_external(s1['subnet']['network_id']) - try: - self._add_external_gateway_to_router( - r['router']['id'], - s1['subnet']['network_id']) - body = self._show('routers', r['router']['id']) - net_id = (body['router'] - ['external_gateway_info']['network_id']) - self.assertEqual(net_id, - s1['subnet']['network_id']) - self.assertEqual( - [], - self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r['router']['id'])) - # Plug network with external mapping - self._set_net_external(s2['subnet']['network_id']) - self._add_external_gateway_to_router( - r['router']['id'], - s2['subnet']['network_id']) - body = self._show('routers', r['router']['id']) - net_id = (body['router'] - ['external_gateway_info']['network_id']) - self.assertEqual(net_id, - s2['subnet']['network_id']) - self.assertEqual( - [], - self.plugin_instance.edge_manager. 
- get_routers_on_same_edge( - context.get_admin_context(), - r['router']['id'])) - finally: - # Cleanup - self._remove_external_gateway_from_router( - r['router']['id'], - s2['subnet']['network_id']) - - def test_router_update_gateway_with_existing_floatingip_with_edge(self): - with self._create_l3_ext_network() as net: - with self.subnet(network=net, enable_dhcp=False) as subnet: - with self.floatingip_with_assoc() as fip: - self._add_external_gateway_to_router( - fip['floatingip']['router_id'], - subnet['subnet']['network_id'], - expected_code=webob.exc.HTTPConflict.code) - self.assertNotEqual( - [], - self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - fip['floatingip']['router_id'])) - - def test_router_set_gateway_with_interfaces_with_edge(self): - with self.router() as r, self.subnet() as s1: - self._set_net_external(s1['subnet']['network_id']) - try: - self._add_external_gateway_to_router( - r['router']['id'], - s1['subnet']['network_id']) - body = self._show('routers', r['router']['id']) - net_id = (body['router'] - ['external_gateway_info']['network_id']) - self.assertEqual(net_id, - s1['subnet']['network_id']) - self.assertEqual( - [], - self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r['router']['id'])) - - with self.subnet(cidr='11.0.0.0/24') as s11: - with self.subnet(cidr='12.0.0.0/24') as s12: - - self._router_interface_action('add', - r['router']['id'], - s11['subnet']['id'], - None) - self._router_interface_action('add', - r['router']['id'], - s12['subnet']['id'], - None) - self.assertIsNotNone( - self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r['router']['id'])) - self._router_interface_action('remove', - r['router']['id'], - s11['subnet']['id'], - None) - self.assertIsNotNone( - self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r['router']['id'])) - self._router_interface_action('remove', - r['router']['id'], - s12['subnet']['id'], - None) - self.assertEqual( - [], - self.plugin_instance.edge_manager. 
- get_routers_on_same_edge( - context.get_admin_context(), - r['router']['id'])) - finally: - # Cleanup - self._remove_external_gateway_from_router( - r['router']['id'], - s1['subnet']['network_id']) - - @mock.patch.object(edge_utils, "update_firewall") - def test_routers_set_gateway_with_nosnat(self, mock): - expected_fw1 = [{'action': 'allow', - 'enabled': True, - 'name': 'Subnet Rule', - 'source_ip_address': [], - 'destination_ip_address': []}] - expected_fw2 = [{'action': 'allow', - 'enabled': True, - 'name': 'Subnet Rule', - 'source_ip_address': [], - 'destination_ip_address': []}] - nosnat_fw1 = [{'action': 'allow', - 'enabled': True, - 'name': 'No SNAT Rule', - 'source_vnic_groups': ["external"], - 'destination_ip_address': []}] - nosnat_fw2 = [{'action': 'allow', - 'enabled': True, - 'name': 'No SNAT Rule', - 'source_vnic_groups': ["external"], - 'destination_ip_address': []}] - with self.router() as r1, self.router() as r2,\ - self.subnet() as ext_subnet,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='12.0.0.0/24') as s2: - - self._set_net_external(ext_subnet['subnet']['network_id']) - self._router_interface_action( - 'add', r1['router']['id'], - s1['subnet']['id'], None) - expected_fw1[0]['source_ip_address'] = ['11.0.0.0/24'] - expected_fw1[0]['destination_ip_address'] = ['11.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list( - self._get_md_proxy_fw_rules() + expected_fw1), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - self._add_external_gateway_to_router( - r1['router']['id'], - ext_subnet['subnet']['network_id']) - self._add_external_gateway_to_router( - r2['router']['id'], - ext_subnet['subnet']['network_id']) - expected_fw2[0]['source_ip_address'] = ['12.0.0.0/24'] - expected_fw2[0]['destination_ip_address'] = ['12.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list( - self._get_md_proxy_fw_rules() + expected_fw1 + - expected_fw2), - self._recursive_sort_list(fw_rules)) - self._update_router_enable_snat( - r1['router']['id'], - ext_subnet['subnet']['network_id'], - False) - nosnat_fw1[0]['destination_ip_address'] = ['11.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list( - self._get_md_proxy_fw_rules() + expected_fw1 + - expected_fw2 + nosnat_fw1), - self._recursive_sort_list(fw_rules)) - self._update_router_enable_snat( - r2['router']['id'], - ext_subnet['subnet']['network_id'], - False) - nosnat_fw2[0]['destination_ip_address'] = ['12.0.0.0/24'] - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list( - self._get_md_proxy_fw_rules() + expected_fw1 + - expected_fw2 + nosnat_fw1 + nosnat_fw2), - self._recursive_sort_list(fw_rules)) - self._update_router_enable_snat( - r2['router']['id'], - ext_subnet['subnet']['network_id'], - True) - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list( - self._get_md_proxy_fw_rules() + expected_fw1 + - expected_fw2 + nosnat_fw1), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('remove', - r2['router']['id'], - s2['subnet']['id'], - None) - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list( - self._get_md_proxy_fw_rules() + expected_fw1 + nosnat_fw1), - 
self._recursive_sort_list(fw_rules)) - self._remove_external_gateway_from_router( - r1['router']['id'], - ext_subnet['subnet']['network_id']) - fw_rules = mock.call_args[0][3]['firewall_rule_list'] - self.assertEqual( - self._recursive_sort_list( - self._get_md_proxy_fw_rules() + expected_fw1), - self._recursive_sort_list(fw_rules)) - self._router_interface_action('remove', - r1['router']['id'], - s1['subnet']['id'], - None) - self._remove_external_gateway_from_router( - r2['router']['id'], - ext_subnet['subnet']['network_id']) - - def test_routers_with_interface_on_same_edge(self): - with self.router() as r1, self.router() as r2,\ - self.subnet(cidr='11.0.0.0/24') as s11,\ - self.subnet(cidr='12.0.0.0/24') as s12: - self._router_interface_action('add', - r1['router']['id'], - s11['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s12['subnet']['id'], - None) - routers_expected = [r1['router']['id'], r2['router']['id']] - routers_1 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r1['router']['id'])) - self.assertEqual(set(routers_expected), set(routers_1)) - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(set(routers_expected), set(routers_2)) - self._router_interface_action('remove', - r1['router']['id'], - s11['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s12['subnet']['id'], - None) - - def test_routers_with_overlap_interfaces(self): - with self.router() as r1, self.router() as r2,\ - self.subnet(cidr='11.0.0.0/24') as s11,\ - self.subnet(cidr='11.0.0.0/24') as s12: - self._router_interface_action('add', - r1['router']['id'], - s11['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s12['subnet']['id'], - None) - r1_expected = [r1['router']['id']] - routers_1 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r1['router']['id'])) - self.assertEqual(r1_expected, routers_1) - r2_expected = [r2['router']['id']] - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(r2_expected, routers_2) - self._router_interface_action('remove', - r1['router']['id'], - s11['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s12['subnet']['id'], - None) - - def test_routers_with_overlap_interfaces_with_migration(self): - with self.router() as r1, self.router() as r2,\ - self.subnet(cidr='11.0.0.0/24') as s11,\ - self.subnet(cidr='12.0.0.0/24') as s12,\ - self.subnet(cidr='11.0.0.0/24') as s13: - self._router_interface_action('add', - r1['router']['id'], - s11['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s12['subnet']['id'], - None) - r1_expected = [r1['router']['id'], r2['router']['id']] - routers_1 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r1['router']['id'])) - self.assertEqual(set(r1_expected), set(routers_1)) - self._router_interface_action('add', - r2['router']['id'], - s13['subnet']['id'], - None) - r1_expected = [r1['router']['id']] - routers_1 = (self.plugin_instance.edge_manager. 
- get_routers_on_same_edge( - context.get_admin_context(), - r1['router']['id'])) - self.assertEqual(r1_expected, routers_1) - self._router_interface_action('remove', - r1['router']['id'], - s11['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s12['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s13['subnet']['id'], - None) - - def test_routers_with_different_subnet_on_same_network(self): - with self.router() as r1, self.router() as r2,\ - self.network() as net,\ - self.subnet(network=net, cidr='12.0.0.0/24') as s1,\ - self.subnet(network=net, cidr='13.0.0.0/24') as s2: - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(1, len(routers_2)) - self._router_interface_action('remove', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s2['subnet']['id'], - None) - - def test_routers_with_different_subnet_on_same_network_migration(self): - with self.router() as r1, self.router() as r2, self.network() as net,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(network=net, cidr='12.0.0.0/24') as s2,\ - self.subnet(network=net, cidr='13.0.0.0/24') as s3: - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(2, len(routers_2)) - self._router_interface_action('add', - r2['router']['id'], - s3['subnet']['id'], - None) - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(2, len(routers_2)) - self._router_interface_action('remove', - r2['router']['id'], - s3['subnet']['id'], - None) - self._router_interface_action('add', - r1['router']['id'], - s3['subnet']['id'], - None) - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(1, len(routers_2)) - self._router_interface_action('remove', - r1['router']['id'], - s3['subnet']['id'], - None) - self._router_interface_action('remove', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s2['subnet']['id'], - None) - - def test_routers_set_same_gateway_on_same_edge(self): - with self.router() as r1, self.router() as r2,\ - self.network() as ext_net,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='12.0.0.0/24') as s2,\ - self.subnet(network=ext_net, cidr='13.0.0.0/24'): - self._set_net_external(ext_net['network']['id']) - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - self._add_external_gateway_to_router( - r1['router']['id'], - ext_net['network']['id']) - self._add_external_gateway_to_router( - r2['router']['id'], - ext_net['network']['id']) - routers_2 = (self.plugin_instance.edge_manager. 
- get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(2, len(routers_2)) - - self._router_interface_action('remove', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s2['subnet']['id'], - None) - self._remove_external_gateway_from_router( - r1['router']['id'], - ext_net['network']['id']) - self._remove_external_gateway_from_router( - r2['router']['id'], - ext_net['network']['id']) - - def test_routers_set_different_gateway_on_different_edge(self): - with self.router() as r1, self.router() as r2,\ - self.network() as ext1, self.network() as ext2,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='12.0.0.0/24') as s2,\ - self.subnet(network=ext1, cidr='13.0.0.0/24'),\ - self.subnet(network=ext2, cidr='14.0.0.0/24'): - self._set_net_external(ext1['network']['id']) - self._set_net_external(ext2['network']['id']) - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - self._add_external_gateway_to_router( - r1['router']['id'], - ext1['network']['id']) - self._add_external_gateway_to_router( - r2['router']['id'], - ext1['network']['id']) - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(2, len(routers_2)) - self._add_external_gateway_to_router( - r2['router']['id'], - ext2['network']['id']) - routers_2 = (self.plugin_instance.edge_manager. - get_routers_on_same_edge( - context.get_admin_context(), - r2['router']['id'])) - self.assertEqual(1, len(routers_2)) - - self._router_interface_action('remove', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('remove', - r2['router']['id'], - s2['subnet']['id'], - None) - self._remove_external_gateway_from_router( - r1['router']['id'], - ext1['network']['id']) - self._remove_external_gateway_from_router( - r2['router']['id'], - ext2['network']['id']) - - def test_get_available_and_conflicting_ids_with_no_conflict(self): - with self.router() as r1, self.router() as r2,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='12.0.0.0/24') as s2: - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - router_driver = (self.plugin_instance._router_managers. - get_tenant_router_driver(context, 'shared')) - available_router_ids, conflict_router_ids = ( - router_driver._get_available_and_conflicting_ids( - context.get_admin_context(), r1['router']['id'])) - self.assertIn(r2['router']['id'], available_router_ids) - self.assertEqual(0, len(conflict_router_ids)) - - def test_get_available_and_conflicting_ids_with_conflict(self): - with self.router() as r1, self.router() as r2,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='11.0.0.0/24') as s2: - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - router_driver = (self.plugin_instance._router_managers. 
- get_tenant_router_driver(context, 'shared')) - available_router_ids, conflict_router_ids = ( - router_driver._get_available_and_conflicting_ids( - context.get_admin_context(), r1['router']['id'])) - self.assertIn(r2['router']['id'], conflict_router_ids) - self.assertEqual(0, len(available_router_ids)) - - def test_get_available_and_conflicting_ids_with_diff_gw(self): - with self.router() as r1, self.router() as r2,\ - self.network() as ext1, self.network() as ext2,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='12.0.0.0/24') as s2,\ - self.subnet(network=ext1, cidr='13.0.0.0/24'),\ - self.subnet(network=ext2, cidr='14.0.0.0/24'): - self._set_net_external(ext1['network']['id']) - self._set_net_external(ext2['network']['id']) - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - self._add_external_gateway_to_router( - r1['router']['id'], - ext1['network']['id']) - self._add_external_gateway_to_router( - r2['router']['id'], - ext2['network']['id']) - router_driver = (self.plugin_instance._router_managers. - get_tenant_router_driver(context, 'shared')) - available_router_ids, conflict_router_ids = ( - router_driver._get_available_and_conflicting_ids( - context.get_admin_context(), r1['router']['id'])) - self.assertIn(r2['router']['id'], conflict_router_ids) - self.assertEqual(0, len(available_router_ids)) - - def test_get_available_and_conflicting_ids_with_tenants(self): - cfg.CONF.set_override('share_edges_between_tenants', False, - group="nsxv") - with self.router(tenant_id='fake1') as r1,\ - self.router(tenant_id='fake2') as r2,\ - self.subnet(cidr='11.0.0.0/24') as s1,\ - self.subnet(cidr='12.0.0.0/24') as s2: - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - self._router_interface_action('add', - r2['router']['id'], - s2['subnet']['id'], - None) - router_driver = (self.plugin_instance._router_managers. 
- get_tenant_router_driver(context, 'shared')) - available_router_ids, conflict_router_ids = ( - router_driver._get_available_and_conflicting_ids( - context.get_admin_context(), r1['router']['id'])) - self.assertIn(r2['router']['id'], conflict_router_ids) - self.assertEqual(0, len(available_router_ids)) - - def test_migrate_shared_router_to_exclusive(self): - with self.router(name='r7') as r1, \ - self.subnet(cidr='11.0.0.0/24') as s1: - self._router_interface_action('add', - r1['router']['id'], - s1['subnet']['id'], - None) - - # update the router type: - router_id = r1['router']['id'] - self._update('routers', router_id, - {'router': {'router_type': 'exclusive'}}) - - # get the updated router and check it's type - body = self._show('routers', router_id) - self.assertEqual('exclusive', body['router']['router_type']) - - def _test_create_router_with_az_hint(self, with_hint): - # init the availability zones in the plugin - az_name = 'az7' - set_az_in_config(az_name) - p = directory.get_plugin() - p._availability_zones_data = nsx_az.NsxVAvailabilityZones() - - # create a router with/without hints - router = {'router': {'admin_state_up': True, - 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', - 'tenant_id': 'FAKE_TENANT', - 'router_type': 'shared'}} - if with_hint: - router['router']['availability_zone_hints'] = [az_name] - returned_router = p.create_router(context.get_admin_context(), - router) - # availability zones is still empty because the router is not attached - if with_hint: - self.assertEqual([az_name], - returned_router['availability_zone_hints']) - else: - self.assertEqual([], - returned_router['availability_zone_hints']) - self.assertEqual([], - returned_router['availability_zones']) - - # Add interface so the router will be attached to an edge - with self.subnet() as s1: - router_id = returned_router['id'] - self._router_interface_action('add', - router_id, - s1['subnet']['id'], - None) - edge_id = edge_utils.get_router_edge_id( - context.get_admin_context(), router_id) - res_az = nsxv_db.get_edge_availability_zone( - context.get_admin_context().session, edge_id) - expected_az = az_name if with_hint else 'default' - self.assertEqual(expected_az, res_az) - - def test_create_router_with_az_hint(self): - self._test_create_router_with_az_hint(True) - - def test_create_router_without_az_hint(self): - self._test_create_router_with_az_hint(False) - - def test_router_update_with_size_fail(self): - """Shared router currently does not support router size update - """ - with self.router() as r: - router_id = r['router']['id'] - body = self._show('routers', router_id) - body['router']['router_size'] = 'small' - self._update('routers', router_id, body, - expected_code=400, - neutron_context=context.get_admin_context()) - - -class TestRouterFlavorTestCase(extension.ExtensionTestCase, - test_l3_plugin.L3NatTestCaseMixin, - L3NatTest - ): - - FLAVOR_PLUGIN = 'neutron.services.flavors.flavors_plugin.FlavorsPlugin' - - def _mock_add_flavor_id(dummy, router_res, router_db): - # this function is a registered callback so we can't mock it - # in a regular way. 
- # need to change behavior for this test suite only, since - # there is no "unregister_dict_extend_funcs" - if router_res['name'] == 'router_with_flavor': - router_res['flavor_id'] = 'raspberry' - - def setUp(self, plugin=PLUGIN_NAME): - # init the core plugin and flavors plugin - service_plugins = {plugin_const.FLAVORS: self.FLAVOR_PLUGIN} - super(TestRouterFlavorTestCase, self).setUp( - plugin=plugin, service_plugins=service_plugins) - self.plugin = directory.get_plugin() - self.plugin._flv_plugin = directory.get_plugin(plugin_const.FLAVORS) - self.plugin._process_router_flavor_create = mock.Mock() - - resource_extend.register_funcs( - l3_apidef.ROUTERS, [self._mock_add_flavor_id]) - - # init the availability zones - self.az_name = 'az7' - set_az_in_config(self.az_name) - self.plugin._availability_zones_data = ( - nsx_az.NsxVAvailabilityZones()) - self._iteration = 1 - - def assertSyslogConfig(self, expected): - """Verify syslog was updated in fake driver - - Test assumes edge ids are created sequentially starting from edge-1 - """ - edge_id = ('edge-%s' % self._iteration) - actual = self.plugin.nsx_v.vcns.get_edge_syslog(edge_id)[1] - if not expected: - # test expects no syslog to be configured - self.assertNotIn('serverAddresses', actual) - return - - self.assertEqual(expected['protocol'], actual['protocol']) - self.assertEqual(expected['server_ip'], - actual['serverAddresses']['ipAddress'][0]) - if 'server2_ip' in expected: - self.assertEqual(expected['server2_ip'], - actual['serverAddresses']['ipAddress'][1]) - - def _test_router_create_with_flavor( - self, metainfo, expected_data, - create_type=None, - create_size=None, - create_az=None): - - router_data = {'flavor_id': 'dummy', - 'tenant_id': 'whatever', - 'name': 'router_with_flavor', - 'admin_state_up': True} - - if create_type is not None: - router_data['router_type'] = create_type - if create_size is not None: - router_data['router_size'] = create_size - if create_az is not None: - router_data['availability_zone_hints'] = [create_az] - - flavor_data = {'service_type': plugin_const.L3, - 'enabled': True, - 'service_profiles': ['profile_id']} - - # Mock the flavors plugin - with mock.patch(self.FLAVOR_PLUGIN + '.get_flavor', - return_value=flavor_data): - with mock.patch(self.FLAVOR_PLUGIN + '.get_service_profile', - return_value={'metainfo': metainfo}): - router = self.plugin.create_router( - context.get_admin_context(), - {'router': router_data}) - # syslog data is not part of router config - # and needs to be validated separately - if 'syslog' in expected_data.keys(): - self.assertSyslogConfig(expected_data['syslog']) - - for key, expected_val in expected_data.items(): - if key != 'syslog': - self.assertEqual(expected_val, router[key]) - - def test_router_create_with_flavor_different_sizes(self): - """Create exclusive router with size in flavor - """ - for size in ['compact', 'large', 'xlarge', 'quadlarge']: - metainfo = "{'router_size':'%s'}" % size - expected_router = {'router_type': 'exclusive', - 'router_size': size} - self._test_router_create_with_flavor( - metainfo, expected_router, - create_type='exclusive') - - def test_router_create_with_flavor_ex_different_sizes(self): - """Create exclusive router with size and type in flavor - """ - for size in ['compact', 'large', 'xlarge', 'quadlarge']: - metainfo = "{'router_size':'%s','router_type':'exclusive'}" % size - expected_router = {'router_type': 'exclusive', - 'router_size': size} - self._test_router_create_with_flavor( - metainfo, expected_router) - - def 
test_router_create_with_flavor_az(self): - """Create exclusive router with availability zone in flavor - """ - metainfo = "{'availability_zone_hints':'%s'}" % self.az_name - expected_router = {'router_type': 'exclusive', - 'availability_zone_hints': [self.az_name], - 'distributed': False} - self._test_router_create_with_flavor( - metainfo, expected_router, - create_type='exclusive') - - def test_router_create_with_flavor_shared(self): - """Create shared router with availability zone and type in flavor - """ - metainfo = ("{'availability_zone_hints':'%s'," - "'router_type':'shared'}" % self.az_name) - expected_router = {'router_type': 'shared', - 'availability_zone_hints': [self.az_name], - 'distributed': False} - self._test_router_create_with_flavor( - metainfo, expected_router) - - def test_router_create_with_flavor_distributed(self): - """Create distributed router with availability zone and type in flavor - """ - metainfo = ("{'availability_zone_hints':'%s'," - "'distributed':true}" % self.az_name) - expected_router = {'distributed': True, - 'availability_zone_hints': [self.az_name]} - self._test_router_create_with_flavor( - metainfo, expected_router) - - def test_router_flavor_error_parsing(self): - """Use the wrong format for the flavor metainfo - - It should be ignored, and default values are used - """ - metainfo = "xxx" - expected_router = {'distributed': False, - 'router_type': 'shared'} - self._test_router_create_with_flavor( - metainfo, expected_router) - - def test_router_create_with_syslog_flavor(self): - """Create exclusive router with syslog config in flavor""" - # Basic config - server IP only - ip = '1.1.1.10' - expected_router = {'router_type': 'exclusive', - 'syslog': {'protocol': 'tcp', - 'server_ip': ip}} - - metainfo = ("{'router_type':'exclusive'," - "'syslog':{'server_ip':'%s'}}" % ip) - - self._iteration = 1 - self._test_router_create_with_flavor( - metainfo, expected_router) - - # Advanced config - secondary server IP, protocol and loglevel - ip2 = '1.1.1.11' - for protocol in ['tcp', 'udp']: - for loglevel in ['none', 'debug', 'info', 'warning', 'error']: - expected_router = {'router_type': 'exclusive', - 'syslog': {'protocol': protocol, - 'server_ip': ip, 'server2_ip': ip2}} - - metainfo = ("{'router_type':'exclusive'," - "'syslog':{'server_ip':'%s', 'server2_ip':'%s'," - "'protocol':'%s', 'log_level':'%s'}}" % - (ip, ip2, protocol, loglevel)) - - self._iteration += 1 - self._test_router_create_with_flavor( - metainfo, expected_router) - - def test_router_create_with_syslog_flavor_error(self): - """Create router based on flavor with badly formed syslog metadata - - Syslog metadata should be ignored - """ - expected_router = {'router_type': 'exclusive', - 'syslog': None} - - self._iteration = 0 - bad_defs = ("'server_ip':'1.1.1.1', 'protocol':'http2'", - "'server2_ip':'2.2.2.2'", - "'protocol':'tcp'", - "'server_ip':'1.1.1.1', 'protocol':'udp','log_level':'pro'", - "'log_level':'error'") - for meta in bad_defs: - metainfo = "{'router_type':'exclusive', 'syslog': {%s}}" % meta - - self._iteration += 1 - self._test_router_create_with_flavor( - metainfo, expected_router) - - def _test_router_create_with_flavor_error( - self, metainfo, error_code, - create_type=None, - create_size=None, - create_az=None): - - router_data = {'flavor_id': 'dummy', - 'tenant_id': 'whatever', - 'name': 'test_router', - 'admin_state_up': True} - - if create_type is not None: - router_data['router_type'] = create_type - if create_size is not None: - router_data['router_size'] = create_size 
- if create_az is not None: - router_data['availability_zone_hints'] = [create_az] - - flavor_data = {'service_type': plugin_const.L3, - 'enabled': True, - 'service_profiles': ['profile_id']} - - # Mock the flavors plugin - with mock.patch(self.FLAVOR_PLUGIN + '.get_flavor', - return_value=flavor_data): - with mock.patch(self.FLAVOR_PLUGIN + '.get_service_profile', - return_value={'metainfo': metainfo}): - self.assertRaises(error_code, - self.plugin.create_router, - context.get_admin_context(), - {'router': router_data}) - - def test_router_flavor_size_conflict(self): - metainfo = "{'router_size':'large','router_type':'exclusive'}" - self._test_router_create_with_flavor_error( - metainfo, n_exc.BadRequest, - create_size='compact') - - def test_router_flavor_type_conflict(self): - metainfo = "{'router_size':'large','router_type':'exclusive'}" - self._test_router_create_with_flavor_error( - metainfo, n_exc.BadRequest, - create_type='shared') - - def test_router_flavor_az_conflict(self): - metainfo = ("{'availability_zone_hints':'%s'," - "'distributed':true}" % self.az_name) - self._test_router_create_with_flavor_error( - metainfo, n_exc.BadRequest, - create_az=['az2']) - - -class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt, - NsxVPluginV2TestCase): - - def setUp(self, plugin=None): - super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( - plugin=PLUGIN_NAME) - - def test_create_port_with_extradhcpopts(self): - opt_list = [{'opt_name': 'bootfile-name', - 'opt_value': 'pxelinux.0'}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - - params = {edo_ext.EXTRADHCPOPTS: opt_list, - 'arg_list': (edo_ext.EXTRADHCPOPTS,)} - - with self.port(**params) as port: - self._check_opts(opt_list, - port['port'][edo_ext.EXTRADHCPOPTS]) - - def test_create_port_with_extradhcpopts_ipv6_opt_version(self): - self.skipTest('No DHCP v6 Support yet') - - def test_create_port_with_extradhcpopts_ipv4_opt_version(self): - opt_list = [{'opt_name': 'bootfile-name', - 'opt_value': 'pxelinux.0', - 'ip_version': 4}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123', - 'ip_version': 4}] - - params = {edo_ext.EXTRADHCPOPTS: opt_list, - 'arg_list': (edo_ext.EXTRADHCPOPTS,)} - - with self.port(**params) as port: - self._check_opts(opt_list, - port['port'][edo_ext.EXTRADHCPOPTS]) - - def test_update_port_with_extradhcpopts_with_same(self): - opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] - expected_opts = opt_list[:] - for i in expected_opts: - if i['opt_name'] == upd_opts[0]['opt_name']: - i['opt_value'] = upd_opts[0]['opt_value'] - break - self._test_update_port_with_extradhcpopts(opt_list, upd_opts, - expected_opts) - - def test_update_port_with_additional_extradhcpopt(self): - opt_list = [{'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] - expected_opts = copy.deepcopy(opt_list) - expected_opts.append(upd_opts[0]) - self._test_update_port_with_extradhcpopts(opt_list, upd_opts, - expected_opts) - - def test_update_port_with_extradhcpopts(self): - opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] - expected_opts = copy.deepcopy(opt_list) - for i in 
expected_opts: - if i['opt_name'] == upd_opts[0]['opt_name']: - i['opt_value'] = upd_opts[0]['opt_value'] - break - self._test_update_port_with_extradhcpopts(opt_list, upd_opts, - expected_opts) - - def test_update_port_with_extradhcpopt_delete(self): - opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] - expected_opts = [] - - expected_opts = [opt for opt in opt_list - if opt['opt_name'] != 'bootfile-name'] - self._test_update_port_with_extradhcpopts(opt_list, upd_opts, - expected_opts) - - def test_update_port_adding_extradhcpopts(self): - opt_list = [] - upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - expected_opts = copy.deepcopy(upd_opts) - self._test_update_port_with_extradhcpopts(opt_list, upd_opts, - expected_opts) - - def test_update_port_with_blank_name_extradhcpopt(self): - opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - upd_opts = [{'opt_name': ' ', 'opt_value': 'pxelinux.0'}] - - params = {edo_ext.EXTRADHCPOPTS: opt_list, - 'arg_list': (edo_ext.EXTRADHCPOPTS,)} - - with self.port(**params) as port: - update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} - - req = self.new_update_request('ports', update_port, - port['port']['id']) - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) - - def test_create_port_with_empty_router_extradhcpopts(self): - self.skipTest('No DHCP support option for router') - - def test_update_port_with_blank_router_extradhcpopt(self): - self.skipTest('No DHCP support option for router') - - def test_update_port_with_extradhcpopts_ipv6_change_value(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_with_extradhcpopts_add_another_ver_opt(self): - self.skipTest('No DHCP v6 Support yet') - - def test_update_port_with_blank_string_extradhcpopt(self): - opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, - {'opt_name': 'tftp-server', - 'opt_value': '123.123.123.123'}] - upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': ' '}] - - params = {edo_ext.EXTRADHCPOPTS: opt_list, - 'arg_list': (edo_ext.EXTRADHCPOPTS,)} - - with self.port(**params) as port: - update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} - - req = self.new_update_request('ports', update_port, - port['port']['id']) - res = req.get_response(self.api) - self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) - - def test_create_port_with_none_extradhcpopts(self): - opt_list = [{'opt_name': 'bootfile-name', - 'opt_value': None}, - {'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - expected = [{'opt_name': 'tftp-server-address', - 'opt_value': '123.123.123.123'}] - - params = {edo_ext.EXTRADHCPOPTS: opt_list, - 'arg_list': (edo_ext.EXTRADHCPOPTS,)} - - with self.port(**params) as port: - self._check_opts(expected, - port['port'][edo_ext.EXTRADHCPOPTS]) - - def test_create_port_with_extradhcpopts_codes(self): - opt_list = [{'opt_name': '85', - 'opt_value': 'cafecafe'}] - - params = {edo_ext.EXTRADHCPOPTS: opt_list, - 'arg_list': (edo_ext.EXTRADHCPOPTS,)} - - with self.port(**params) as port: - self._check_opts(opt_list, - port['port'][edo_ext.EXTRADHCPOPTS]) - - def test_update_port_with_extradhcpopts_codes(self): - opt_list = 
[{'opt_name': '85', - 'opt_value': 'cafecafe'}] - upd_opts = [{'opt_name': '85', - 'opt_value': '01010101'}] - expected_opts = copy.deepcopy(opt_list) - for i in expected_opts: - if i['opt_name'] == upd_opts[0]['opt_name']: - i['opt_value'] = upd_opts[0]['opt_value'] - break - self._test_update_port_with_extradhcpopts(opt_list, upd_opts, - expected_opts) diff --git a/vmware_nsx/tests/unit/nsx_v/vshield/__init__.py b/vmware_nsx/tests/unit/nsx_v/vshield/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py b/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py deleted file mode 100644 index 6b0b8aa876..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py +++ /dev/null @@ -1,1624 +0,0 @@ -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import xml.etree.ElementTree as ET - -import netaddr -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from vmware_nsx._i18n import _ -from vmware_nsx.plugins.nsx_v.vshield.common import constants -from vmware_nsx.plugins.nsx_v.vshield.common import exceptions - -SECTION_LOCATION_HEADER = '/api/4.0/firewall/globalroot-0/config/%s/%s' - - -class FakeVcns(object): - - errors = { - 303: exceptions.ResourceRedirect, - 400: exceptions.RequestBad, - 403: exceptions.Forbidden, - 404: exceptions.ResourceNotFound, - 415: exceptions.MediaTypeUnsupport, - 503: exceptions.ServiceUnavailable - } - - def __init__(self, unique_router_name=True): - self._jobs = {} - self._job_idx = 0 - self._edges = {} - self._edge_idx = 0 - self._lswitches = {} - self._unique_router_name = unique_router_name - self._fake_nsx_api = None - self.fake_firewall_dict = {} - self.temp_firewall = { - "firewallRules": { - "firewallRules": [] - } - } - self.fake_ipsecvpn_dict = {} - self.temp_ipsecvpn = { - 'featureType': "ipsec_4.0", - 'enabled': True, - 'sites': {'sites': []}} - self._fake_virtualservers_dict = {} - self._fake_pools_dict = {} - self._fake_monitors_dict = {} - self._fake_app_profiles_dict = {} - self._fake_loadbalancer_config = {} - self._fake_virtual_wires = {} - self._virtual_wire_id = 0 - self._fake_portgroups = {} - self._portgroup_id = 0 - self._securitygroups = {'ids': 0, 'names': set()} - self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()} - self._dhcp_bindings = {} - self._spoofguard_policies = [] - self._ipam_pools = {} - - def do_request(self, method, uri, params=None, format='json', **kwargs): - pass - - def set_fake_nsx_api(self, fake_nsx_api): - self._fake_nsx_api = fake_nsx_api - - def _validate_edge_name(self, name): - for edge_id, edge in self._edges.items(): - if edge['name'] == name: - return False - return True - - def deploy_edge(self, request): - if (self._unique_router_name and - not self._validate_edge_name(request['name'])): - header = { - 'status': 400 - } - msg = ('Edge name should be unique for tenant. 
Edge %s ' - 'already exists for default tenant.') % request['name'] - response = { - 'details': msg, - 'errorCode': 10085, - 'rootCauseString': None, - 'moduleName': 'vShield Edge', - 'errorData': None - } - return (header, jsonutils.dumps(response)) - - self._edge_idx = self._edge_idx + 1 - edge_id = "edge-%d" % self._edge_idx - self._edges[edge_id] = { - 'name': request['name'], - 'request': request, - 'nat_rules': None, - 'nat_rule_id': 0, - 'interface_index': 1 - } - header = { - 'status': 200, - 'location': 'https://host/api/4.0/edges/%s' % edge_id - } - response = '' - return (header, response) - - def update_edge(self, edge_id, request): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - edge = self._edges[edge_id] - edge['name'] = request['name'] - header = { - 'status': 200 - } - response = '' - return (header, response) - - def get_edge_id(self, job_id): - if job_id not in self._jobs: - raise Exception(_("Job %s does not nexist") % job_id) - - header = { - 'status': 200 - } - response = { - 'edgeId': self._jobs[job_id] - } - return (header, response) - - def delete_edge(self, edge_id): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - del self._edges[edge_id] - header = { - 'status': 200 - } - response = '' - return (header, response) - - def add_vdr_internal_interface(self, edge_id, interface): - interface = interface['interfaces'][0] - if not self._edges[edge_id].get('interfaces'): - self._edges[edge_id]['interfaces'] = [] - index = len(self._edges[edge_id]['interfaces']) - interface['index'] = str(index) - self._edges[edge_id]['interfaces'].append(interface) - header = { - 'status': 200 - } - response = {"interfaces": [{"index": str(index)}]} - return (header, response) - - def get_edge_interfaces(self, edge_id): - if not self._edges[edge_id].get('interfaces'): - self._edges[edge_id]['interfaces'] = [] - header = { - 'status': 200 - } - response = {"interfaces": self._edges[edge_id].get('interfaces', [])} - return (header, response) - - def get_virtual_wires(self): - return [] - - def update_vdr_internal_interface( - self, edge_id, interface_index, interface): - header = { - 'status': 200 - } - response = '' - return (header, response) - - def get_vdr_internal_interface(self, edge_id, interface_index): - response = {} - header = { - 'status': 200 - } - for interface in self._edges[edge_id].get('interfaces', []): - if int(interface['index']) == int(interface_index): - response = interface - return (header, response) - - def delete_vdr_internal_interface(self, edge_id, interface_index): - for interface in self._edges[edge_id].get('interfaces', []): - if int(interface['index']) == int(interface_index): - header = { - 'status': 200 - } - break - header = {'status': 404} - response = '' - return (header, response) - - def get_interfaces(self, edge_id): - header = { - 'status': 200 - } - response = {'vnics': {}} - return (header, response) - - def update_interface(self, edge_id, vnic): - header = { - 'status': 200 - } - response = '' - return (header, response) - - def delete_interface(self, edge_id, vnic_index): - header = { - 'status': 200 - } - response = '' - return (header, response) - - def query_interface(self, edge_id, vnic_index): - header = { - 'status': 200 - } - response = { - 'label': 'vNic_1', - 'name': 'internal1', - 'addressGroups': { - 'addressGroups': [{'primaryAddress': '1.1.1.1'}]}, - 'portgroupId': '1', - 'mtu': 1500, - 'type': 'trunk', - 'subInterfaces': {'subInterfaces': []}, - 
'isConnected': True - } - return (header, response) - - def reconfigure_dhcp_service(self, edge_id, request): - header = { - 'status': 201 - } - response = '' - return (header, response) - - def query_dhcp_configuration(self, edge_id): - header = { - 'status': 200 - } - response = { - "featureType": "dhcp_4.0", - "version": 14, - "enabled": True, - "staticBindings": {"staticBindings": [{ - "macAddress": "fa:16:3e:e6:ad:ce", - "bindingId": "binding-1"}]}, - "ipPools": {"ipPools": []} - } - return (header, response) - - def create_dhcp_binding(self, edge_id, request): - if not self._dhcp_bindings.get(edge_id): - self._dhcp_bindings[edge_id] = {} - self._dhcp_bindings[edge_id]['idx'] = 0 - binding_idx = self._dhcp_bindings[edge_id]['idx'] - binding_idx_str = "binding-" + str(binding_idx) - self._dhcp_bindings[edge_id][binding_idx_str] = request - self._dhcp_bindings[edge_id]['idx'] = binding_idx + 1 - header = { - 'status': 200, - 'location': '/dhcp/config/bindings/%s' % binding_idx_str - } - response = '' - return (header, response) - - def delete_dhcp_binding(self, edge_id, binding_id): - if binding_id not in self._dhcp_bindings[edge_id]: - raise Exception(_("binding %s does not exist") % binding_id) - del self._dhcp_bindings[edge_id][binding_id] - header = { - 'status': 200 - } - response = '' - return (header, response) - - def get_dhcp_binding(self, edge_id, binding_id): - if binding_id not in self._dhcp_bindings[edge_id]: - raise Exception(_("binding %s does not exist") % binding_id) - response = self._dhcp_bindings[edge_id][binding_id] - header = { - 'status': 200 - } - return (header, response) - - def create_bridge(self, edge_id, request): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - header = { - 'status': 204 - } - response = '' - return (header, response) - - def delete_bridge(self, edge_id): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - header = { - 'status': 204 - } - response = '' - return (header, response) - - def get_nat_config(self, edge_id): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - edge = self._edges[edge_id] - rules = edge['nat_rules'] - if rules is None: - rules = { - 'rules': { - 'natRulesDtos': [] - }, - 'version': 1 - } - header = { - 'status': 200 - } - rules['version'] = 1 - return (header, rules) - - def update_nat_config(self, edge_id, nat): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - edge = self._edges[edge_id] - max_rule_id = edge['nat_rule_id'] - rules = copy.deepcopy(nat) - for rule in rules['rules']['natRulesDtos']: - rule_id = rule.get('ruleId', 0) - if rule_id > max_rule_id: - max_rule_id = rule_id - for rule in rules['rules']['natRulesDtos']: - if 'ruleId' not in rule: - max_rule_id = max_rule_id + 1 - rule['ruleId'] = max_rule_id - edge['nat_rules'] = rules - edge['nat_rule_id'] = max_rule_id - header = { - 'status': 200 - } - response = '' - return (header, response) - - def delete_nat_rule(self, edge_id, rule_id): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - - edge = self._edges[edge_id] - rules = edge['nat_rules'] - rule_to_delete = None - for rule in rules['rules']['natRulesDtos']: - if rule_id == rule['ruleId']: - rule_to_delete = rule - break - if rule_to_delete is None: - raise Exception(_("Rule id %d doest not exist") % rule_id) - - rules['rules']['natRulesDtos'].remove(rule_to_delete) - - header = { - 'status': 
200 - } - response = '' - return (header, response) - - def get_edge_status(self, edge_id): - if edge_id not in self._edges: - raise Exception(_("Edge %s does not exist") % edge_id) - - header = { - 'status': 200 - } - response = { - 'edgeStatus': 'GREEN' - } - return (header, response) - - def get_edge(self, edge_id): - if edge_id not in self._edges: - raise exceptions.VcnsGeneralException( - _("Edge %s does not exist!") % edge_id) - header = { - 'status': 200 - } - response = { - 'name': 'fake-edge', - 'id': edge_id, - 'appliances': {'appliances': []} - } - return (header, response) - - def get_edges(self): - edges = [] - for edge_id in self._edges: - edges.append({ - 'id': edge_id, - 'edgeStatus': 'GREEN', - 'name': self._edges[edge_id]['name'] - }) - return edges - - def get_vdn_switch(self, dvs_id): - header = { - 'status': 200 - } - response = { - 'name': 'fake-switch', - 'id': dvs_id, - 'teamingPolicy': 'ETHER_CHANNEL' - } - return (header, response) - - def update_vdn_switch(self, switch): - header = { - 'status': 200 - } - response = '' - return (header, response) - - def update_routes(self, edge_id, routes): - header = { - 'status': 200 - } - response = '' - return (header, response) - - def create_lswitch(self, lsconfig): - # The lswitch is created via VCNS API so the fake nsx_api will not - # see it. Added to fake nsx_api here. - if self._fake_nsx_api: - lswitch = self._fake_nsx_api._add_lswitch( - jsonutils.dumps(lsconfig)) - else: - lswitch = lsconfig - lswitch['uuid'] = uuidutils.generate_uuid() - self._lswitches[lswitch['uuid']] = lswitch - header = { - 'status': 200 - } - lswitch['_href'] = '/api/ws.v1/lswitch/%s' % lswitch['uuid'] - return (header, lswitch) - - def delete_lswitch(self, id): - if id not in self._lswitches: - raise Exception(_("Lswitch %s does not exist") % id) - del self._lswitches[id] - if self._fake_nsx_api: - # TODO(fank): fix the hack - del self._fake_nsx_api._fake_lswitch_dict[id] - header = { - 'status': 200 - } - response = '' - return (header, response) - - def sync_firewall(self): - header = {'status': 204} - response = "" - return self.return_helper(header, response) - - def update_firewall(self, edge_id, fw_req): - self.fake_firewall_dict[edge_id] = fw_req - rules = self.fake_firewall_dict[edge_id][ - 'firewallRules']['firewallRules'] - index = 10 - for rule in rules: - rule['ruleId'] = index - index += 10 - header = {'status': 204} - response = "" - return self.return_helper(header, response) - - def delete_firewall(self, edge_id): - header = {'status': 404} - if edge_id in self.fake_firewall_dict: - header = {'status': 204} - del self.fake_firewall_dict[edge_id] - response = "" - return self.return_helper(header, response) - - def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): - if edge_id not in self.fake_firewall_dict: - raise Exception(_("Edge %s does not exist") % edge_id) - header = {'status': 404} - rules = self.fake_firewall_dict[edge_id][ - 'firewallRules']['firewallRules'] - for rule in rules: - if rule['ruleId'] == int(vcns_rule_id): - header['status'] = 204 - rule.update(fwr_req) - break - response = "" - return self.return_helper(header, response) - - def delete_firewall_rule(self, edge_id, vcns_rule_id): - if edge_id not in self.fake_firewall_dict: - raise Exception(_("Edge %s does not exist") % edge_id) - header = {'status': 404} - rules = self.fake_firewall_dict[edge_id][ - 'firewallRules']['firewallRules'] - for index in range(len(rules)): - if rules[index]['ruleId'] == int(vcns_rule_id): - header['status'] = 204 - 
del rules[index] - break - response = "" - return self.return_helper(header, response) - - def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): - if edge_id not in self.fake_firewall_dict: - raise Exception(_("Edge %s does not exist") % edge_id) - header = {'status': 404} - rules = self.fake_firewall_dict[edge_id][ - 'firewallRules']['firewallRules'] - pre = 0 - for index in range(len(rules)): - if rules[index]['ruleId'] == int(ref_vcns_rule_id): - rules.insert(index, fwr_req) - rules[index]['ruleId'] = (int(ref_vcns_rule_id) + pre) / 2 - header = { - 'status': 204, - 'location': "https://host/api/4.0/edges/edge_id/firewall" - "/config/rules/%s" % rules[index]['ruleId']} - break - pre = int(rules[index]['ruleId']) - response = "" - return self.return_helper(header, response) - - def add_firewall_rule(self, edge_id, fwr_req): - if edge_id not in self.fake_firewall_dict: - self.fake_firewall_dict[edge_id] = self.temp_firewall - rules = self.fake_firewall_dict[edge_id][ - 'firewallRules']['firewallRules'] - rules.append(fwr_req) - index = len(rules) - rules[index - 1]['ruleId'] = index * 10 - header = { - 'status': 204, - 'location': "https://host/api/4.0/edges/edge_id/firewall" - "/config/rules/%s" % rules[index - 1]['ruleId']} - response = "" - return self.return_helper(header, response) - - def get_firewall(self, edge_id): - if edge_id not in self.fake_firewall_dict: - self.fake_firewall_dict[edge_id] = self.temp_firewall - header = {'status': 204} - response = self.fake_firewall_dict[edge_id] - return self.return_helper(header, response) - - def get_firewall_rule(self, edge_id, vcns_rule_id): - if edge_id not in self.fake_firewall_dict: - raise Exception(_("Edge %s does not exist") % edge_id) - header = {'status': 404} - response = "" - rules = self.fake_firewall_dict[edge_id][ - 'firewallRules']['firewallRules'] - for rule in rules: - if rule['ruleId'] == int(vcns_rule_id): - header['status'] = 204 - response = rule - break - return self.return_helper(header, response) - - def is_name_unique(self, objs_dict, name): - return name not in [obj_dict['name'] - for obj_dict in objs_dict.values()] - - def create_vip(self, edge_id, vip_new): - header = {'status': 403} - response = "" - if not self._fake_virtualservers_dict.get(edge_id): - self._fake_virtualservers_dict[edge_id] = {} - if not self.is_name_unique(self._fake_virtualservers_dict[edge_id], - vip_new['name']): - return self.return_helper(header, response) - vip_vseid = uuidutils.generate_uuid() - self._fake_virtualservers_dict[edge_id][vip_vseid] = vip_new - header = { - 'status': 204, - 'location': "https://host/api/4.0/edges/edge_id" - "/loadbalancer/config/%s" % vip_vseid} - return self.return_helper(header, response) - - def get_vip(self, edge_id, vip_vseid): - header = {'status': 404} - response = "" - if not self._fake_virtualservers_dict.get(edge_id) or ( - not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - response = self._fake_virtualservers_dict[edge_id][vip_vseid] - return self.return_helper(header, response) - - def update_vip(self, edge_id, vip_vseid, vip_new): - header = {'status': 404} - response = "" - if not self._fake_virtualservers_dict.get(edge_id) or ( - not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - self._fake_virtualservers_dict[edge_id][vip_vseid].update( - vip_new) - return self.return_helper(header, response) - - def 
delete_vip(self, edge_id, vip_vseid): - header = {'status': 404} - response = "" - if not self._fake_virtualservers_dict.get(edge_id) or ( - not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - del self._fake_virtualservers_dict[edge_id][vip_vseid] - return self.return_helper(header, response) - - def create_pool(self, edge_id, pool_new): - header = {'status': 403} - response = "" - if not self._fake_pools_dict.get(edge_id): - self._fake_pools_dict[edge_id] = {} - if not self.is_name_unique(self._fake_pools_dict[edge_id], - pool_new['name']): - return self.return_helper(header, response) - pool_vseid = uuidutils.generate_uuid() - self._fake_pools_dict[edge_id][pool_vseid] = pool_new - header = { - 'status': 204, - 'location': "https://host/api/4.0/edges/edge_id" - "/loadbalancer/config/%s" % pool_vseid} - return self.return_helper(header, response) - - def get_pool(self, edge_id, pool_vseid): - header = {'status': 404} - response = "" - if not self._fake_pools_dict.get(edge_id) or ( - not self._fake_pools_dict[edge_id].get(pool_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - response = self._fake_pools_dict[edge_id][pool_vseid] - return self.return_helper(header, response) - - def update_pool(self, edge_id, pool_vseid, pool_new): - header = {'status': 404} - response = "" - if not self._fake_pools_dict.get(edge_id) or ( - not self._fake_pools_dict[edge_id].get(pool_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - self._fake_pools_dict[edge_id][pool_vseid].update( - pool_new) - return self.return_helper(header, response) - - def delete_pool(self, edge_id, pool_vseid): - header = {'status': 404} - response = "" - if not self._fake_pools_dict.get(edge_id) or ( - not self._fake_pools_dict[edge_id].get(pool_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - del self._fake_pools_dict[edge_id][pool_vseid] - return self.return_helper(header, response) - - def create_health_monitor(self, edge_id, monitor_new): - if not self._fake_monitors_dict.get(edge_id): - self._fake_monitors_dict[edge_id] = {} - monitor_vseid = uuidutils.generate_uuid() - self._fake_monitors_dict[edge_id][monitor_vseid] = monitor_new - header = { - 'status': 204, - 'location': "https://host/api/4.0/edges/edge_id" - "/loadbalancer/config/%s" % monitor_vseid} - response = "" - return self.return_helper(header, response) - - def get_health_monitor(self, edge_id, monitor_vseid): - header = {'status': 404} - response = "" - if not self._fake_monitors_dict.get(edge_id) or ( - not self._fake_monitors_dict[edge_id].get(monitor_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - response = self._fake_monitors_dict[edge_id][monitor_vseid] - return self.return_helper(header, response) - - def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): - header = {'status': 404} - response = "" - if not self._fake_monitors_dict.get(edge_id) or ( - not self._fake_monitors_dict[edge_id].get(monitor_vseid)): - return self.return_helper(header, response) - header = {'status': 204} - self._fake_monitors_dict[edge_id][monitor_vseid].update( - monitor_new) - return self.return_helper(header, response) - - def delete_health_monitor(self, edge_id, monitor_vseid): - header = {'status': 404} - response = "" - if not self._fake_monitors_dict.get(edge_id) or ( - not self._fake_monitors_dict[edge_id].get(monitor_vseid)): - return 
self.return_helper(header, response) - header = {'status': 204} - del self._fake_monitors_dict[edge_id][monitor_vseid] - return self.return_helper(header, response) - - def create_app_profile(self, edge_id, app_profile): - if not self._fake_app_profiles_dict.get(edge_id): - self._fake_app_profiles_dict[edge_id] = {} - app_profileid = uuidutils.generate_uuid() - self._fake_app_profiles_dict[edge_id][app_profileid] = app_profile - header = { - 'status': 204, - 'location': "https://host/api/4.0/edges/edge_id" - "/loadbalancer/config/%s" % app_profileid} - response = "" - return self.return_helper(header, response) - - def update_app_profile(self, edge_id, app_profileid, app_profile): - header = {'status': 404} - response = "" - if not self._fake_app_profiles_dict.get(edge_id) or ( - not self._fake_app_profiles_dict[edge_id].get(app_profileid)): - return self.return_helper(header, response) - header = {'status': 204} - self._fake_app_profiles_dict[edge_id][app_profileid].update( - app_profile) - return self.return_helper(header, response) - - def delete_app_profile(self, edge_id, app_profileid): - header = {'status': 404} - response = "" - if not self._fake_app_profiles_dict.get(edge_id) or ( - not self._fake_app_profiles_dict[edge_id].get(app_profileid)): - return self.return_helper(header, response) - header = {'status': 204} - del self._fake_app_profiles_dict[edge_id][app_profileid] - return self.return_helper(header, response) - - def create_app_rule(self, edge_id, app_rule): - app_ruleid = uuidutils.generate_uuid() - header = { - 'status': 204, - 'location': "https://host/api/4.0/edges/edge_id" - "/loadbalancer/config/%s" % app_ruleid} - response = "" - return self.return_helper(header, response) - - def update_app_rule(self, edge_id, app_ruleid, app_rule): - pass - - def delete_app_rule(self, edge_id, app_ruleid): - pass - - def get_loadbalancer_config(self, edge_id): - header = {'status': 204} - response = {'config': False} - if self._fake_loadbalancer_config[edge_id]: - response['config'] = self._fake_loadbalancer_config[edge_id] - return self.return_helper(header, response) - - def get_loadbalancer_statistics(self, edge_id): - return [{}, {}] - - def update_ipsec_config(self, edge_id, ipsec_config): - self.fake_ipsecvpn_dict[edge_id] = ipsec_config - header = {'status': 204} - response = "" - return self.return_helper(header, response) - - def delete_ipsec_config(self, edge_id): - header = {'status': 404} - if edge_id in self.fake_ipsecvpn_dict: - header = {'status': 204} - del self.fake_ipsecvpn_dict[edge_id] - response = "" - return self.return_helper(header, response) - - def get_ipsec_config(self, edge_id): - if edge_id not in self.fake_ipsecvpn_dict: - self.fake_ipsecvpn_dict[edge_id] = self.temp_ipsecvpn - header = {'status': 204} - response = self.fake_ipsecvpn_dict[edge_id] - return self.return_helper(header, response) - - def enable_service_loadbalancer(self, edge_id, config): - header = {'status': 204} - response = "" - self._fake_loadbalancer_config[edge_id] = True - return self.return_helper(header, response) - - def create_virtual_wire(self, vdn_scope_id, request): - self._virtual_wire_id += 1 - header = {'status': 200} - virtual_wire = 'virtualwire-%s' % self._virtual_wire_id - data = {'name': request['virtualWireCreateSpec']['name'], - 'objectId': virtual_wire} - self._fake_virtual_wires.update({virtual_wire: data}) - return (header, virtual_wire) - - def delete_virtual_wire(self, virtualwire_id): - del self._fake_virtual_wires[virtualwire_id] - header = { - 'status': 
200 - } - response = '' - return (header, response) - - def create_port_group(self, dvs_id, request): - self._portgroup_id += 1 - header = {'status': 200} - portgroup = 'dvportgroup-%s' % self._portgroup_id - data = {'name': request['networkSpec']['networkName'], - 'objectId': portgroup} - self._fake_portgroups.update({portgroup: data}) - return (header, portgroup) - - def delete_port_group(self, dvs_id, portgroup_id): - del self._fake_portgroups[portgroup_id] - header = { - 'status': 200 - } - response = '' - return (header, response) - - def return_helper(self, header, response): - status = int(header['status']) - if 200 <= status <= 300: - return (header, response) - if status in self.errors: - cls = self.errors[status] - else: - cls = exceptions.VcnsApiException - raise cls( - status=status, header=header, uri='fake_url', response=response) - - def _get_bad_req_response(self, details, error_code, module_name): - bad_req_response_format = """ - -
<error> - <details>%(details)s</details> - <errorCode>%(error_code)s</errorCode> - <moduleName>%(module_name)s</moduleName> - </error>
- """ - return bad_req_response_format % { - 'details': details, - 'error_code': error_code, - 'module_name': module_name, - } - - def _get_section_location(self, type, section_id): - return SECTION_LOCATION_HEADER % (type, section_id) - - def _get_section_id_from_uri(self, section_uri): - return section_uri.split('/')[-1] - - def _section_not_found(self, section_id): - msg = "Invalid section id found : %s" % section_id - response = self._get_bad_req_response(msg, 100089, 'vShield App') - headers = {'status': 400} - return (headers, response) - - def _unknown_error(self): - msg = "Unknown Error Occurred.Please look into tech support logs." - response = self._get_bad_req_response(msg, 100046, 'vShield App') - headers = {'status': 400} - return (headers, response) - - def create_security_group(self, request): - sg = request['securitygroup'] - if sg['name'] in self._securitygroups['names']: - status = 400 - msg = ("Another object with same name : %s already exists in " - "the current scope : globalroot-0." % sg['name']) - response = self._get_bad_req_response(msg, 210, 'core-services') - else: - sg_id = str(self._securitygroups['ids']) - self._securitygroups['ids'] += 1 - sg['members'] = set() - self._securitygroups[sg_id] = sg - self._securitygroups['names'].add(sg['name']) - status, response = 201, sg_id - return ({'status': status}, response) - - def update_security_group(self, sg_id, sg_name, description): - sg = self._securitygroups[sg_id] - self._securitygroups['names'].remove(sg['name']) - sg['name'] = sg_name - sg['description'] = description - self._securitygroups['names'].add(sg_name) - return {'status': 200}, '' - - def delete_security_group(self, securitygroup_id): - try: - del self._securitygroups[securitygroup_id] - except KeyError: - status = 404 - msg = ("The requested object : %s could " - "not be found. Object identifiers are case sensitive." - % securitygroup_id) - response = self._get_bad_req_response(msg, 210, 'core-services') - else: - status, response = 200, '' - return ({'status': status}, response) - - def get_security_group_id(self, sg_name): - for k, v in self._securitygroups.items(): - if k not in ('ids', 'names') and v['name'] == sg_name: - return k - - def get_security_group(self, sg_id): - sg = self._securitygroups.get(sg_id) - if sg: - return ('%s"%s"' - '' - % (sg_id, sg.get("name"))) - - def list_security_groups(self): - response = "" - header = {'status': 200} - for k in self._securitygroups.keys(): - if k not in ('ids', 'names'): - response += self.get_security_group(k) - response = "%s" % response - return header, response - - def create_redirect_section(self, request): - return self.create_section('layer3redirect', request) - - def create_section(self, type, request, - insert_top=False, insert_before=None): - section = ET.fromstring(request) - section_name = section.attrib.get('name') - if section_name in self._sections['names']: - msg = "Section with name %s already exists." 
% section_name - response = self._get_bad_req_response(msg, 100092, 'vShield App') - headers = {'status': 400} - else: - section_id = str(self._sections['section_ids']) - section.attrib['id'] = 'section-%s' % section_id - _section = self._sections[section_id] = {'name': section_name, - 'etag': 'Etag-0', - 'rules': {}} - self._sections['names'].add(section_name) - for rule in section.findall('rule'): - rule_id = str(self._sections['rule_ids']) - rule.attrib['id'] = rule_id - _section['rules'][rule_id] = ET.tostring(rule) - self._sections['rule_ids'] += 1 - response = ET.tostring(section) - headers = { - 'status': 201, - 'location': self._get_section_location(type, section_id), - 'etag': _section['etag'] - } - self._sections['section_ids'] += 1 - return (headers, response) - - def update_section(self, section_uri, request, h): - section = ET.fromstring(request) - section_id = section.attrib.get('id') - section_name = section.attrib.get('name') - if section_id not in self._sections: - return self._section_not_found(section_id) - _section = self._sections[section_id] - if (_section['name'] != section_name and - section_name in self._sections['names']): - # There's a section with this name already - headers, response = self._unknown_error() - else: - # Different Etag every successful update - _section['etag'] = ('Etag-1' if _section['etag'] == 'Etag-0' - else 'Etag-0') - self._sections['names'].remove(_section['name']) - _section['name'] = section_name - self._sections['names'].add(section_name) - for rule in section.findall('rule'): - if not rule.attrib.get('id'): - rule.attrib['id'] = str(self._sections['rule_ids']) - self._sections['rule_ids'] += 1 - rule_id = rule.attrib.get('id') - _section['rules'][rule_id] = ET.tostring(rule) - _, response = self._get_section(section_id) - headers = { - 'status': 200, - 'location': self._get_section_location(type, section_id), - 'etag': _section['etag'] - } - return (headers, response) - - def delete_section(self, section_uri): - section_id = self._get_section_id_from_uri(section_uri) - if section_id not in self._sections: - headers, response = self._unknown_error() - else: - section_name = self._sections[section_id]['name'] - del self._sections[section_id] - self._sections['names'].remove(section_name) - response = '' - headers = {'status': 204} - return (headers, response) - - def get_section(self, section_uri): - section_id = self._get_section_id_from_uri(section_uri) - if section_id not in self._sections: - headers, response = self._section_not_found(section_id) - else: - return self._get_section(section_id) - - def get_section_rules(self, section_uri): - return [] - - def _get_section(self, section_id): - section_rules = ( - b''.join(self._sections[section_id]['rules'].values())) - response = ('
<section id="%s" name="%s">%s</section>
' - % (section_id, - self._sections[section_id]['name'], - section_rules)) - headers = {'status': 200, - 'etag': self._sections[section_id]['etag']} - return (headers, response) - - def get_section_id(self, section_name): - for k, v in self._sections.items(): - if (k not in ('section_ids', 'rule_ids', 'names') and - v['name'] == section_name): - return k - - def update_section_by_id(self, id, type, request): - pass - - def get_default_l3_id(self): - return 1234 - - def get_dfw_config(self): - response = "" - for sec_id in self._sections.keys(): - if sec_id.isdigit(): - h, r = self._get_section(str(sec_id)) - response += r - response = "%s" % response - headers = {'status': 200} - return (headers, response) - - def remove_rule_from_section(self, section_uri, rule_id): - section_id = self._get_section_id_from_uri(section_uri) - if section_id not in self._sections: - headers, response = self._section_not_found(section_id) - else: - section = self._sections[section_id] - if rule_id in section['rules']: - del section['rules'][rule_id] - response = '' - headers = {'status': 204} - else: - headers, response = self._unknown_error() - return (headers, response) - - def add_member_to_security_group(self, security_group_id, member_id): - if security_group_id not in self._securitygroups: - msg = ("The requested object : %s could not be found." - "Object identifiers are case " - "sensitive.") % security_group_id - response = self._get_bad_req_response(msg, 202, 'core-services') - headers = {'status': 404} - else: - self._securitygroups[security_group_id]['members'].add(member_id) - response = '' - headers = {'status': 200} - return (headers, response) - - def remove_member_from_security_group(self, security_group_id, member_id): - if security_group_id not in self._securitygroups: - msg = ("The requested object : %s could not be found." 
- "Object identifiers are " - "case sensitive.") % security_group_id - response = self._get_bad_req_response(msg, 202, 'core-services') - headers = {'status': 404} - else: - self._securitygroups[security_group_id]['members'].remove( - member_id) - response = '' - headers = {'status': 200} - return (headers, response) - - def create_spoofguard_policy(self, enforcement_points, name, enable): - policy = {'name': name, - 'enforcementPoints': [{'id': enforcement_points[0]}], - 'operationMode': 'MANUAL' if enable else 'DISABLE'} - policy_id = len(self._spoofguard_policies) - self._spoofguard_policies.append(policy) - return None, 'spoofguardpolicy-%s' % policy_id - - def _get_index(self, policy_id): - return int(policy_id.split('-')[-1]) - - def update_spoofguard_policy(self, policy_id, - enforcement_points, name, enable): - policy = {'name': name, - 'enforcementPoints': [{'id': enforcement_points[0]}], - 'operationMode': 'MANUAL' if enable else 'DISABLE'} - self._spoofguard_policies[self._get_index(policy_id)] = policy - return None, '' - - def delete_spoofguard_policy(self, policy_id): - self._spoofguard_policies[self._get_index(policy_id)] = {} - - def get_spoofguard_policy(self, policy_id): - try: - return None, self._spoofguard_policies[self._get_index(policy_id)] - except IndexError: - raise exceptions.VcnsGeneralException( - _("Spoofguard policy not found")) - - def get_spoofguard_policy_data(self, policy_id, list_type='INACTIVE'): - return None, {'spoofguardList': []} - - def get_spoofguard_policies(self): - return None, {'policies': self._spoofguard_policies} - - def approve_assigned_addresses(self, policy_id, - vnic_id, mac_addr, addresses): - pass - - def publish_assigned_addresses(self, policy_id, vnic_id): - pass - - def configure_reservations(self): - pass - - def inactivate_vnic_assigned_addresses(self, policy_id, vnic_id): - pass - - def add_vm_to_exclude_list(self, vm_id): - pass - - def delete_vm_from_exclude_list(self, vm_id): - pass - - def get_scoping_objects(self): - response = ('' - 'Network' - 'aaa' - 'bbb' - '') - return response - - def reset_all(self): - self._jobs.clear() - self._edges.clear() - self._lswitches.clear() - self.fake_firewall_dict = {} - self._fake_virtualservers_dict = {} - self._fake_pools_dict = {} - self._fake_monitors_dict = {} - self._fake_app_profiles_dict = {} - self._fake_loadbalancer_config = {} - self._fake_virtual_wires = {} - self._virtual_wire_id = 0 - self._fake_portgroups = {} - self._portgroup_id = 0 - self._securitygroups = {'ids': 0, 'names': set()} - self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()} - self._dhcp_bindings = {} - self._ipam_pools = {} - - def validate_datacenter_moid(self, object_id, during_init=False): - return True - - def validate_network(self, object_id, during_init=False): - return True - - def validate_network_name(self, object_id, name, during_init=False): - return True - - def validate_vdn_scope(self, object_id): - return True - - def get_dvs_list(self): - return [] - - def validate_dvs(self, object_id, dvs_list=None): - return True - - def edges_lock_operation(self): - pass - - def validate_inventory(self, moref): - return True - - def get_version(self): - return '6.4.6' - - def get_tuning_configuration(self): - return { - 'lockUpdatesOnEdge': True, - 'edgeVMHealthCheckIntervalInMin': 0, - 'aggregatePublishing': False, - 'publishingTimeoutInMs': 1200000, - 'healthCheckCommandTimeoutInMs': 120000, - 'maxParallelVixCallsForHealthCheck': 25} - - def configure_aggregate_publishing(self): - pass - - 
def enable_ha(self, edge_id, request_config): - header = { - 'status': 201 - } - response = '' - return (header, response) - - def get_edge_syslog(self, edge_id): - if ('syslog' not in self._edges.get(edge_id)): - header = { - 'status': 400 - } - response = {} - else: - header = { - 'status': 200 - } - response = self._edges.get(edge_id)['syslog'] - return (header, response) - - def update_edge_syslog(self, edge_id, config): - if edge_id not in self._edges: - raise exceptions.VcnsGeneralException( - _("edge not found")) - self._edges[edge_id]['syslog'] = config - header = { - 'status': 204 - } - response = '' - return (header, response) - - def delete_edge_syslog(self, edge_id): - header = { - 'status': 204 - } - response = '' - return (header, response) - - def update_edge_config_with_modifier(self, edge_id, module, modifier): - header = { - 'status': 204 - } - response = '' - return (header, response) - - def change_edge_appliance_size(self, edge_id, size): - header = { - 'status': 204 - } - response = {} - return (header, response) - - def change_edge_appliance(self, edge_id, request): - header = { - 'status': 204 - } - response = {} - return (header, response) - - def get_edge_appliances(self, edge_id): - header = { - 'status': 204 - } - response = {} - return (header, response) - - def get_routes(self, edge_id): - header = { - 'status': 204 - } - response = {'staticRoutes': {'staticRoutes': []}} - return (header, response) - - def get_service_insertion_profile(self, profile_id): - headers = {'status': 200} - response = """ - %s - ServiceProfile - ServiceProfile - Service_Vendor - - - securitygroup-30 - - - """ - response_format = response % profile_id - - return (headers, response_format) - - def update_service_insertion_profile_binding(self, profile_id, request): - response = '' - headers = {'status': 200} - return (headers, response) - - def create_ipam_ip_pool(self, request): - pool_id = uuidutils.generate_uuid() - # format the request before saving it: - fixed_request = request['ipamAddressPool'] - ranges = fixed_request['ipRanges'] - for i in range(len(ranges)): - ranges[i] = ranges[i]['ipRangeDto'] - self._ipam_pools[pool_id] = {'request': fixed_request, - 'allocated': []} - header = {'status': 200} - response = pool_id - return (header, response) - - def delete_ipam_ip_pool(self, pool_id): - response = '' - if pool_id in self._ipam_pools: - pool = self._ipam_pools.pop(pool_id) - if len(pool['allocated']) > 0: - header = {'status': 400} - msg = ("Unable to delete IP pool %s. IP addresses from this " - "pool are being used." % pool_id) - response = self._get_bad_req_response( - msg, 120053, 'core-services') - else: - header = {'status': 200} - return (header, response) - else: - header = {'status': 400} - msg = ("Unable to delete IP pool %s. Pool does not exist." % - pool_id) - response = self._get_bad_req_response( - msg, 120054, 'core-services') - return self.return_helper(header, response) - - def get_ipam_ip_pool(self, pool_id): - if pool_id in self._ipam_pools: - header = {'status': 200} - response = self._ipam_pools[pool_id]['request'] - else: - header = {'status': 400} - msg = ("Unable to retrieve IP pool %s. Pool does not exist." 
% - pool_id) - response = self._get_bad_req_response( - msg, 120054, 'core-services') - return self.return_helper(header, response) - - def _allocate_ipam_add_ip_and_return(self, pool, ip_addr): - # build the response - response_text = ( - "" - "%(id)s" - "%(ip)s" - "%(gateway)s" - "%(prefix)s" - "subnet-44") - response_args = {'id': len(pool['allocated']), - 'gateway': pool['request']['gateway'], - 'prefix': pool['request']['prefixLength']} - - response_args['ip'] = ip_addr - response = response_text % response_args - - # add the ip to the list of allocated ips - pool['allocated'].append(ip_addr) - - header = {'status': 200} - return (header, response) - - def allocate_ipam_ip_from_pool(self, pool_id, ip_addr=None): - if pool_id in self._ipam_pools: - pool = self._ipam_pools[pool_id] - if ip_addr: - # verify that this ip was not yet allocated - if ip_addr in pool['allocated']: - header = {'status': 400} - msg = ("Unable to allocate IP from pool %(pool)s. " - "IP %(ip)s already in use." % - {'pool': pool_id, 'ip': ip_addr}) - response = self._get_bad_req_response( - msg, constants.NSX_ERROR_IPAM_ALLOCATE_IP_USED, - 'core-services') - else: - return self._allocate_ipam_add_ip_and_return( - pool, ip_addr) - else: - # get an unused ip from the pool - for ip_range in pool['request']['ipRanges']: - r = netaddr.IPRange(ip_range['startAddress'], - ip_range['endAddress']) - for ip_addr in r: - if str(ip_addr) not in pool['allocated']: - return self._allocate_ipam_add_ip_and_return( - pool, str(ip_addr)) - # if we got here - no ip was found - header = {'status': 400} - msg = ("Unable to allocate IP from pool %(pool)s. " - "All IPs have been used." % - {'pool': pool_id}) - response = self._get_bad_req_response( - msg, constants.NSX_ERROR_IPAM_ALLOCATE_ALL_USED, - 'core-services') - else: - header = {'status': 400} - msg = ("Unable to allocate IP from pool %s. Pool does not " - "exist." % pool_id) - response = self._get_bad_req_response( - msg, 120054, 'core-services') - return self.return_helper(header, response) - - def release_ipam_ip_to_pool(self, pool_id, ip_addr): - if pool_id in self._ipam_pools: - pool = self._ipam_pools[pool_id] - if ip_addr not in pool['allocated']: - header = {'status': 400} - msg = ("IP %(ip)s was not allocated from pool %(pool)s." % - {'ip': ip_addr, 'pool': pool_id}) - response = self._get_bad_req_response( - msg, 120056, 'core-services') - else: - pool['allocated'].remove(ip_addr) - response = '' - header = {'status': 200} - else: - header = {'status': 400} - msg = ("Unable to release IP to pool %s. Pool does not exist." 
% - pool_id) - response = self._get_bad_req_response( - msg, 120054, 'core-services') - return self.return_helper(header, response) - - def get_security_policy(self, policy_id, return_xml=True): - name = 'pol1' - description = 'dummy' - if return_xml: - response_text = ( - "" - "%(id)s" - "%(name)s" - "%(desc)s" - "") % {'id': policy_id, 'name': name, - 'desc': description} - return response_text - else: - return {'objectId': policy_id, - 'name': name, - 'description': description} - - def update_security_policy(self, policy_id, request): - pass - - def get_security_policies(self): - policies = [] - for id in ['policy-1', 'policy-2', 'policy-3']: - policies.append(self.get_security_policy(id, return_xml=False)) - return {'policies': policies} - - def list_applications(self): - applications = [{'name': 'ICMP Echo', 'objectId': 'application-333'}, - {'name': 'IPv6-ICMP Echo', - 'objectId': 'application-1001'}, - {'name': 'IPv6-ICMP Version 2 Multicast Listener', - 'objectId': 'application-3'}, - {'name': 'IPv6-ICMP Multicast Listener Query', - 'objectId': 'application-4'}] - - return applications - - def update_dynamic_routing_service(self, edge_id, request_config): - header = {'status': 201} - response = { - 'routerId': '172.24.4.12', - 'ipPrefixes': { - 'ipPrefixes': [ - {'ipAddress': '10.0.0.0/24', - 'name': 'prefix-name'} - ] - } - } - return self.return_helper(header, response) - - def get_edge_routing_config(self, edge_id): - header = {'status': 200} - response = { - 'featureType': '', - 'ospf': {}, - 'routingGlobalConfig': { - 'routerId': '172.24.4.12', - 'ipPrefixes': { - 'ipPrefixes': [ - {'ipAddress': '10.0.0.0/24', - 'name': 'prefix-name'} - ] - }, - 'logging': { - 'logLevel': 'info', - 'enable': False - }, - 'ecmp': False - } - } - return self.return_helper(header, response) - - def update_edge_routing_config(self, edge_id, request): - header = {'status': 200} - return self.return_helper(header, {}) - - def update_bgp_dynamic_routing(self, edge_id, bgp_request): - header = {"status": 201} - response = { - "localAS": 65000, - "enabled": True, - "bgpNeighbours": { - "bgpNeighbours": [ - { - "bgpFilters": { - "bgpFilters": [ - { - "action": "deny", - "direction": "in" - } - ] - }, - "password": None, - "ipAddress": "172.24.4.253", - "remoteAS": 65000 - } - ] - }, - "redistribution": { - "rules": { - "rules": [ - { - "action": "deny", - "from": { - "bgp": False, - "connected": False, - "static": False, - "ospf": False - }, - "id": 0 - }, - { - "action": "permit", - "from": { - "bgp": False, - "connected": True, - "static": True, - "ospf": False - }, - "id": 1, - "prefixName": "eee4eb79-359e-4416" - } - ] - }, - "enabled": True - } - } - return self.return_helper(header, response) - - def get_bgp_routing_config(self, edge_id): - header = {'status': 200} - response = { - "localAS": 65000, - "enabled": True, - "redistribution": { - "rules": { - "rules": [ - { - "action": "deny", - "from": { - "bgp": False, - "connected": False, - "static": False, - "ospf": False - }, - "id": 0 - }, - { - "action": "permit", - "from": { - "bgp": False, - "connected": True, - "static": True, - "ospf": False - }, - "id": 1, - "prefixName": "eee4eb79-359e-4416" - } - ] - }, - "enabled": True - } - } - return self.return_helper(header, response) - - def delete_bgp_routing_config(self, edge_id): - header = {'status': 200} - response = '' - return header, response - - def get_application_id(self, name): - return 'application-123' - - def get_tz_connectivity_info(self, vdn_scope_id): - return {'clustersInfo': 
[{ - 'clusterId': 'fake_cluster_moid', - 'standardNetworks': [{'id': 'fake_net'}], - 'distributedVirtualPortGroups': [{'id': 'net-1'}], - 'distributedVirtualSwitches': [{'id': 'fake_dvs_id'}], - }]} diff --git a/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py b/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py deleted file mode 100644 index e42b5126be..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py +++ /dev/null @@ -1,840 +0,0 @@ -# Copyright 2014 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from unittest import mock - -from neutron.tests.unit import testlib_api -from neutron_lib import constants -from neutron_lib import context -from neutron_lib import exceptions as n_exc -from oslo_config import cfg -from oslo_utils import uuidutils - -from vmware_nsx.common import config as conf -from vmware_nsx.common import exceptions as nsx_exc -from vmware_nsx.common import nsxv_constants -from vmware_nsx.db import nsxv_db -from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az -from vmware_nsx.plugins.nsx_v.vshield.common import ( - constants as vcns_const) -from vmware_nsx.plugins.nsx_v.vshield import edge_utils -from vmware_nsx.tests import unit as vmware - -_uuid = uuidutils.generate_uuid - -#Four types of backup edge with different status -EDGE_AVAIL = 'available-' -EDGE_CREATING = 'creating-' -EDGE_ERROR1 = 'error1-' -EDGE_ERROR2 = 'error2-' -EDGE_DELETING = 'deleting-' -DEFAULT_AZ = 'default' - - -class EdgeUtilsTestCaseMixin(testlib_api.SqlTestCase): - - def setUp(self): - super(EdgeUtilsTestCaseMixin, self).setUp() - nsxv_manager_p = mock.patch(vmware.VCNS_DRIVER_NAME, autospec=True) - self.nsxv_manager = nsxv_manager_p.start() - task = mock.Mock() - nsxv_manager_p.return_value = task - self.nsxv_manager.callbacks = mock.Mock() - self.nsxv_manager.vcns = mock.Mock() - get_ver = mock.patch.object(self.nsxv_manager.vcns, - 'get_version').start() - get_ver.return_value = '6.1.4' - self.ctx = context.get_admin_context() - self.addCleanup(nsxv_manager_p.stop) - self.az = (nsx_az.NsxVAvailabilityZones(). - get_default_availability_zone()) - - def _create_router(self, name='router1'): - return {'name': name, - 'id': _uuid()} - - def _create_network(self, name='network'): - return {'name': name, - 'id': _uuid()} - - def _create_subnet(self, name='subnet'): - return {'name': name, - 'id': _uuid()} - - def _populate_vcns_router_binding(self, bindings): - for binding in bindings: - nsxv_db.init_edge_vnic_binding(self.ctx.session, - binding['edge_id']) - nsxv_db.add_nsxv_router_binding( - self.ctx.session, binding['router_id'], - binding['edge_id'], None, binding['status'], - appliance_size=binding['appliance_size'], - edge_type=binding['edge_type'], - availability_zone=binding['availability_zone']) - - -class DummyPlugin(object): - def get_network_az_by_net_id(self, context, network_id): - return (nsx_az.NsxVAvailabilityZones(). 
- get_default_availability_zone()) - - -class EdgeDHCPManagerTestCase(EdgeUtilsTestCaseMixin): - - def setUp(self): - super(EdgeDHCPManagerTestCase, self).setUp() - self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) - self.check = mock.patch.object(self.edge_manager, - 'check_edge_active_at_backend').start() - self.check.return_value = True - - def test_create_dhcp_edge_service(self): - fake_edge_pool = [{'status': constants.ACTIVE, - 'edge_id': 'edge-1', - 'router_id': 'backup-11111111-1111', - 'appliance_size': 'compact', - 'edge_type': 'service', - 'availability_zone': DEFAULT_AZ}, - {'status': constants.PENDING_DELETE, - 'edge_id': 'edge-2', - 'router_id': 'dhcp-22222222-2222', - 'appliance_size': 'compact', - 'edge_type': 'service', - 'availability_zone': DEFAULT_AZ}, - {'status': constants.PENDING_DELETE, - 'edge_id': 'edge-3', - 'router_id': 'backup-33333333-3333', - 'appliance_size': 'compact', - 'edge_type': 'service', - 'availability_zone': DEFAULT_AZ}] - self._populate_vcns_router_binding(fake_edge_pool) - fake_network = self._create_network() - fake_subnet = self._create_subnet(fake_network['id']) - self.edge_manager.plugin = DummyPlugin() - with mock.patch.object(self.edge_manager, - '_get_used_edges', return_value=([], [])): - self.edge_manager.create_dhcp_edge_service(self.ctx, - fake_network['id'], - fake_subnet) - self.nsxv_manager.rename_edge.assert_called_once_with('edge-1', - mock.ANY) - - def test_get_random_available_edge(self): - available_edge_ids = ['edge-1', 'edge-2'] - selected_edge_id = self.edge_manager._get_random_available_edge( - available_edge_ids) - self.assertIn(selected_edge_id, available_edge_ids) - - def test_get_random_available_edge_missing_edges_returns_none(self): - available_edge_ids = ['edge-1', 'edge-2'] - # Always return inactive(False) while checking whether the edge - # exists on the backend. - with mock.patch.object(self.edge_manager, - 'check_edge_active_at_backend', - return_value=False): - selected_edge_id = self.edge_manager._get_random_available_edge( - available_edge_ids) - # If no active edges are found on the backend, return None so that - # a new DHCP edge is created. 
- self.assertIsNone(selected_edge_id) - - -class EdgeUtilsTestCase(EdgeUtilsTestCaseMixin): - - def setUp(self): - super(EdgeUtilsTestCase, self).setUp() - self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) - - # Args for vcns interface configuration - self.internal_ip = '10.0.0.1' - self.uplink_ip = '192.168.111.30' - self.subnet_mask = '255.255.255.0' - self.pref_len = '24' - self.edge_id = 'dummy' - self.orig_vnics = ({}, - {'vnics': [ - {'addressGroups': - {'addressGroups': [ - {'subnetMask': self.subnet_mask, - 'subnetPrefixLength': self.pref_len, - 'primaryAddress': self.uplink_ip}]}, - 'type': 'uplink', - 'index': 1}, - {'addressGroups': - {'addressGroups': [ - {'subnetMask': self.subnet_mask, - 'subnetPrefixLength': self.pref_len, - 'primaryAddress': self.internal_ip}]}, - 'type': 'internal', - 'index': 2}]} - ) - - # Args for vcns vdr interface configuration - self.vdr_ip = '10.0.0.1' - self.vnic = 1 - self.orig_vdr = ({}, - {'index': 2, - 'addressGroups': {'addressGroups': - [{'subnetMask': self.subnet_mask, - 'subnetPrefixLength': self.pref_len, - 'primaryAddress': self.vdr_ip}]}, - 'type': 'internal'}) - - def test_create_lrouter(self): - lrouter = self._create_router() - self.nsxv_manager.deploy_edge.reset_mock() - edge_utils.create_lrouter(self.nsxv_manager, self.ctx, lrouter, - lswitch=None, dist=False, - availability_zone=self.az) - self.nsxv_manager.deploy_edge.assert_called_once_with(self.ctx, - lrouter['id'], (lrouter['name'] + '-' + lrouter['id']), - internal_network=None, dist=False, availability_zone=self.az, - appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router']) - - def _test_update_intereface_primary_addr(self, old_ip, new_ip, isUplink): - fixed_vnic = {'addressGroups': - {'addressGroups': [ - {'subnetMask': self.subnet_mask, - 'subnetPrefixLength': self.pref_len, - 'primaryAddress': new_ip}] if new_ip else []}, - 'type': 'uplink' if isUplink else 'internal', - 'index': 1 if isUplink else 2} - - with mock.patch.object(self.nsxv_manager.vcns, - 'get_interfaces', return_value=self.orig_vnics): - self.edge_manager.update_interface_addr( - self.ctx, self.edge_id, old_ip, new_ip, - self.subnet_mask, is_uplink=isUplink) - self.nsxv_manager.vcns.update_interface.assert_called_once_with( - self.edge_id, fixed_vnic) - - def test_update_interface_addr_intrernal(self): - self._test_update_intereface_primary_addr( - self.internal_ip, '10.0.0.2', False) - - def test_remove_interface_primary_addr_intrernal(self): - self._test_update_intereface_primary_addr( - self.internal_ip, None, False) - - def test_update_interface_addr_uplink(self): - self._test_update_intereface_primary_addr( - self.uplink_ip, '192.168.111.31', True) - - def test_remove_interface_primary_addr_uplink(self): - self._test_update_intereface_primary_addr( - self.uplink_ip, None, True) - - def _test_update_intereface_secondary_addr(self, old_ip, new_ip): - addr_group = {'subnetMask': self.subnet_mask, - 'subnetPrefixLength': self.pref_len, - 'primaryAddress': self.uplink_ip, - 'secondaryAddresses': {'type': 'secondary_addresses', - 'ipAddress': [new_ip]}} - fixed_vnic = {'addressGroups': {'addressGroups': [addr_group]}, - 'type': 'uplink', - 'index': 1} - - with mock.patch.object(self.nsxv_manager.vcns, - 'get_interfaces', return_value=self.orig_vnics): - self.edge_manager.update_interface_addr( - self.ctx, self.edge_id, old_ip, new_ip, - self.subnet_mask, is_uplink=True) - self.nsxv_manager.vcns.update_interface.assert_called_once_with( - self.edge_id, fixed_vnic) - - def 
test_add_secondary_interface_addr(self): - self._test_update_intereface_secondary_addr( - None, '192.168.111.31') - - def test_update_interface_addr_fail(self): - # Old ip is not configured on the interface, so we should fail - old_ip = '192.168.111.32' - new_ip = '192.168.111.31' - - with mock.patch.object(self.nsxv_manager.vcns, - 'get_interfaces', return_value=self.orig_vnics): - self.assertRaises( - nsx_exc.NsxPluginException, - self.edge_manager.update_interface_addr, - self.ctx, self.edge_id, old_ip, new_ip, - self.subnet_mask, is_uplink=True) - - def _test_update_vdr_intereface_primary_addr(self, old_ip, - new_ip): - fixed_vnic = {'addressGroups': - {'addressGroups': [ - {'subnetMask': self.subnet_mask, - 'subnetPrefixLength': self.pref_len, - 'primaryAddress': new_ip}] if new_ip else []}, - 'type': 'internal', - 'index': 2} - - with mock.patch.object(self.nsxv_manager.vcns, - 'get_vdr_internal_interface', return_value=self.orig_vdr): - with mock.patch.object(self.nsxv_manager.vcns, - 'update_vdr_internal_interface') as vcns_update: - self.edge_manager.update_vdr_interface_addr( - self.ctx, self.edge_id, self.vnic, old_ip, new_ip, - self.subnet_mask) - vcns_update.assert_called_once_with(self.edge_id, - self.vnic, - {'interface': fixed_vnic}) - - def test_update_vdr_interface_addr_intrernal(self): - self._test_update_vdr_intereface_primary_addr( - self.vdr_ip, '20.0.0.2') - - def test_remove_vdr_interface_primary_addr_intrernal(self): - self._test_update_vdr_intereface_primary_addr( - self.vdr_ip, None) - - def test_update_vdr_interface_addr_fail(self): - # Old ip is not configured on the vdr interface, so we should fail - old_ip = '192.168.111.32' - new_ip = '192.168.111.31' - - with mock.patch.object(self.nsxv_manager.vcns, - 'get_vdr_internal_interface', return_value=self.orig_vdr): - self.assertRaises( - nsx_exc.NsxPluginException, - self.edge_manager.update_vdr_interface_addr, - self.ctx, self.edge_id, self.vnic, old_ip, new_ip, - self.subnet_mask) - - -class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): - - def setUp(self): - super(EdgeManagerTestCase, self).setUp() - cfg.CONF.set_override('backup_edge_pool', [], 'nsxv') - self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) - self.check = mock.patch.object(self.edge_manager, - 'check_edge_active_at_backend').start() - self.check.side_effect = self.check_edge_active_at_backend - self.default_edge_pool_dicts = {'default': { - nsxv_constants.SERVICE_EDGE: { - nsxv_constants.LARGE: {'minimum_pooled_edges': 1, - 'maximum_pooled_edges': 3}, - nsxv_constants.COMPACT: {'minimum_pooled_edges': 1, - 'maximum_pooled_edges': 3}}, - nsxv_constants.VDR_EDGE: {}}} - self.vdr_edge_pool_dicts = {'default': { - nsxv_constants.SERVICE_EDGE: {}, - nsxv_constants.VDR_EDGE: { - nsxv_constants.LARGE: {'minimum_pooled_edges': 1, - 'maximum_pooled_edges': 3}}}} - - def check_edge_active_at_backend(self, edge_id): - # workaround to let edge_id None pass since we wrapped router binding - # db update op. 
- if edge_id is None: - edge_id = "" - return not (edge_id.startswith(EDGE_ERROR1) or - edge_id.startswith(EDGE_ERROR2)) - - def test_backup_edge_pool_with_default(self): - cfg.CONF.set_override('backup_edge_pool', - ['service:large:1:3', 'service:compact:1:3'], - 'nsxv') - az = nsx_az.NsxVAvailabilityZone(None) - edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) - self.assertEqual(self.default_edge_pool_dicts['default'], - edge_pool_dicts) - - def test_backup_edge_pool_with_empty_conf(self): - cfg.CONF.set_override('backup_edge_pool', [], 'nsxv') - az = nsx_az.NsxVAvailabilityZone(None) - edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) - expect_edge_pool_dicts = { - nsxv_constants.SERVICE_EDGE: {}, - nsxv_constants.VDR_EDGE: {}} - self.assertEqual(expect_edge_pool_dicts, edge_pool_dicts) - - def test_backup_edge_pool_with_vdr_conf(self): - cfg.CONF.set_override('backup_edge_pool', ['vdr:large:1:3'], 'nsxv') - az = nsx_az.NsxVAvailabilityZone(None) - edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) - expect_edge_pool_dicts = self.vdr_edge_pool_dicts['default'] - self.assertEqual(expect_edge_pool_dicts, edge_pool_dicts) - - def test_backup_edge_pool_with_duplicate_conf(self): - cfg.CONF.set_override('backup_edge_pool', - ['service:compact:1:3', 'service::3:4'], - 'nsxv') - az = nsx_az.NsxVAvailabilityZone(None) - self.assertRaises(n_exc.Invalid, - edge_utils.parse_backup_edge_pool_opt_per_az, az) - - def _create_router_bindings(self, num, status, id_prefix, size, - edge_type, availability_zone): - if not availability_zone: - availability_zone = self.az - return [{'status': status, - 'edge_id': id_prefix + '-edge-' + str(i), - 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + - id_prefix + str(i)), - 'appliance_size': size, - 'edge_type': edge_type, - 'availability_zone': availability_zone.name} - for i in range(num)] - - def _create_available_router_bindings( - self, num, size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - availability_zone=None): - status = constants.ACTIVE - id_prefix = EDGE_AVAIL + size + '-' + edge_type - return self._create_router_bindings( - num, status, id_prefix, size, edge_type, - availability_zone) - - def _create_creating_router_bindings( - self, num, size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - availability_zone=None): - status = constants.PENDING_CREATE - id_prefix = EDGE_CREATING + size + '-' + edge_type - return self._create_router_bindings( - num, status, id_prefix, size, edge_type, - availability_zone) - - def _create_error_router_bindings( - self, num, status=constants.ERROR, - size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - availability_zone=None): - id_prefix = EDGE_ERROR1 + size + '-' + edge_type - return self._create_router_bindings( - num, status, id_prefix, size, edge_type, - availability_zone) - - def _create_error_router_bindings_at_backend( - self, num, status=constants.ACTIVE, - size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - availability_zone=None): - id_prefix = EDGE_ERROR2 + size + '-' + edge_type - return self._create_router_bindings( - num, status, id_prefix, size, edge_type, - availability_zone) - - def _create_deleting_router_bindings( - self, num, size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - availability_zone=None): - status = constants.PENDING_DELETE - id_prefix = EDGE_DELETING + size + '-' + edge_type - return self._create_router_bindings( - num, status, id_prefix, size, 
edge_type, - availability_zone) - - def _create_edge_pools(self, avail, creating, error, - error_at_backend, deleting, - size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE): - """Create a backup edge pool with different status of edges. - - Backup edges would be edges with avail, creating and error_at_backend, - while available edges would only be edges with avail status. - """ - availability_zone = self.az - return ( - self._create_error_router_bindings( - error, size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_deleting_router_bindings( - deleting, size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_error_router_bindings_at_backend( - error_at_backend, size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_creating_router_bindings( - creating, size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_available_router_bindings( - avail, size=size, edge_type=edge_type, - availability_zone=availability_zone)) - - def _create_backup_router_bindings( - self, avail, creating, error, error_at_backend, deleting, - error_status=constants.PENDING_DELETE, - error_at_backend_status=constants.PENDING_DELETE, - size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - availability_zone=None): - if not availability_zone: - availability_zone = self.az - return ( - self._create_error_router_bindings( - error, status=error_status, size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_error_router_bindings_at_backend( - error_at_backend, status=error_at_backend_status, - size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_creating_router_bindings( - creating, size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_available_router_bindings( - avail, size=size, edge_type=edge_type, - availability_zone=availability_zone) + - self._create_deleting_router_bindings( - deleting, size=size, edge_type=edge_type, - availability_zone=availability_zone)) - - def _verify_router_bindings(self, exp_bindings, act_db_bindings): - exp_dict = dict(zip([binding['router_id'] - for binding in exp_bindings], exp_bindings)) - act_bindings = [{'router_id': binding['router_id'], - 'edge_id': binding['edge_id'], - 'status': binding['status'], - 'appliance_size': binding['appliance_size'], - 'edge_type': binding['edge_type'], - 'availability_zone': binding['availability_zone']} - for binding in act_db_bindings] - act_dict = dict(zip([binding['router_id'] - for binding in act_bindings], act_bindings)) - self.assertEqual(exp_dict, act_dict) - - def test_get_backup_edge_bindings(self): - """Test get backup edges filtering out deleting and error edges.""" - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) - self._populate_vcns_router_binding(pool_edges) - expect_backup_bindings = self._create_backup_router_bindings( - 1, 2, 0, 4, 0, - error_at_backend_status=constants.ACTIVE, - size=nsxv_constants.LARGE) - backup_bindings = self.edge_manager._get_backup_edge_bindings(self.ctx, - appliance_size=nsxv_constants.LARGE, availability_zone=self.az) - self._verify_router_bindings(expect_backup_bindings, backup_bindings) - - def test_get_available_router_bindings(self): - appliance_size = nsxv_constants.LARGE - edge_type = nsxv_constants.SERVICE_EDGE - pool_edges = (self._create_edge_pools(1, 2, 3, 0, 5) 
+ - self._create_edge_pools( - 1, 2, 3, 0, 5, size=nsxv_constants.COMPACT)) - self._populate_vcns_router_binding(pool_edges) - expect_backup_bindings = self._create_backup_router_bindings( - 1, 2, 3, 0, 5, error_status=constants.ERROR) - binding = self.edge_manager._get_available_router_binding( - self.ctx, appliance_size=appliance_size, edge_type=edge_type, - availability_zone=self.az) - router_bindings = [ - binding_db - for binding_db in nsxv_db.get_nsxv_router_bindings( - self.ctx.session) - if (binding_db['appliance_size'] == appliance_size and - binding_db['edge_type'] == edge_type and - binding_db['availability_zone'] == 'default')] - self._verify_router_bindings(expect_backup_bindings, router_bindings) - edge_id = (EDGE_AVAIL + appliance_size + '-' + - edge_type + '-edge-' + str(0)) - self.assertEqual(edge_id, binding['edge_id']) - - def test_check_backup_edge_pool_with_max(self): - appliance_size = nsxv_constants.LARGE - edge_type = nsxv_constants.SERVICE_EDGE - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) - self._populate_vcns_router_binding(pool_edges) - expect_pool_bindings = self._create_backup_router_bindings( - 1, 2, 3, 4, 5, - error_status=constants.ERROR, - error_at_backend_status=constants.PENDING_DELETE) - self.edge_manager._check_backup_edge_pool( - 0, 3, - appliance_size=appliance_size, edge_type=edge_type, - availability_zone=self.az) - router_bindings = [ - binding - for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) - if (binding['appliance_size'] == appliance_size and - binding['edge_type'] == edge_type)] - self._verify_router_bindings(expect_pool_bindings, router_bindings) - - def test_check_backup_edge_pool_with_min(self): - appliance_size = nsxv_constants.LARGE - edge_type = nsxv_constants.SERVICE_EDGE - pool_edges = (self._create_edge_pools(1, 2, 3, 0, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) - self._populate_vcns_router_binding(pool_edges) - - edge_utils.eventlet = mock.Mock() - edge_utils.eventlet.spawn_n.return_value = None - - self.edge_manager._check_backup_edge_pool( - 5, 10, appliance_size=appliance_size, edge_type=edge_type, - availability_zone=self.az) - router_bindings = [ - binding - for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) - if binding['edge_id'] is None and - binding['status'] == constants.PENDING_CREATE] - - binding_ids = [bind.router_id for bind in router_bindings] - self.assertEqual(2, len(router_bindings)) - edge_utils.eventlet.spawn_n.assert_called_with( - mock.ANY, binding_ids, appliance_size, edge_type, self.az) - - def test_check_backup_edge_pools_with_empty_conf(self): - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + - self._create_edge_pools( - 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) - self._populate_vcns_router_binding(pool_edges) - self.edge_manager._check_backup_edge_pools() - router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) - for binding in router_bindings: - self.assertEqual(constants.PENDING_DELETE, binding['status']) - - def test_check_backup_edge_pools_with_default(self): - self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + - self._create_edge_pools( - 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) - 
self._populate_vcns_router_binding(pool_edges) - self.edge_manager._check_backup_edge_pools() - router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) - - expect_large_bindings = self._create_backup_router_bindings( - 1, 2, 3, 4, 5, - error_status=constants.PENDING_DELETE, - error_at_backend_status=constants.PENDING_DELETE) - large_bindings = [ - binding - for binding in router_bindings - if (binding['appliance_size'] == nsxv_constants.LARGE and - binding['edge_type'] == nsxv_constants.SERVICE_EDGE)] - self._verify_router_bindings(expect_large_bindings, large_bindings) - - expect_compact_bindings = self._create_backup_router_bindings( - 1, 2, 3, 4, 5, - error_status=constants.PENDING_DELETE, - error_at_backend_status=constants.PENDING_DELETE, - size=nsxv_constants.COMPACT) - compact_bindings = [ - binding - for binding in router_bindings - if (binding['appliance_size'] == nsxv_constants.COMPACT and - binding['edge_type'] == nsxv_constants.SERVICE_EDGE)] - self._verify_router_bindings(expect_compact_bindings, compact_bindings) - - vdr_bindings = [ - binding - for binding in router_bindings - if (binding['appliance_size'] == nsxv_constants.LARGE and - binding['edge_type'] == nsxv_constants.VDR_EDGE)] - for binding in vdr_bindings: - self.assertEqual(constants.PENDING_DELETE, binding['status']) - - def test_check_backup_edge_pools_with_vdr(self): - self.edge_manager.edge_pool_dicts = self.vdr_edge_pool_dicts - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + - self._create_edge_pools( - 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) - self._populate_vcns_router_binding(pool_edges) - self.edge_manager._check_backup_edge_pools() - router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) - expect_vdr_bindings = self._create_backup_router_bindings( - 1, 2, 3, 4, 5, - error_status=constants.PENDING_DELETE, - error_at_backend_status=constants.PENDING_DELETE, - edge_type=nsxv_constants.VDR_EDGE) - vdr_bindings = [ - binding - for binding in router_bindings - if (binding['appliance_size'] == nsxv_constants.LARGE and - binding['edge_type'] == nsxv_constants.VDR_EDGE)] - self._verify_router_bindings(expect_vdr_bindings, vdr_bindings) - service_bindings = [ - binding - for binding in router_bindings - if binding['edge_type'] == nsxv_constants.SERVICE_EDGE] - for binding in service_bindings: - self.assertEqual(constants.PENDING_DELETE, binding['status']) - - def test_allocate_edge_appliance_with_empty(self): - self.edge_manager._clean_all_error_edge_bindings = mock.Mock() - self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name', - availability_zone=self.az) - assert not self.edge_manager._clean_all_error_edge_bindings.called - - def test_allocate_large_edge_appliance_with_default(self): - self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + - self._create_edge_pools( - 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) - self._populate_vcns_router_binding(pool_edges) - self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name', - appliance_size=nsxv_constants.LARGE, - availability_zone=self.az) - edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + - nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) - self.nsxv_manager.rename_edge.assert_has_calls( - [mock.call(edge_id, 'fake_name')]) - - def 
test_allocate_compact_edge_appliance_with_default(self): - self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + - self._create_edge_pools( - 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) - self._populate_vcns_router_binding(pool_edges) - self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name', - appliance_size=nsxv_constants.COMPACT, - availability_zone=self.az) - edge_id = (EDGE_AVAIL + nsxv_constants.COMPACT + '-' + - nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) - self.nsxv_manager.rename_edge.assert_has_calls( - [mock.call(edge_id, 'fake_name')]) - - def test_allocate_large_edge_appliance_with_vdr(self): - self.edge_manager.edge_pool_dicts = self.vdr_edge_pool_dicts - pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + - self._create_edge_pools( - 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + - self._create_edge_pools( - 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) - self._populate_vcns_router_binding(pool_edges) - self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name', dist=True, - appliance_size=nsxv_constants.LARGE, - availability_zone=self.az) - edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + - nsxv_constants.VDR_EDGE + '-edge-' + str(0)) - self.nsxv_manager.rename_edge.assert_has_calls( - [mock.call(edge_id, 'fake_name')]) - - def test_free_edge_appliance_with_empty(self): - self.edge_manager._clean_all_error_edge_bindings = mock.Mock() - self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name', - availability_zone=self.az) - self.edge_manager._free_edge_appliance( - self.ctx, 'fake_id') - assert not self.edge_manager._clean_all_error_edge_bindings.called - - def test_free_edge_appliance_with_default(self): - self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts - self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name', - availability_zone=self.az) - self.edge_manager._free_edge_appliance( - self.ctx, 'fake_id') - assert not self.nsxv_manager.delete_edge.called - self.nsxv_manager.update_edge.assert_has_calls( - [mock.call(mock.ANY, mock.ANY, mock.ANY, mock.ANY, None, - appliance_size=nsxv_constants.COMPACT, dist=False, - availability_zone=mock.ANY)]) - - def test_free_edge_appliance_with_default_with_full(self): - self.edge_pool_dicts = { - nsxv_constants.SERVICE_EDGE: { - nsxv_constants.LARGE: {'minimum_pooled_edges': 1, - 'maximum_pooled_edges': 1}, - nsxv_constants.COMPACT: {'minimum_pooled_edges': 1, - 'maximum_pooled_edges': 3}}, - nsxv_constants.VDR_EDGE: {}} - # Avoid use of eventlet greenpool as this breaks the UT - with mock.patch.object(self.edge_manager, '_get_worker_pool'): - self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name', - availability_zone=self.az) - self.edge_manager._free_edge_appliance( - self.ctx, 'fake_id') - - -class VdrTransitNetUtilDefaultTestCase(EdgeUtilsTestCaseMixin): - EXPECTED_NETMASK = '255.255.255.240' - EXPECTED_TLR_IP = '169.254.2.1' - EXPECTED_PLR_IP = conf.DEFAULT_PLR_ADDRESS - - def setUp(self): - super(VdrTransitNetUtilDefaultTestCase, self).setUp() - - def test_get_vdr_transit_network_netmask(self): - self.assertEqual(edge_utils.get_vdr_transit_network_netmask(), - self.EXPECTED_NETMASK) - - def test_get_vdr_transit_network_tlr_address(self): - self.assertEqual(edge_utils.get_vdr_transit_network_tlr_address(), - self.EXPECTED_TLR_IP) - - def 
test_get_vdr_transit_network_plr_address(self): - self.assertEqual(edge_utils.get_vdr_transit_network_plr_address(), - self.EXPECTED_PLR_IP) - - def test_is_overlapping_reserved_subnets(self): - self.assertTrue( - edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', - ['169.254.0.0/16'])) - self.assertTrue( - edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', - ['192.168.2.0/24', - '169.254.0.0/16'])) - self.assertFalse( - edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', - ['169.253.0.0/16'])) - self.assertFalse( - edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', - ['192.168.2.0/24', - '169.253.0.0/16'])) - - -class VdrTransitNetUtilTestCase(EdgeUtilsTestCaseMixin): - EXPECTED_NETMASK = '255.255.255.0' - EXPECTED_TLR_IP = '192.168.1.1' - EXPECTED_PLR_IP = '192.168.1.2' - - def setUp(self): - super(VdrTransitNetUtilTestCase, self).setUp() - - -class VdrTransitNetValidatorTestCase(EdgeUtilsTestCaseMixin): - def setUp(self): - super(VdrTransitNetValidatorTestCase, self).setUp() - - def _test_validator(self, cidr): - cfg.CONF.set_override('vdr_transit_network', cidr, 'nsxv') - return edge_utils.validate_vdr_transit_network() - - def test_vdr_transit_net_validator_success(self): - self.assertIsNone(self._test_validator('192.168.253.0/24')) - - def test_vdr_transit_net_validator_junk_cidr(self): - self.assertRaises(n_exc.Invalid, self._test_validator, 'not_a_subnet') - - def test_vdr_transit_net_validator_too_small_cidr(self): - self.assertRaises( - n_exc.Invalid, self._test_validator, '169.254.2.0/31') - - def test_vdr_transit_net_validator_overlap_cidr(self): - self.assertRaises( - n_exc.Invalid, self._test_validator, '169.254.0.0/16') diff --git a/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py b/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py deleted file mode 100644 index 56601a1ff3..0000000000 --- a/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py +++ /dev/null @@ -1,589 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from unittest import mock - -from eventlet import greenthread - -from neutron.tests import base -from neutron_lib import context as neutron_context -from oslo_config import cfg - -from vmware_nsx.common import exceptions as nsxv_exc -from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az -from vmware_nsx.plugins.nsx_v.vshield.common import ( - constants as vcns_const) -from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver as e_drv -from vmware_nsx.plugins.nsx_v.vshield.tasks import ( - constants as ts_const) -from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks as ts -from vmware_nsx.plugins.nsx_v.vshield import vcns_driver -from vmware_nsx.tests import unit as vmware -from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns - -VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test") - -ts.TaskManager.set_default_interval(100) - - -class VcnsDriverTaskManagerTestCase(base.BaseTestCase): - - def setUp(self): - super(VcnsDriverTaskManagerTestCase, self).setUp() - self.manager = ts.TaskManager() - self.manager.start(100) - - def tearDown(self): - self.manager.stop() - # Task manager should not leave running threads around - # if _thread is None it means it was killed in stop() - self.assertIsNone(self.manager._thread) - super(VcnsDriverTaskManagerTestCase, self).tearDown() - - def _test_task_manager_task_process_state(self, sync_exec=False): - def _task_failed(task, reason): - task.userdata['result'] = False - task.userdata['error'] = reason - - def _check_state(task, exp_state): - if not task.userdata.get('result', True): - return False - - state = task.userdata['state'] - if state != exp_state: - msg = "state %d expect %d" % ( - state, exp_state) - _task_failed(task, msg) - return False - - task.userdata['state'] = state + 1 - return True - - def _exec(task): - if not _check_state(task, 1): - return ts_const.TaskStatus.ERROR - - if task.userdata['sync_exec']: - return ts_const.TaskStatus.COMPLETED - else: - return ts_const.TaskStatus.PENDING - - def _status(task): - if task.userdata['sync_exec']: - _task_failed(task, "_status callback triggered") - - state = task.userdata['state'] - if state == 3: - _check_state(task, 3) - return ts_const.TaskStatus.PENDING - else: - _check_state(task, 4) - return ts_const.TaskStatus.COMPLETED - - def _result(task): - if task.userdata['sync_exec']: - exp_state = 3 - else: - exp_state = 5 - - _check_state(task, exp_state) - - def _start_monitor(task): - _check_state(task, 0) - - def _executed_monitor(task): - _check_state(task, 2) - - def _result_monitor(task): - if task.userdata['sync_exec']: - exp_state = 4 - else: - exp_state = 6 - - if _check_state(task, exp_state): - task.userdata['result'] = True - else: - task.userdata['result'] = False - - userdata = { - 'state': 0, - 'sync_exec': sync_exec - } - task = ts.Task('name', 'res', _exec, _status, _result, userdata) - task.add_start_monitor(_start_monitor) - task.add_executed_monitor(_executed_monitor) - task.add_result_monitor(_result_monitor) - - self.manager.add(task) - - task.wait(ts_const.TaskState.RESULT) - - self.assertTrue(userdata['result']) - - def test_task_manager_task_sync_exec_process_state(self): - self._test_task_manager_task_process_state(sync_exec=True) - - def test_task_manager_task_async_exec_process_state(self): - self._test_task_manager_task_process_state(sync_exec=False) - - def test_task_manager_task_ordered_process(self): - def _task_failed(task, reason): - task.userdata['result'] = False - task.userdata['error'] = reason - - def _exec(task): - 
task.userdata['executed'] = True - return ts_const.TaskStatus.PENDING - - def _status(task): - return ts_const.TaskStatus.COMPLETED - - def _result(task): - next_task = task.userdata.get('next') - if next_task: - if next_task.userdata.get('executed'): - _task_failed(next_task, "executed premature") - if task.userdata.get('result', True): - task.userdata['result'] = True - - tasks = [] - prev = None - last_task = None - for i in range(5): - name = "name-%d" % i - task = ts.Task(name, 'res', _exec, _status, _result, {}) - tasks.append(task) - if prev: - prev.userdata['next'] = task - prev = task - last_task = task - - for task in tasks: - self.manager.add(task) - - last_task.wait(ts_const.TaskState.RESULT) - - for task in tasks: - self.assertTrue(task.userdata['result']) - - def test_task_manager_task_parallel_process(self): - tasks = [] - - def _exec(task): - task.userdata['executed'] = True - return ts_const.TaskStatus.PENDING - - def _status(task): - for t in tasks: - if not t.userdata.get('executed'): - t.userdata['resut'] = False - return ts_const.TaskStatus.COMPLETED - - def _result(task): - if (task.userdata.get('result') is None and - task.status == ts_const.TaskStatus.COMPLETED): - task.userdata['result'] = True - else: - task.userdata['result'] = False - - for i in range(5): - name = "name-%d" % i - res = 'resource-%d' % i - task = ts.Task(name, res, _exec, _status, _result, {}) - tasks.append(task) - self.manager.add(task) - - for task in tasks: - task.wait(ts_const.TaskState.RESULT) - self.assertTrue(task.userdata['result']) - - def _test_task_manager_stop(self, exec_wait=False, result_wait=False, - stop_wait=0): - def _exec(task): - if exec_wait: - greenthread.sleep(0.01) - return ts_const.TaskStatus.PENDING - - def _status(task): - greenthread.sleep(0.01) - return ts_const.TaskStatus.PENDING - - def _result(task): - if result_wait: - greenthread.sleep(0) - - manager = ts.TaskManager().start(100) - manager.stop() - # Task manager should not leave running threads around - # if _thread is None it means it was killed in stop() - self.assertIsNone(manager._thread) - manager.start(100) - - alltasks = {} - for i in range(100): - res = 'res-%d' % i - tasks = [] - for i in range(100): - task = ts.Task('name', res, _exec, _status, _result) - manager.add(task) - tasks.append(task) - alltasks[res] = tasks - - greenthread.sleep(stop_wait) - manager.stop() - # Task manager should not leave running threads around - # if _thread is None it means it was killed in stop() - self.assertIsNone(manager._thread) - - for res, tasks in alltasks.items(): - for task in tasks: - self.assertEqual(ts_const.TaskStatus.ABORT, task.status) - - def test_task_manager_stop_1(self): - self._test_task_manager_stop(True, True, 0) - - def test_task_manager_stop_2(self): - self._test_task_manager_stop(True, True, 1) - - def test_task_manager_stop_3(self): - self._test_task_manager_stop(False, False, 0) - - def test_task_manager_stop_4(self): - self._test_task_manager_stop(False, False, 1) - - def test_task_pending_task(self): - def _exec(task): - task.userdata['executing'] = True - while not task.userdata['tested']: - greenthread.sleep(0) - task.userdata['executing'] = False - return ts_const.TaskStatus.COMPLETED - - userdata = { - 'executing': False, - 'tested': False - } - manager = ts.TaskManager().start(100) - task = ts.Task('name', 'res', _exec, userdata=userdata) - manager.add(task) - - while not userdata['executing']: - greenthread.sleep(0) - self.assertTrue(manager.has_pending_task()) - - userdata['tested'] = 
True - while userdata['executing']: - greenthread.sleep(0) - self.assertFalse(manager.has_pending_task()) - - -class VcnsDriverTestCase(base.BaseTestCase): - - def vcns_patch(self): - instance = self.mock_vcns.start() - instance.return_value.deploy_edge.side_effect = self.fc.deploy_edge - instance.return_value.get_edge_id.side_effect = self.fc.get_edge_id - instance.return_value.delete_edge.side_effect = self.fc.delete_edge - instance.return_value.update_interface.side_effect = ( - self.fc.update_interface) - instance.return_value.get_nat_config.side_effect = ( - self.fc.get_nat_config) - instance.return_value.update_nat_config.side_effect = ( - self.fc.update_nat_config) - instance.return_value.delete_nat_rule.side_effect = ( - self.fc.delete_nat_rule) - instance.return_value.get_edge_status.side_effect = ( - self.fc.get_edge_status) - instance.return_value.get_edges.side_effect = self.fc.get_edges - instance.return_value.update_routes.side_effect = ( - self.fc.update_routes) - instance.return_value.create_lswitch.side_effect = ( - self.fc.create_lswitch) - instance.return_value.delete_lswitch.side_effect = ( - self.fc.delete_lswitch) - - def setUp(self): - super(VcnsDriverTestCase, self).setUp() - - self.ctx = neutron_context.get_admin_context() - self.temp_e_drv_nsxv_db = e_drv.nsxv_db - e_drv.nsxv_db = mock.MagicMock() - self.config_parse(args=['--config-file', VCNS_CONFIG_FILE]) - - self.fc = fake_vcns.FakeVcns() - self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) - self.vcns_patch() - - self.addCleanup(self.fc.reset_all) - - self.vcns_driver = vcns_driver.VcnsDriver(self) - - self.az = (nsx_az.NsxVAvailabilityZones(). - get_default_availability_zone()) - self.edge_id = None - self.result = None - - def tearDown(self): - e_drv.nsxv_db = self.temp_e_drv_nsxv_db - self.vcns_driver.task_manager.stop() - # Task manager should not leave running threads around - # if _thread is None it means it was killed in stop() - self.assertIsNone(self.vcns_driver.task_manager._thread) - super(VcnsDriverTestCase, self).tearDown() - - def complete_edge_creation( - self, context, edge_id, name, router_id, dist, deploy_successful, - availability_zone=None, deploy_metadata=False): - pass - - def _deploy_edge(self): - self.edge_id = self.vcns_driver.deploy_edge( - self.ctx, 'router-id', 'myedge', 'internal-network', - availability_zone=self.az) - self.assertEqual('edge-1', self.edge_id) - - def test_deploy_edge_with(self): - self.vcns_driver.deploy_edge( - self.ctx, 'router-id', 'myedge', 'internal-network', - availability_zone=self.az) - status = self.vcns_driver.get_edge_status('edge-1') - self.assertEqual(vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, status) - - def test_deploy_edge_fail(self): - self.vcns_driver.deploy_edge( - self.ctx, 'router-1', 'myedge', 'internal-network', - availability_zone=self.az) - # self.vcns_driver.deploy_edge( - # self.ctx, 'router-2', 'myedge', 'internal-network', - # availability_zone=self.az) - self.assertRaises( - nsxv_exc.NsxPluginException, self.vcns_driver.deploy_edge, - self.ctx, 'router-2', 'myedge', 'internal-network', - availability_zone=self.az) - - def test_get_edge_status(self): - self._deploy_edge() - status = self.vcns_driver.get_edge_status(self.edge_id) - self.assertEqual(vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, status) - - def test_update_nat_rules(self): - self._deploy_edge() - snats = [{ - 'src': '192.168.1.0/24', - 'translated': '10.0.0.1' - }, { - 'src': '192.168.2.0/24', - 'translated': '10.0.0.2' - }, { - 'src': '192.168.3.0/24', - 
'translated': '10.0.0.3' - } - ] - dnats = [{ - 'dst': '100.0.0.4', - 'translated': '192.168.1.1' - }, { - 'dst': '100.0.0.5', - 'translated': '192.168.2.1' - } - ] - - result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats) - self.assertTrue(result) - - natcfg = self.vcns_driver.get_nat_config(self.edge_id) - rules = natcfg['rules']['natRulesDtos'] - self.assertEqual(2 * len(dnats) + len(snats), len(rules)) - self.natEquals(rules[0], dnats[0]) - self.natEquals(rules[1], self.snat_for_dnat(dnats[0])) - self.natEquals(rules[2], dnats[1]) - self.natEquals(rules[3], self.snat_for_dnat(dnats[1])) - self.natEquals(rules[4], snats[0]) - self.natEquals(rules[5], snats[1]) - self.natEquals(rules[6], snats[2]) - - def test_update_nat_rules_for_all_vnics(self): - self._deploy_edge() - snats = [{ - 'src': '192.168.1.0/24', - 'translated': '10.0.0.1' - }, { - 'src': '192.168.2.0/24', - 'translated': '10.0.0.2' - }, { - 'src': '192.168.3.0/24', - 'translated': '10.0.0.3' - } - ] - dnats = [{ - 'dst': '100.0.0.4', - 'translated': '192.168.1.1' - }, { - 'dst': '100.0.0.5', - 'translated': '192.168.2.1' - } - ] - - indices = [0, 1, 2, 3] - result = self.vcns_driver.update_nat_rules(self.edge_id, - snats, dnats, indices) - self.assertTrue(result) - - natcfg = self.vcns_driver.get_nat_config(self.edge_id) - rules = natcfg['rules']['natRulesDtos'] - - self.assertEqual(2 * len(indices) * len(dnats) + - len(indices) * len(snats), len(rules)) - - sorted_rules = sorted(rules, key=lambda k: k['vnic']) - for i in range(0, len(sorted_rules), 7): - self.natEquals(sorted_rules[i], dnats[0]) - self.natEquals(sorted_rules[i + 1], self.snat_for_dnat(dnats[0])) - self.natEquals(sorted_rules[i + 2], dnats[1]) - self.natEquals(sorted_rules[i + 3], self.snat_for_dnat(dnats[1])) - self.natEquals(sorted_rules[i + 4], snats[0]) - self.natEquals(sorted_rules[i + 5], snats[1]) - self.natEquals(sorted_rules[i + 6], snats[2]) - - def test_update_nat_rules_for_specific_vnics(self): - self._deploy_edge() - snats = [{ - 'src': '192.168.1.0/24', - 'translated': '10.0.0.1', - 'vnic_index': 5 - }, { - 'src': '192.168.2.0/24', - 'translated': '10.0.0.2' - }, { - 'src': '192.168.3.0/24', - 'translated': '10.0.0.3' - } - ] - dnats = [{ - 'dst': '100.0.0.4', - 'translated': '192.168.1.1', - 'vnic_index': 2 - }, { - 'dst': '100.0.0.5', - 'translated': '192.168.2.1' - } - ] - - result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats) - self.assertTrue(result) - - natcfg = self.vcns_driver.get_nat_config(self.edge_id) - - rules = natcfg['rules']['natRulesDtos'] - - self.assertEqual(2 * len(dnats) + len(snats), len(rules)) - - self.natEquals(rules[0], dnats[0]) - self.assertEqual(2, rules[0]['vnic']) - self.natEquals(rules[1], self.snat_for_dnat(dnats[0])) - self.assertEqual(2, rules[1]['vnic']) - self.natEquals(rules[2], dnats[1]) - self.assertNotIn('vnic', rules[2]) - self.natEquals(rules[3], self.snat_for_dnat(dnats[1])) - self.assertNotIn('vnic', rules[3]) - self.natEquals(rules[4], snats[0]) - self.assertEqual(5, rules[4]['vnic']) - self.natEquals(rules[5], snats[1]) - self.assertNotIn('vnic', rules[5]) - self.natEquals(rules[6], snats[2]) - self.assertNotIn('vnic', rules[6]) - - def snat_for_dnat(self, dnat): - return { - 'src': dnat['translated'], - 'translated': dnat['dst'] - } - - def natEquals(self, rule, exp): - addr = exp.get('src') - if not addr: - addr = exp.get('dst') - - self.assertEqual(addr, rule['originalAddress']) - self.assertEqual(exp['translated'], rule['translatedAddress']) - - def 
test_update_routes(self): - self._deploy_edge() - routes = [{ - 'cidr': '192.168.1.0/24', - 'nexthop': '169.254.2.1' - }, { - 'cidr': '192.168.2.0/24', - 'nexthop': '169.254.2.1' - }, { - 'cidr': '192.168.3.0/24', - 'nexthop': '169.254.2.1' - } - ] - result = self.vcns_driver.update_routes( - self.edge_id, '10.0.0.1', routes) - self.assertTrue(result) - - def test_update_interface(self): - self._deploy_edge() - self.vcns_driver.update_interface( - 'router-id', self.edge_id, vcns_const.EXTERNAL_VNIC_INDEX, - 'network-id', address='100.0.0.3', netmask='255.255.255.0') - - def test_delete_edge(self): - self._deploy_edge() - result = self.vcns_driver.delete_edge( - self.ctx, 'router-id', self.edge_id) - self.assertTrue(result) - - def test_create_lswitch(self): - tz_config = [{ - 'transport_zone_uuid': 'tz-uuid' - }] - lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) - self.assertEqual('lswitch', lswitch['display_name']) - self.assertEqual('LogicalSwitchConfig', lswitch['type']) - self.assertIn('uuid', lswitch) - - def test_delete_lswitch(self): - tz_config = { - 'transport_zone_uuid': 'tz-uuid' - } - lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) - self.vcns_driver.delete_lswitch(lswitch['uuid']) - - -class VcnsDriverHATestCase(VcnsDriverTestCase): - - def setUp(self): - # add edge_ha and ha_datastore to the pre-defined configuration - self._data_store = 'fake-datastore' - self._ha_data_store = 'fake-datastore-2' - cfg.CONF.set_override('ha_datastore_id', self._ha_data_store, - group="nsxv") - cfg.CONF.set_override('edge_ha', True, group="nsxv") - - super(VcnsDriverHATestCase, self).setUp() - - self.vcns_driver.vcns.orig_deploy = self.vcns_driver.vcns.deploy_edge - self.vcns_driver.vcns.deploy_edge = self._fake_deploy_edge - - def _fake_deploy_edge(self, request): - # validate the appliance structure in the request, - # and return the regular (fake) response - found_app = request['appliances']['appliances'] - self.assertEqual(2, len(found_app)) - self.assertEqual(self._data_store, found_app[0]['datastoreId']) - self.assertEqual(self._ha_data_store, found_app[1]['datastoreId']) - return self.vcns_driver.vcns.orig_deploy(request) diff --git a/vmware_nsx/tests/unit/services/dynamic_routing/__init__.py b/vmware_nsx/tests/unit/services/dynamic_routing/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vmware_nsx/tests/unit/services/dynamic_routing/test_nsxv_bgp_driver.py b/vmware_nsx/tests/unit/services/dynamic_routing/test_nsxv_bgp_driver.py deleted file mode 100644 index 27d703562d..0000000000 --- a/vmware_nsx/tests/unit/services/dynamic_routing/test_nsxv_bgp_driver.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright 2017 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import contextlib -from unittest import mock - -from neutron.api import extensions -from neutron_dynamic_routing.db import bgp_db # noqa -from neutron_dynamic_routing import extensions as dr_extensions -from neutron_dynamic_routing.extensions import bgp as ext_bgp -from neutron_dynamic_routing.tests.unit.db import test_bgp_db -from neutron_lib.api.definitions import address_scope -from neutron_lib import context -from neutron_lib import exceptions as n_exc -from neutron_lib.plugins import directory - -from vmware_nsx.common import exceptions as exc -from vmware_nsx.db import nsxv_db -from vmware_nsx.plugins.nsx_v.drivers import ( - shared_router_driver as router_driver) -from vmware_nsx.services.dynamic_routing import bgp_plugin -from vmware_nsx.services.dynamic_routing.nsx_v import driver as bgp_driver -from vmware_nsx.tests.unit.nsx_v import test_plugin - -BGP_PLUGIN = 'vmware_nsx.services.dynamic_routing.bgp_plugin.NSXvBgpPlugin' - - -class TestNSXvBgpPlugin(test_plugin.NsxVPluginV2TestCase, - test_bgp_db.BgpTests): - def setUp(self): - extensions.append_api_extensions_path(dr_extensions.__path__) - service_plugins = {'bgp': BGP_PLUGIN} - super(TestNSXvBgpPlugin, self).setUp(service_plugins=service_plugins) - self.bgp_plugin = bgp_plugin.NSXvBgpPlugin() - self.nsxv_driver = self.bgp_plugin.drivers['nsx-v'] - self.nsxv_driver._validate_gateway_network = mock.Mock() - self.nsxv_driver._validate_bgp_configuration_on_peer_esg = ( - mock.Mock()) - self.plugin = directory.get_plugin() - self.l3plugin = self.plugin - self.plugin.init_is_complete = True - self.context = context.get_admin_context() - self.project_id = 'dummy_project' - - @contextlib.contextmanager - def gw_network(self, external=True, **kwargs): - with super(TestNSXvBgpPlugin, self).gw_network(external=external, - **kwargs) as gw_net: - if external: - gw_net['network']['router:external'] = True - gw_net['network'][address_scope.IPV4_ADDRESS_SCOPE] = True - yield gw_net - - @contextlib.contextmanager - def subnet(self, network=None, **kwargs): - if network and network['network'].get('router:external'): - kwargs['gateway_ip'] = None - kwargs['enable_dhcp'] = False - - with super(TestNSXvBgpPlugin, self).subnet(network=network, - **kwargs) as sub: - yield sub - - @contextlib.contextmanager - def router(self, **kwargs): - if 'external_gateway_info' in kwargs: - kwargs['external_gateway_info']['enable_snat'] = False - with super(TestNSXvBgpPlugin, self).router(**kwargs) as r: - yield r - - @contextlib.contextmanager - def esg_bgp_peer(self, esg_id): - data = {'name': '', - 'peer_ip': '192.168.1.10', - 'remote_as': '65000', - 'esg_id': esg_id, - 'auth_type': 'none', - 'password': '', - 'tenant_id': self.project_id} - bgp_peer = self.bgp_plugin.create_bgp_peer(self.context, - {'bgp_peer': data}) - yield bgp_peer - self.bgp_plugin.delete_bgp_peer(self.context, bgp_peer['id']) - - @contextlib.contextmanager - def bgp_speaker(self, ip_version, local_as, name='my-speaker', - advertise_fip_host_routes=True, - advertise_tenant_networks=True, - networks=None, peers=None): - data = {'ip_version': ip_version, - test_bgp_db.ADVERTISE_FIPS_KEY: advertise_fip_host_routes, - 'advertise_tenant_networks': advertise_tenant_networks, - 'local_as': local_as, 'name': name, - 'tenant_id': self.project_id} - bgp_speaker = self.bgp_plugin.create_bgp_speaker(self.context, - {'bgp_speaker': data}) - bgp_speaker_id = bgp_speaker['id'] - - if networks: - for network_id in networks: - self.bgp_plugin.add_gateway_network( - self.context, - bgp_speaker_id, - 
{'network_id': network_id}) - if peers: - for peer_id in peers: - self.bgp_plugin.add_bgp_peer(self.context, bgp_speaker_id, - {'bgp_peer_id': peer_id}) - - yield self.bgp_plugin.get_bgp_speaker(self.context, bgp_speaker_id) - - def test_get_external_networks_for_port_same_address_scope_v6(self): - self.skipTest("IPv6 not supported by this plugin.") - - def test_get_external_networks_for_port_different_address_scope_v6(self): - self.skipTest("IPv6 not supported by this plugin.") - - def test__get_dvr_fixed_ip_routes_by_bgp_speaker_same_scope(self): - self.skipTest("DVR specific.") - - def test_get_external_networks_for_port_different_address_scope_v4(self): - self.skipTest("DVR specific.") - - def test__get_dvr_fixed_ip_routes_by_bgp_speaker_different_scope(self): - self.skipTest("DVR specific.") - - def test__get_dvr_fixed_ip_routes_by_bgp_speaker_no_scope(self): - self.skipTest("DVR specific.") - - def test_create_v6_bgp_speaker(self): - fake_bgp_speaker = { - "bgp_speaker": { - "ip_version": 6, - "local_as": "1000", - "name": "bgp-speaker", - "tenant_id": self.project_id - } - } - self.assertRaises(n_exc.InvalidInput, - self.bgp_plugin.create_bgp_speaker, - self.context, fake_bgp_speaker) - - def test_create_v6_bgp_peer(self): - fake_bgp_peer = { - "bgp_peer": { - "auth_type": "none", - "remote_as": "1000", - "name": "bgp-peer", - "peer_ip": "fc00::/7", - "tenant_id": self.project_id - } - } - self.assertRaises(n_exc.InvalidInput, - self.bgp_plugin.create_bgp_peer, - self.context, fake_bgp_peer) - - def test_bgp_peer_esg_id(self): - edge_id = 'edge-123' - with self.esg_bgp_peer(esg_id='edge-123') as esg_peer: - self.assertEqual(edge_id, esg_peer['esg_id']) - - peer_id = esg_peer['id'] - bgp_peer = self.bgp_plugin.get_bgp_peer(self.context, peer_id) - self.assertEqual(edge_id, bgp_peer['esg_id']) - - def test_create_bgp_peer_md5_auth_no_password(self): - bgp_peer = {'bgp_peer': - {'auth_type': 'md5', 'password': None, - 'peer_ip': '10.0.0.3', - 'tenant_id': self.project_id}} - self.assertRaises(ext_bgp.InvalidBgpPeerMd5Authentication, - self.bgp_plugin.create_bgp_peer, - self.context, bgp_peer) - - def test_add_non_external_gateway_network(self): - self.nsxv_driver._validate_gateway_network = ( - bgp_driver.NSXvBgpDriver( - self.bgp_plugin)._validate_gateway_network) - with self.gw_network(external=False) as net,\ - self.subnetpool_with_address_scope(4, - prefixes=['8.0.0.0/8']) as sp: - network_id = net['network']['id'] - with self.bgp_speaker(sp['ip_version'], 1234) as speaker: - self.assertRaises(exc.NsxBgpNetworkNotExternal, - self.bgp_plugin.add_gateway_network, - self.context, speaker['id'], - {'network_id': network_id}) - - @mock.patch.object(nsxv_db, 'get_nsxv_bgp_speaker_binding', - return_value={'bgp_identifier': '10.0.0.11'}) - def test_shared_router_on_gateway_clear(self, m1): - with self.gw_network(external=True) as net,\ - self.subnetpool_with_address_scope(4, - prefixes=['10.0.0.0/24']) as sp: - with self.subnet(network=net, - subnetpool_id=sp['id']) as s1,\ - self.bgp_speaker(sp['ip_version'], 1234, - networks=[net['network']['id']]): - subnet_id = s1['subnet']['id'] - gw_info1 = {'network_id': net['network']['id'], - 'external_fixed_ips': [{'ip_address': '10.0.0.11', - 'subnet_id': subnet_id}]} - gw_info2 = {'network_id': net['network']['id'], - 'external_fixed_ips': [{'ip_address': '10.0.0.12', - 'subnet_id': subnet_id}]} - router_obj = router_driver.RouterSharedDriver(self.plugin) - with mock.patch.object(self.plugin, '_find_router_driver', - return_value=router_obj): 
- with self.router(external_gateway_info=gw_info1) as rtr1,\ - self.router(external_gateway_info=gw_info2) as rtr2,\ - mock.patch.object( - self.nsxv_driver, '_get_router_edge_info', - return_value=('edge-1', False)),\ - mock.patch.object( - self.plugin.edge_manager, - 'get_routers_on_same_edge', - return_value=[rtr1['id'], rtr2['id']]),\ - mock.patch.object( - self.nsxv_driver, - '_update_edge_bgp_identifier') as up_bgp: - gw_clear = {u'router': {u'external_gateway_info': {}}} - self.plugin.update_router(self.context, - rtr1['id'], - gw_clear) - up_bgp.assert_called_once_with(mock.ANY, - mock.ANY, - mock.ANY, - '10.0.0.12') - - def test__bgp_speakers_for_gateway_network_by_ip_version(self): - # REVISIT(roeyc): Base class test use ipv6 which is not supported. - pass - - def test__bgp_speakers_for_gateway_network_by_ip_version_no_binding(self): - # REVISIT(roeyc): Base class test use ipv6 which is not supported. - pass - - def test__tenant_prefixes_by_router_no_gateway_port(self): - # REVISIT(roeyc): Base class test use ipv6 which is not supported. - pass - - def test_all_routes_by_bgp_speaker_different_tenant_address_scope(self): - # REVISIT(roeyc): Base class test use ipv6 which is not supported. - pass - - def test__get_address_scope_ids_for_bgp_speaker(self): - pass - - def test__get_dvr_fip_host_routes_by_binding(self): - pass - - def test__get_dvr_fip_host_routes_by_router(self): - pass - - def test__get_fip_next_hop_dvr(self): - pass - - def test__get_fip_next_hop_legacy(self): - pass - - def test_get_routes_by_bgp_speaker_id_with_fip_dvr(self): - pass - - def test_ha_router_fips_has_no_next_hop_to_fip_agent_gateway(self): - pass - - def test_legacy_router_fips_has_no_next_hop_to_fip_agent_gateway(self): - pass - - def test_floatingip_update_callback(self): - pass - - def test_get_ipv6_tenant_subnet_routes_by_bgp_speaker_ipv6(self): - pass - - def test_get_routes_by_bgp_speaker_id_with_fip(self): - # base class tests uses no-snat router with floating ips - self.skipTest('No SNAT with floating ips not supported') - - def test_get_routes_by_bgp_speaker_binding_with_fip(self): - # base class tests uses no-snat router with floating ips - self.skipTest('No SNAT with floating ips not supported') - - def test__get_routes_by_router_with_fip(self): - # base class tests uses no-snat router with floating ips - self.skipTest('No SNAT with floating ips not supported') - - def test_add_bgp_peer_with_bad_id(self): - with self.subnetpool_with_address_scope( - 4, prefixes=['8.0.0.0/8']) as sp: - with self.bgp_speaker(sp['ip_version'], 1234) as speaker: - self.assertRaises(ext_bgp.BgpPeerNotFound, - self.bgp_plugin.add_bgp_peer, - self.context, - speaker['id'], - {'bgp_peer_id': 'aaa'}) diff --git a/vmware_nsx/tests/unit/services/flowclassifier/__init__.py b/vmware_nsx/tests/unit/services/flowclassifier/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vmware_nsx/tests/unit/services/flowclassifier/test_nsxv_driver.py b/vmware_nsx/tests/unit/services/flowclassifier/test_nsxv_driver.py deleted file mode 100644 index f0e099c3eb..0000000000 --- a/vmware_nsx/tests/unit/services/flowclassifier/test_nsxv_driver.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2016 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -from oslo_config import cfg -from oslo_utils import importutils - -from neutron.api import extensions as api_ext -from neutron.common import config -from neutron_lib.api.definitions import portbindings -from neutron_lib import context -from neutron_lib.plugins import directory - -from networking_sfc.db import flowclassifier_db as fdb -from networking_sfc.extensions import flowclassifier -from networking_sfc.services.flowclassifier.common import context as fc_ctx -from networking_sfc.services.flowclassifier.common import exceptions as fc_exc -from networking_sfc.tests import base -from networking_sfc.tests.unit.db import test_flowclassifier_db - -from vmware_nsx.services.flowclassifier.nsx_v import driver as nsx_v_driver -from vmware_nsx.tests import unit as vmware -from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns - - -class TestNsxvFlowClassifierDriver( - test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase, - base.NeutronDbPluginV2TestCase): - - resource_prefix_map = dict([ - (k, flowclassifier.FLOW_CLASSIFIER_PREFIX) - for k in flowclassifier.RESOURCE_ATTRIBUTE_MAP.keys() - ]) - - def setUp(self): - # init the flow classifier plugin - flowclassifier_plugin = ( - test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS) - - service_plugins = { - flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin - } - fdb.FlowClassifierDbPlugin.supported_extension_aliases = [ - flowclassifier.FLOW_CLASSIFIER_EXT] - fdb.FlowClassifierDbPlugin.path_prefix = ( - flowclassifier.FLOW_CLASSIFIER_PREFIX - ) - - super(TestNsxvFlowClassifierDriver, self).setUp( - ext_mgr=None, - plugin=None, - service_plugins=service_plugins - ) - - self.flowclassifier_plugin = importutils.import_object( - flowclassifier_plugin) - ext_mgr = api_ext.PluginAwareExtensionManager( - test_flowclassifier_db.extensions_path, - { - flowclassifier.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin - } - ) - app = config.load_paste_app('extensions_test_app') - self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) - self.ctx = context.get_admin_context() - - # use the fake vcns - mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) - mock_vcns_instance = mock_vcns.start() - self.fc2 = fake_vcns.FakeVcns() - mock_vcns_instance.return_value = self.fc2 - - # use the nsxv flow classifier driver - self._profile_id = 'serviceprofile-1' - cfg.CONF.set_override('service_insertion_profile_id', - self._profile_id, 'nsxv') - cfg.CONF.set_override('service_insertion_redirect_all', - True, 'nsxv') - - self.driver = nsx_v_driver.NsxvFlowClassifierDriver() - self.driver.initialize() - - self._fc_name = 'test1' - self._fc_description = 'test 1' - self._fc_source = '10.10.0.0/24' - self._fc_dest = '20.10.0.0/24' - self._fc_prot = 'TCP' - self._fc_source_ports = range(100, 115) - self._fc_dest_ports = range(80, 81) - self._fc = {'name': self._fc_name, - 'description': self._fc_description, - 'logical_source_port': None, - 'logical_destination_port': None, - 'source_ip_prefix': self._fc_source, - 'destination_ip_prefix': self._fc_dest, - 'protocol': self._fc_prot, - 'source_port_range_min': 
self._fc_source_ports[0], - 'source_port_range_max': self._fc_source_ports[-1], - 'destination_port_range_min': self._fc_dest_ports[0], - 'destination_port_range_max': self._fc_dest_ports[-1]} - - def tearDown(self): - super(TestNsxvFlowClassifierDriver, self).tearDown() - - def test_driver_init(self): - self.assertEqual(self._profile_id, self.driver._profile_id) - self.assertEqual(self.driver._security_group_id, '0') - - orig_get_plugin = directory.get_plugin - - def mocked_get_plugin(plugin=None): - # mock only the core plugin - if plugin: - return orig_get_plugin(plugin) - return mock_nsxv_plugin - - mock_nsxv_plugin = mock.Mock() - fc_plugin = directory.get_plugin(flowclassifier.FLOW_CLASSIFIER_EXT) - with mock.patch.object(directory, 'get_plugin', - new=mocked_get_plugin): - with mock.patch.object( - mock_nsxv_plugin, - 'add_vms_to_service_insertion') as fake_add: - with mock.patch.object( - fc_plugin, - 'create_flow_classifier') as fake_create: - self.driver.init_complete(None, None, {}) - # check that the plugin was called to add vms to the - # security group - self.assertTrue(fake_add.called) - # check that redirect_all flow classifier entry - # was created - self.assertTrue(fake_create.called) - - def test_create_flow_classifier_precommit(self): - with self.flow_classifier(flow_classifier=self._fc) as fc: - fc_context = fc_ctx.FlowClassifierContext( - self.flowclassifier_plugin, self.ctx, - fc['flow_classifier'] - ) - # just make sure it does not raise an exception - self.driver.create_flow_classifier_precommit(fc_context) - - def test_create_flow_classifier_precommit_logical_source_port(self): - with self.port( - name='port1', - device_owner='compute', - device_id='test', - arg_list=( - portbindings.HOST_ID, - ), - **{portbindings.HOST_ID: 'test'} - ) as src_port: - with self.flow_classifier(flow_classifier={ - 'name': 'test1', - 'logical_source_port': src_port['port']['id'] - }) as fc: - fc_context = fc_ctx.FlowClassifierContext( - self.flowclassifier_plugin, self.ctx, - fc['flow_classifier'] - ) - self.assertRaises( - fc_exc.FlowClassifierBadRequest, - self.driver.create_flow_classifier_precommit, - fc_context) - - def test_create_flow_classifier_precommit_logical_dest_port(self): - with self.port( - name='port1', - device_owner='compute', - device_id='test', - arg_list=( - portbindings.HOST_ID, - ), - **{portbindings.HOST_ID: 'test'} - ) as dst_port: - with self.flow_classifier(flow_classifier={ - 'name': 'test1', - 'logical_destination_port': dst_port['port']['id'] - }) as fc: - fc_context = fc_ctx.FlowClassifierContext( - self.flowclassifier_plugin, self.ctx, - fc['flow_classifier'] - ) - self.assertRaises( - fc_exc.FlowClassifierBadRequest, - self.driver.create_flow_classifier_precommit, - fc_context) - - def _validate_rule_structure(self, rule): - self.assertEqual(self._fc_description, rule.find('notes').text) - self.assertEqual('ipv4', rule.find('packetType').text) - self.assertEqual( - self._fc_source, - rule.find('sources').find('source').find('value').text) - self.assertEqual( - self._fc_dest, - rule.find('destinations').find('destination').find('value').text) - ports = "%s-%s" % (self._fc_source_ports[0], self._fc_source_ports[-1]) - if self._fc_source_ports[0] == self._fc_source_ports[-1]: - ports = str(self._fc_source_ports[0]) - self.assertEqual( - ports, - rule.find('services').find('service').find('sourcePort').text) - ports = "%s-%s" % (self._fc_dest_ports[0], self._fc_dest_ports[-1]) - if self._fc_dest_ports[0] == self._fc_dest_ports[-1]: - ports = 
str(self._fc_dest_ports[0]) - self.assertEqual( - ports, - rule.find('services').find('service').find('destinationPort').text) - self.assertEqual( - self._fc_prot, - rule.find('services').find('service').find('protocolName').text) - self.assertTrue(rule.find('name').text.startswith(self._fc_name)) - - def test_create_flow_classifier(self): - with self.flow_classifier(flow_classifier=self._fc) as fc: - fc_context = fc_ctx.FlowClassifierContext( - self.flowclassifier_plugin, self.ctx, - fc['flow_classifier'] - ) - with mock.patch.object( - self.driver, - 'update_redirect_section_in_backed') as mock_update_section: - self.driver.create_flow_classifier(fc_context) - self.assertTrue(mock_update_section.called) - section = mock_update_section.call_args[0][0] - self._validate_rule_structure(section.find('rule')) - - def test_update_flow_classifier(self): - with self.flow_classifier(flow_classifier=self._fc) as fc: - fc_context = fc_ctx.FlowClassifierContext( - self.flowclassifier_plugin, self.ctx, - fc['flow_classifier'] - ) - self.driver.create_flow_classifier(fc_context) - with mock.patch.object( - self.driver, - 'update_redirect_section_in_backed') as mock_update_section: - self.driver.update_flow_classifier(fc_context) - self.assertTrue(mock_update_section.called) - section = mock_update_section.call_args[0][0] - self._validate_rule_structure(section.find('rule')) - - def test_delete_flow_classifier(self): - with self.flow_classifier(flow_classifier=self._fc) as fc: - fc_context = fc_ctx.FlowClassifierContext( - self.flowclassifier_plugin, self.ctx, - fc['flow_classifier'] - ) - self.driver.create_flow_classifier(fc_context) - with mock.patch.object( - self.driver, - 'update_redirect_section_in_backed') as mock_update_section: - self.driver.delete_flow_classifier(fc_context) - self.assertTrue(mock_update_section.called) - section = mock_update_section.call_args[0][0] - # make sure the rule is not there - self.assertIsNone(section.find('rule')) diff --git a/vmware_nsx/tests/unit/services/ipam/test_nsxv_driver.py b/vmware_nsx/tests/unit/services/ipam/test_nsxv_driver.py deleted file mode 100644 index 7df4712b9d..0000000000 --- a/vmware_nsx/tests/unit/services/ipam/test_nsxv_driver.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2016 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslo_config import cfg - -from vmware_nsx.tests.unit.nsx_v import test_plugin - -from neutron_lib.api.definitions import provider_net as pnet - - -class TestNsxvIpamSubnets(test_plugin.TestSubnetsV2): - """Run the nsxv plugin subnets tests with the ipam driver""" - def setUp(self): - cfg.CONF.set_override( - "ipam_driver", - "vmware_nsx.services.ipam.nsx_v.driver.NsxvIpamDriver") - super(TestNsxvIpamSubnets, self).setUp() - - def provider_net(self): - name = 'dvs-provider-net' - providernet_args = {pnet.NETWORK_TYPE: 'vlan', - pnet.SEGMENTATION_ID: 43, - pnet.PHYSICAL_NETWORK: 'dvs-uuid'} - return self.network(name=name, do_delete=False, - providernet_args=providernet_args, - arg_list=(pnet.NETWORK_TYPE, - pnet.SEGMENTATION_ID, - pnet.PHYSICAL_NETWORK)) - - def test_provider_net_use_driver(self): - with self.provider_net() as net: - before = len(self.fc2._ipam_pools) - with self.subnet(network=net, cidr='10.10.10.0/29', - enable_dhcp=False): - self.assertEqual(before + 1, len(self.fc2._ipam_pools)) - - def test_ext_net_use_driver(self): - with self.network(router__external=True) as net: - before = len(self.fc2._ipam_pools) - with self.subnet(network=net, cidr='10.10.10.0/29', - enable_dhcp=False): - self.assertEqual(before + 1, len(self.fc2._ipam_pools)) - - def test_regular_net_dont_use_driver(self): - with self.network() as net: - before = len(self.fc2._ipam_pools) - with self.subnet(network=net, cidr='10.10.10.0/29', - enable_dhcp=False): - self.assertEqual(before, len(self.fc2._ipam_pools)) - - def test_no_more_ips(self): - # create a small provider network, and use all the IPs - with self.provider_net() as net: - with self.subnet(network=net, cidr='10.10.10.0/29', - enable_dhcp=False) as subnet: - # create ports on this subnet until there are no more free ips - # legal ips are 10.10.10.2 - 10.10.10.6 - fixed_ips = [{'subnet_id': subnet['subnet']['id']}] - for counter in range(5): - port_res = self._create_port( - self.fmt, net['network']['id'], fixed_ips=fixed_ips) - port = self.deserialize('json', port_res) - self.assertIn('port', port) - - # try to create another one - should fail - port_res = self._create_port( - self.fmt, net['network']['id'], fixed_ips=fixed_ips) - port = self.deserialize('json', port_res) - self.assertIn('NeutronError', port) - self.assertIn('message', port['NeutronError']) - self.assertTrue(('No more IP addresses available' in - port['NeutronError']['message'])) - - def test_use_same_ips(self): - # create a provider network and try to allocate the same ip twice - with self.provider_net() as net: - with self.subnet(network=net, cidr='10.10.10.0/24', - enable_dhcp=False) as subnet: - fixed_ips = [{'ip_address': '10.10.10.2', - 'subnet_id': subnet['subnet']['id']}] - # First port should succeed - port_res = self._create_port( - self.fmt, net['network']['id'], fixed_ips=fixed_ips) - port = self.deserialize('json', port_res) - self.assertIn('port', port) - - # try to create another one - should fail - port_res = self._create_port( - self.fmt, net['network']['id'], fixed_ips=fixed_ips) - port = self.deserialize('json', port_res) - self.assertIn('NeutronError', port) - self.assertIn('message', port['NeutronError']) - self.assertTrue(('already allocated in subnet' in - port['NeutronError']['message'])) - - -class TestNsxvIpamPorts(test_plugin.TestPortsV2): - """Run the nsxv plugin ports tests with the ipam driver""" - def setUp(self): - cfg.CONF.set_override( - "ipam_driver", - "vmware_nsx.services.ipam.nsx_v.driver.NsxvIpamDriver") - super(TestNsxvIpamPorts, 
self).setUp() diff --git a/vmware_nsx/tests/unit/services/l2gateway/test_nsxv_driver.py b/vmware_nsx/tests/unit/services/l2gateway/test_nsxv_driver.py deleted file mode 100644 index 947485c135..0000000000 --- a/vmware_nsx/tests/unit/services/l2gateway/test_nsxv_driver.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2015 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from unittest import mock - -from networking_l2gw.db.l2gateway import l2gateway_db -from neutron.tests import base -from neutron_lib import context -from neutron_lib import exceptions as n_exc - -from vmware_nsx.common import exceptions as nsx_exc -from vmware_nsx.db import nsxv_db -from vmware_nsx.dvs import dvs_utils -from vmware_nsx.services.l2gateway.nsx_v import driver as nsx_v_driver -from vmware_nsx.tests.unit.nsx_v import test_plugin - -CORE_PLUGIN = "vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2" - - -class TestL2gatewayDriver(base.BaseTestCase): - - def setUp(self): - super(TestL2gatewayDriver, self).setUp() - self.context = context.get_admin_context() - self.plugin = nsx_v_driver.NsxvL2GatewayDriver(mock.MagicMock()) - - def test_validate_device_with_multi_devices(self): - fake_l2gw_dict = {"l2_gateway": - {"tenant_id": "fake__tenant_id", - "name": "fake_l2gw", - "devices": [{"interfaces": - [{"name": "fake_inter"}], - "device_name": "fake_dev"}, - {"interfaces": - [{"name": "fake_inter_1"}], - "device_name": "fake_dev_1"}]}} - with mock.patch.object(l2gateway_db.L2GatewayMixin, '_admin_check'): - self.assertRaises(n_exc.InvalidInput, - self.plugin.create_l2_gateway, - self.context, fake_l2gw_dict) - - def test_validate_interface_with_multi_interfaces(self): - fake_l2gw_dict = {"l2_gateway": - {"tenant_id": "fake_tenant_id", - "name": "fake_l2gw", - "devices": [{"interfaces": - [{"name": "fake_inter_1"}, - {"name": "fake_inter_2"}], - "device_name": "fake_dev"}]}} - with mock.patch.object(l2gateway_db.L2GatewayMixin, '_admin_check'): - self.assertRaises(n_exc.InvalidInput, - self.plugin.create_l2_gateway, - self.context, fake_l2gw_dict) - - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._nsxv') - def test_validate_interface_with_invalid_interfaces(self, _nsxv): - fake_interfaces = [{"name": "fake_inter"}] - _nsxv.vcns.validate_network.return_value = False - self.assertRaises(n_exc.InvalidInput, - self.plugin._validate_interface_list, - self.context, - fake_interfaces) - - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') - def test_create_gw_edge_failure(self, edge_manager): - with mock.patch.object(nsxv_db, - 'get_nsxv_router_binding', - return_value=None): - self.assertRaises(nsx_exc.NsxL2GWDeviceNotFound, - self.plugin._create_l2_gateway_edge, - self.context) - - @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' - 'L2GatewayMixin._admin_check') - @mock.patch('vmware_nsx.services.l2gateway.' 
- 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._create_l2_gateway_edge') - @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' - 'L2GatewayMixin.create_l2_gateway') - def test_create_l2_gateway_failure(self, create_l2gw, _create_l2gw_edge, - val_inter, val_dev, _admin_check): - fake_l2gw_dict = {"l2_gateway": - {"tenant_id": "fake_teannt_id", - "name": "fake_l2gw", - "devices": [{"interfaces": - [{"name": "fake_inter"}], - "device_name": "fake_dev"}]}} - _create_l2gw_edge.side_effect = nsx_exc.NsxL2GWDeviceNotFound - self.assertRaises(nsx_exc.NsxL2GWDeviceNotFound, - self.plugin.create_l2_gateway, - self.context, fake_l2gw_dict) - - @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' - 'L2GatewayMixin._admin_check') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._create_l2_gateway_edge') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') - def test_create_l2_gateway(self, edge_manager, _create_l2gw_edge, - val_inter, val_dev, _admin_check): - fake_l2gw_dict = {"l2_gateway": - {"tenant_id": "fake_teannt_id", - "name": "fake_l2gw", - "devices": [{"interfaces": - [{"name": "fake_inter"}], - "device_name": "fake_dev"}]}} - fake_devices = [{"interfaces": [{"name": "fake_inter"}], - "device_name": "fake_dev"}] - fake_interfaces = [{"name": "fake_inter"}] - _create_l2gw_edge.return_value = 'fake_dev' - self.plugin.create_l2_gateway(self.context, fake_l2gw_dict) - _admin_check.assert_called_with(self.context, 'CREATE') - val_dev.assert_called_with(fake_devices) - val_inter.assert_called_with(self.context, fake_interfaces) - - @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' - 'L2GatewayMixin._admin_check') - @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' - 'L2GatewayMixin.get_l2_gateway_connection') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._get_device') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._nsxv') - def test_delete_l2_gateway_connection(self, nsxv, get_devices, - get_conn, admin_check): - fake_conn_dict = {'l2_gateway_id': 'fake_l2gw_id'} - fake_device_dict = {'id': 'fake_dev_id', - 'device_name': 'fake_dev_name'} - get_conn.return_value = fake_conn_dict - get_devices.return_value = fake_device_dict - self.plugin.delete_l2_gateway_connection(self.context, fake_conn_dict) - admin_check.assert_called_with(self.context, 'DELETE') - get_conn.assert_called_with(self.context, fake_conn_dict) - get_devices.assert_called_with(self.context, 'fake_l2gw_id') - self.plugin._nsxv().del_bridge.asert_called_with('fake_dev_name') - - @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' - 'L2GatewayMixin._admin_check') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._get_device') - @mock.patch('vmware_nsx.db.' - 'nsxv_db.get_nsxv_router_binding_by_edge') - @mock.patch('vmware_nsx.services.l2gateway.' 
- 'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') - def test_delete_l2_gateway(self, edge_manager, get_nsxv_router, - get_devices, admin_check): - fake_device_dict = {"id": "fake_dev_id", - "device_name": "fake_edge_name", - "l2_gateway_id": "fake_l2gw_id"} - fake_rtr_binding = {"router_id": 'fake_router_id'} - get_devices.return_value = fake_device_dict - get_nsxv_router.return_value = fake_rtr_binding - self.plugin.delete_l2_gateway(self.context, 'fake_l2gw_id') - admin_check.assert_called_with(self.context, 'DELETE') - get_devices.assert_called_with(self.context, 'fake_l2gw_id') - get_nsxv_router.assert_called_with(self.context.session, - "fake_edge_name") - - -class TestL2GatewayDriverRouter(test_plugin.NsxVPluginV2TestCase): - - @mock.patch.object(dvs_utils, 'dvs_create_session') - def setUp(self, *mocks): - # init the nsxv plugin, edge manager and fake vcns - super(TestL2GatewayDriverRouter, self).setUp(plugin=CORE_PLUGIN, - ext_mgr=None) - self.context = context.get_admin_context() - # init the L2 gateway driver - self.driver = nsx_v_driver.NsxvL2GatewayDriver(mock.MagicMock()) - - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') - @mock.patch('vmware_nsx.services.l2gateway.' - 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') - def test_create_l2_gateway_router(self, val_inter, val_dev): - # Verify that creating the router doesn't fail - fake_l2gw_dict = {"l2_gateway": - {"tenant_id": "fake_teannt_id", - "name": "fake_l2gw", - "devices": [{"interfaces": - [{"name": "fake_inter"}], - "device_name": "fake_dev"}]}} - self.driver.create_l2_gateway(self.context, fake_l2gw_dict) - - def test_create_l2_gateway_router_edge(self): - # Verify that the router edge is really created - edge_id = self.driver._create_l2_gateway_edge(self.context) - self.assertEqual('edge-1', edge_id) diff --git a/vmware_nsx/tests/unit/services/qos/test_nsxv_notification.py b/vmware_nsx/tests/unit/services/qos/test_nsxv_notification.py deleted file mode 100644 index baa6363cba..0000000000 --- a/vmware_nsx/tests/unit/services/qos/test_nsxv_notification.py +++ /dev/null @@ -1,340 +0,0 @@ -# Copyright 2016 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import copy -from unittest import mock - -from neutron.services.qos import qos_plugin -from neutron.tests.unit.services.qos import base -from neutron_lib import context -from neutron_lib.objects import registry as obj_reg -from neutron_lib.plugins import directory -from neutron_lib.services.qos import constants as qos_consts -from oslo_config import cfg -from oslo_utils import uuidutils - -from vmware_nsx.dvs import dvs -from vmware_nsx.dvs import dvs_utils -from vmware_nsx.services.qos.common import utils as qos_com_utils -from vmware_nsx.services.qos.nsx_v import driver as qos_driver -from vmware_nsx.services.qos.nsx_v import utils as qos_utils -from vmware_nsx.tests.unit.nsx_v import test_plugin - -CORE_PLUGIN = "vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2" -QosPolicy = obj_reg.load_class('QosPolicy') -QosPolicyDefault = obj_reg.load_class('QosPolicyDefault') -QosBandwidthLimitRule = obj_reg.load_class('QosBandwidthLimitRule') -QosDscpMarkingRule = obj_reg.load_class('QosDscpMarkingRule') - - -class TestQosNsxVNotification(test_plugin.NsxVPluginV2TestCase, - base.BaseQosTestCase): - - @mock.patch.object(dvs_utils, 'dvs_create_session') - def setUp(self, *mocks): - # init the nsx-v plugin for testing with DVS - self._init_dvs_config() - # Reset the drive to re-create it - qos_driver.DRIVER = None - # Skip Octavia init because of RPC conflicts - with mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." - "NSXOctaviaListener.__init__", return_value=None),\ - mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." - "NSXOctaviaStatisticsCollector.__init__", - return_value=None): - super(TestQosNsxVNotification, self).setUp(plugin=CORE_PLUGIN, - ext_mgr=None, - with_md_proxy=False) - self.setup_coreplugin(CORE_PLUGIN) - - plugin_instance = directory.get_plugin() - self._core_plugin = plugin_instance - self._core_plugin.init_is_complete = True - - self.qos_plugin = qos_plugin.QoSPlugin() - mock.patch.object(qos_utils.NsxVQosRule, - '_get_qos_plugin', - return_value=self.qos_plugin).start() - - # Pre defined QoS data for the tests - self.test_tenant_id = '1d7ddf4daf1f47529b5cc93b2e843980' - self.ctxt = context.Context('fake_user', self.test_tenant_id) - - self.policy_data = { - 'policy': {'id': uuidutils.generate_uuid(), - 'project_id': self.test_tenant_id, - 'name': 'test-policy', - 'description': 'Test policy description', - 'shared': True}} - - self.rule_data = { - 'bandwidth_limit_rule': { - 'id': uuidutils.generate_uuid(), - 'max_kbps': 100, - 'max_burst_kbps': 150, - 'type': qos_consts.RULE_TYPE_BANDWIDTH_LIMIT}} - self.ingress_rule_data = { - 'bandwidth_limit_rule': { - 'id': uuidutils.generate_uuid(), - 'max_kbps': 200, - 'max_burst_kbps': 250, - 'direction': 'ingress', - 'type': qos_consts.RULE_TYPE_BANDWIDTH_LIMIT}} - self.dscp_rule_data = { - 'dscp_marking_rule': { - 'id': uuidutils.generate_uuid(), - 'dscp_mark': 22, - 'type': qos_consts.RULE_TYPE_DSCP_MARKING}} - - self.policy = QosPolicy( - self.ctxt, **self.policy_data['policy']) - - # egress bw rule - self.rule = QosBandwidthLimitRule( - self.ctxt, **self.rule_data['bandwidth_limit_rule']) - # ingress bw rule - self.ingress_rule = QosBandwidthLimitRule( - self.ctxt, **self.ingress_rule_data['bandwidth_limit_rule']) - # dscp marking rule - self.dscp_rule = QosDscpMarkingRule( - self.ctxt, **self.dscp_rule_data['dscp_marking_rule']) - - self._net_data = {'network': { - 'name': 'test-qos', - 'tenant_id': self.test_tenant_id, - 'qos_policy_id': self.policy.id, - 'port_security_enabled': False, - 
'admin_state_up': False, - 'shared': False - }} - self._rules = [self.rule_data['bandwidth_limit_rule']] - self._dscp_rules = [self.dscp_rule_data['dscp_marking_rule']] - - mock.patch.object(QosPolicy, 'obj_load_attr').start() - - def _init_dvs_config(self): - # Ensure that DVS is enabled - # and enable the DVS features for nsxv qos support - cfg.CONF.set_override('host_ip', 'fake_ip', group='dvs') - cfg.CONF.set_override('host_username', 'fake_user', group='dvs') - cfg.CONF.set_override('host_password', 'fake_password', group='dvs') - cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') - cfg.CONF.set_default('use_dvs_features', True, 'nsxv') - - def _create_net(self, net_data=None): - if net_data is None: - net_data = self._net_data - net_data['tenant_id'] = self.test_tenant_id - with mock.patch('vmware_nsx.services.qos.common.utils.' - 'get_network_policy_id', - return_value=self.policy.id): - return self._core_plugin.create_network(self.ctxt, net_data) - - @mock.patch.object(qos_com_utils, 'update_network_policy_binding') - @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') - def test_create_network_with_policy_rule(self, - dvs_update_mock, - update_bindings_mock): - """Test the DVS update when a QoS rule is attached to a network""" - # Create a policy with a rule - _policy = QosPolicy( - self.ctxt, **self.policy_data['policy']) - setattr(_policy, "rules", [self.rule, self.ingress_rule, - self.dscp_rule]) - - with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' - 'get_policy', - return_value=_policy) as get_rules_mock,\ - mock.patch.object(self.plugin, '_validate_qos_policy_id'): - # create the network to use this policy - net = self._create_net() - - # make sure the network-policy binding was updated - update_bindings_mock.assert_called_once_with( - self.ctxt, net['id'], self.policy.id) - # make sure the qos rule was found - get_rules_mock.assert_called_with(self.ctxt, self.policy.id) - # make sure the dvs was updated - self.assertTrue(dvs_update_mock.called) - - @mock.patch.object(qos_com_utils, 'update_network_policy_binding') - @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') - def test_create_network_with_default_policy(self, - dvs_update_mock, - update_bindings_mock): - """Test the DVS update when default policy attached to a network""" - # Create a default policy with a rule - policy_data = copy.deepcopy(self.policy_data['policy']) - policy_data['is_default'] = True - _policy = QosPolicy(self.ctxt, **policy_data) - setattr(_policy, "rules", [self.rule, self.dscp_rule]) - default_policy = QosPolicyDefault( - qos_policy_id=policy_data['id']) - - with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 
- 'get_policy', - return_value=_policy) as get_rules_mock,\ - mock.patch.object( - QosPolicyDefault, 'get_object', return_value=default_policy): - # create the network (with no specific qos policy) - net_data = copy.deepcopy(self._net_data) - del net_data['network']['qos_policy_id'] - net = self._create_net(net_data=net_data) - - # make sure the network-policy binding was updated - update_bindings_mock.assert_called_once_with( - self.ctxt, net['id'], self.policy.id) - # make sure the qos rule was found - get_rules_mock.assert_called_with(self.ctxt, self.policy.id) - # make sure the dvs was updated - self.assertTrue(dvs_update_mock.called) - - def _test_rule_action_notification(self, action): - # Create a policy with a rule - _policy = QosPolicy( - self.ctxt, **self.policy_data['policy']) - - # set the rule in the policy data - setattr(_policy, "rules", [self.rule]) - - with mock.patch.object(qos_com_utils, - 'update_network_policy_binding'),\ - mock.patch.object(dvs.DvsManager, 'update_port_groups_config' - ) as dvs_update_mock,\ - mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' - 'get_policy', - return_value=_policy) as get_rules_mock,\ - mock.patch.object(QosPolicy, 'get_object', return_value=_policy): - # create the network to use this policy - net = self._create_net() - dvs_update_mock.called = False - get_rules_mock.called = False - - with mock.patch('neutron.objects.db.api.create_object', - return_value=self.rule_data),\ - mock.patch('neutron.objects.db.api.update_object', - return_value=self.rule_data),\ - mock.patch('neutron.objects.db.api.delete_object'),\ - mock.patch.object(_policy, 'get_bound_networks', - return_value=[net['id']]),\ - mock.patch.object(self.ctxt.session, 'expunge'): - - # create/update/delete the rule - if action == 'create': - self.qos_plugin.create_policy_bandwidth_limit_rule( - self.ctxt, self.policy.id, self.rule_data) - elif action == 'update': - self.qos_plugin.update_policy_bandwidth_limit_rule( - self.ctxt, self.rule.id, - self.policy.id, self.rule_data) - else: - self.qos_plugin.delete_policy_bandwidth_limit_rule( - self.ctxt, self.rule.id, self.policy.id) - - # make sure the qos rule was found - self.assertTrue(get_rules_mock.called) - # make sure the dvs was updated - self.assertTrue(dvs_update_mock.called) - - def test_create_rule_notification(self): - """Test the DVS update when a QoS rule, attached to a network, - is created - """ - self._test_rule_action_notification('create') - - def test_update_rule_notification(self): - """Test the DVS update when a QoS rule, attached to a network, - is modified - """ - self._test_rule_action_notification('update') - - def test_delete_rule_notification(self): - """Test the DVS update when a QoS rule, attached to a network, - is deleted - """ - self._test_rule_action_notification('delete') - - def _test_dscp_rule_action_notification(self, action): - # Create a policy with a rule - _policy = QosPolicy( - self.ctxt, **self.policy_data['policy']) - - # set the rule in the policy data - setattr(_policy, "rules", [self.dscp_rule]) - plugin = self.qos_plugin - with mock.patch.object(qos_com_utils, - 'update_network_policy_binding'),\ - mock.patch.object(dvs.DvsManager, 'update_port_groups_config' - ) as dvs_update_mock,\ - mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 
- 'get_policy', - return_value=_policy) as rules_mock,\ - mock.patch.object(QosPolicy, 'get_object', - return_value=_policy),\ - mock.patch.object(self.ctxt.session, 'expunge'): - # create the network to use this policy - net = self._create_net() - dvs_update_mock.called = False - rules_mock.called = False - - with mock.patch('neutron.objects.db.api.create_object', - return_value=self.dscp_rule_data),\ - mock.patch('neutron.objects.db.api.update_object', - return_value=self.dscp_rule_data),\ - mock.patch('neutron.objects.db.api.delete_object'),\ - mock.patch.object(_policy, 'get_bound_networks', - return_value=[net['id']]),\ - mock.patch.object(self.ctxt.session, 'expunge'): - - # create/update/delete the rule - if action == 'create': - plugin.create_policy_dscp_marking_rule( - self.ctxt, - self.policy.id, - self.dscp_rule_data) - elif action == 'update': - plugin.update_policy_dscp_marking_rule( - self.ctxt, - self.dscp_rule.id, - self.policy.id, - self.dscp_rule_data) - else: - plugin.delete_policy_dscp_marking_rule( - self.ctxt, - self.dscp_rule.id, - self.policy.id) - - # make sure the qos rule was found - self.assertTrue(rules_mock.called) - - # make sure the dvs was updated - self.assertTrue(dvs_update_mock.called) - - def test_create_dscp_rule_notification(self): - """Test the DVS update when a QoS DSCP rule, attached to a network, - is created - """ - self._test_dscp_rule_action_notification('create') - - def test_update_dscp_rule_notification(self): - """Test the DVS update when a QoS DSCP rule, attached to a network, - is modified - """ - self._test_dscp_rule_action_notification('update') - - def test_delete_dscp_rule_notification(self): - """Test the DVS update when a QoS DSCP rule, attached to a network, - is deleted - """ - self._test_dscp_rule_action_notification('delete') diff --git a/vmware_nsx/tests/unit/services/vpnaas/test_nsxv_vpnaas.py b/vmware_nsx/tests/unit/services/vpnaas/test_nsxv_vpnaas.py deleted file mode 100644 index 446dcd41eb..0000000000 --- a/vmware_nsx/tests/unit/services/vpnaas/test_nsxv_vpnaas.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2016 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import contextlib - -from unittest import mock - -from neutron_lib.api.definitions import external_net as extnet_apidef -from neutron_lib import context -from neutron_lib.plugins import directory -from neutron_vpnaas.db.vpn import vpn_models # noqa -from neutron_vpnaas.extensions import vpnaas -from oslo_utils import uuidutils - -from vmware_nsx.common import exceptions as nsxv_exc -from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc -from vmware_nsx.services.vpnaas.nsxv import ipsec_driver -from vmware_nsx.tests.unit.nsx_v import test_plugin - - -_uuid = uuidutils.generate_uuid - -DRIVER_PATH = "vmware_nsx.services.vpnaas.nsxv.ipsec_driver.NSXvIPsecVpnDriver" -VALI_PATH = "vmware_nsx.services.vpnaas.nsxv.ipsec_validator.IPsecValidator" -FAKE_ROUTER_ID = "aaaaaa-bbbbb-ccc" -FAKE_VPNSERVICE_ID = _uuid() -FAKE_IPSEC_CONNECTION = {"vpnservice_id": FAKE_VPNSERVICE_ID, - "id": _uuid()} -FAKE_EDGE_ID = _uuid() -FAKE_IPSEC_VPN_SITE = {"peerIp": "192.168.1.1"} -FAKE_VCNSAPIEXC = {"status": "fail", - "head": "fake_head", - "response": "error"} -FAKE_NEW_CONNECTION = {"peer_cidrs": "192.168.1.0/24"} - - -class TestVpnaasDriver(test_plugin.NsxVPluginV2TestCase): - - def setUp(self): - super(TestVpnaasDriver, self).setUp() - self.context = context.get_admin_context() - self.service_plugin = mock.Mock() - self.validator = mock.Mock() - self.driver = ipsec_driver.NSXvIPsecVpnDriver(self.service_plugin) - self.plugin = directory.get_plugin() - self.l3plugin = self.plugin - - @contextlib.contextmanager - def router(self, name='vpn-test-router', tenant_id=_uuid(), - admin_state_up=True, **kwargs): - request = {'router': {'tenant_id': tenant_id, - 'name': name, - 'admin_state_up': admin_state_up}} - for arg in kwargs: - request['router'][arg] = kwargs[arg] - router = self.l3plugin.create_router(self.context, request) - yield router - - @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) - @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) - @mock.patch('%s._generate_new_sites' % DRIVER_PATH) - @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) - @mock.patch('%s._update_status' % DRIVER_PATH) - @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) - def test_create_ipsec_site_connection(self, mock_update_fw, - mock_update_status, - mock_update_ipsec, mock_gen_new, - mock_get_id, - mock_conv_ipsec): - mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) - mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE - mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE - self.driver.create_ipsec_site_connection(self.context, - FAKE_IPSEC_CONNECTION) - mock_conv_ipsec.assert_called_with(self.context, - FAKE_IPSEC_CONNECTION) - mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) - mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) - mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, - FAKE_IPSEC_VPN_SITE, - enabled=True) - mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) - mock_update_status.assert_called_with( - self.context, - FAKE_IPSEC_CONNECTION["vpnservice_id"], - FAKE_IPSEC_CONNECTION["id"], - "ACTIVE") - - @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) - @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) - @mock.patch('%s._generate_new_sites' % DRIVER_PATH) - @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) - @mock.patch('%s._update_status' % DRIVER_PATH) - def test_create_ipsec_site_connection_fail(self, - mock_update_status, - mock_update_ipsec, - mock_gen_new, mock_get_id, - mock_conv_ipsec): - mock_get_id.return_value = 
(FAKE_ROUTER_ID, FAKE_EDGE_ID) - mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE - mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE - mock_update_ipsec.side_effect = ( - vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) - self.assertRaises(nsxv_exc.NsxPluginException, - self.driver.create_ipsec_site_connection, - self.context, FAKE_IPSEC_CONNECTION) - mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) - mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) - mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) - mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, - FAKE_IPSEC_VPN_SITE, - enabled=True) - mock_update_status.assert_called_with( - self.context, - FAKE_IPSEC_CONNECTION["vpnservice_id"], - FAKE_IPSEC_CONNECTION["id"], - "ERROR") - - @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) - @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) - @mock.patch('%s._generate_new_sites' % DRIVER_PATH) - @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) - @mock.patch('%s._update_status' % DRIVER_PATH) - @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) - def test_update_fw_fail(self, mock_update_fw, mock_update_status, - mock_update_ipsec, mock_gen_new, - mock_get_id, mock_conv_ipsec): - mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) - mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE - mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE - mock_update_fw.side_effect = ( - vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) - self.assertRaises(nsxv_exc.NsxPluginException, - self.driver.create_ipsec_site_connection, - self.context, FAKE_IPSEC_CONNECTION) - mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) - mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) - mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) - mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, - FAKE_IPSEC_VPN_SITE, - enabled=True) - mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) - mock_update_status.assert_called_with( - self.context, - FAKE_IPSEC_CONNECTION["vpnservice_id"], - FAKE_IPSEC_CONNECTION["id"], - "ERROR") - - @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) - @mock.patch('%s._update_site_dict' % DRIVER_PATH) - @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) - @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) - def test_update_ipsec(self, mock_update_fw, mock_update_ipsec, - mock_update_sites, mock_get_id): - mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) - mock_update_sites.return_value = FAKE_IPSEC_VPN_SITE - self.driver.update_ipsec_site_connection(self.context, - FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, - FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) - mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) - - @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) - @mock.patch('%s._update_site_dict' % DRIVER_PATH) - @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) - @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) - def test_update_ipsec_fail_with_notfound(self, mock_update_fw, - mock_update_ipsec, - mock_update_sites, mock_get_id): - mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) - mock_update_sites.return_value = {} - self.assertRaises(nsxv_exc.NsxIPsecVpnMappingNotFound, - self.driver.update_ipsec_site_connection, - self.context, FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - 
mock_update_sites.assert_called_with(self.context, - FAKE_EDGE_ID, - FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - - @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) - @mock.patch('%s._update_site_dict' % DRIVER_PATH) - @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) - @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) - def test_update_ipsec_fail_with_fw_fail(self, mock_update_fw, - mock_update_ipsec, - mock_update_sites, mock_get_id): - mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) - mock_update_fw.side_effect = ( - vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) - self.assertRaises(nsxv_exc.NsxPluginException, - self.driver.update_ipsec_site_connection, - self.context, FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, - FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) - - @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) - @mock.patch('%s._update_site_dict' % DRIVER_PATH) - @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) - @mock.patch('%s._update_status' % DRIVER_PATH) - def test_update_ipsec_fail_with_site_fail(self, mock_update_status, - mock_update_ipsec, - mock_update_sites, mock_get_id): - mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) - mock_update_sites.return_value = FAKE_IPSEC_VPN_SITE - mock_update_ipsec.side_effect = ( - vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) - self.assertRaises(nsxv_exc.NsxPluginException, - self.driver.update_ipsec_site_connection, - self.context, - FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, - FAKE_IPSEC_CONNECTION, - FAKE_NEW_CONNECTION) - mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, - FAKE_IPSEC_VPN_SITE) - mock_update_status.assert_called_with( - self.context, - FAKE_IPSEC_CONNECTION["vpnservice_id"], - FAKE_IPSEC_CONNECTION["id"], - "ERROR") - - def test_create_vpn_service_legal(self): - """Create a legal vpn service""" - # create an external network with a subnet, and an exclusive router - providernet_args = {extnet_apidef.EXTERNAL: True} - with self.network(name='ext-net', - providernet_args=providernet_args, - arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ - self.subnet(ext_net),\ - self.router(router_type='exclusive', - external_gateway_info={'network_id': - ext_net['network']['id']}) as router,\ - self.subnet(cidr="20.0.0.0/24") as sub: - # add an interface to the router - self.l3plugin.add_router_interface( - self.context, - router['id'], - {'subnet_id': sub['subnet']['id']}) - # create the service - vpnservice = {'router_id': router['id'], - 'id': _uuid(), - 'subnet_id': sub['subnet']['id']} - with mock.patch.object(self.driver, '_get_gateway_ips', - return_value=(None, None)): - self.driver.create_vpnservice(self.context, vpnservice) - - def test_create_vpn_service_on_shared_router(self): - """Creating a service with shared router is not allowed""" - # create an external network with a subnet, and a shared router - providernet_args = {extnet_apidef.EXTERNAL: True} - with self.network(name='ext-net', - providernet_args=providernet_args, - arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ - self.subnet(ext_net),\ - self.router(router_type='shared', - external_gateway_info={'network_id': - ext_net['network']['id']}) as router,\ - self.subnet(cidr="20.0.0.0/24") as sub: - # add an interface to the router - self.l3plugin.add_router_interface( - self.context, - router['id'], - {'subnet_id': 
sub['subnet']['id']})
-        # create the service
-        vpnservice = {'router_id': router['id'],
-                      'id': _uuid(),
-                      'subnet_id': sub['subnet']['id']}
-        self.assertRaises(nsxv_exc.NsxPluginException,
-                          self.driver.create_vpnservice,
-                          self.context, vpnservice)
-
-    def test_create_vpn_service_on_router_without_if(self):
-        """Creating a service with unattached subnet is not allowed"""
-        # create an external network with a subnet, and an exclusive router
-        providernet_args = {extnet_apidef.EXTERNAL: True}
-        with self.network(name='ext-net',
-                          providernet_args=providernet_args,
-                          arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\
-            self.subnet(ext_net),\
-            self.router(router_type='exclusive',
-                        external_gateway_info={'network_id':
-                                               ext_net['network']['id']}) as router,\
-            self.subnet() as sub:
-            # create the service
-            vpnservice = {'router_id': router['id'],
-                          'id': _uuid(),
-                          'subnet_id': sub['subnet']['id']}
-            self.assertRaises(vpnaas.SubnetIsNotConnectedToRouter,
-                              self.driver.create_vpnservice,
-                              self.context, vpnservice)
-
-    def test_create_vpn_service_without_subnet(self):
-        """Creating a service without a subnet is not allowed"""
-        # create an external network with a subnet, and an exclusive router
-        providernet_args = {extnet_apidef.EXTERNAL: True}
-        with self.network(name='ext-net',
-                          providernet_args=providernet_args,
-                          arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\
-            self.subnet(ext_net),\
-            self.router(router_type='exclusive',
-                        external_gateway_info={'network_id':
-                                               ext_net['network']['id']}) as router,\
-            self.subnet(cidr="20.0.0.0/24") as sub:
-            # add an interface to the router
-            self.l3plugin.add_router_interface(
-                self.context,
-                router['id'],
-                {'subnet_id': sub['subnet']['id']})
-            # create the service without the subnet
-            vpnservice = {'router_id': router['id'],
-                          'id': _uuid(),
-                          'subnet_id': None}
-            self.assertRaises(nsxv_exc.NsxPluginException,
-                              self.driver.create_vpnservice,
-                              self.context, vpnservice)
diff --git a/vmware_nsx/tests/unit/shell/test_admin_utils.py b/vmware_nsx/tests/unit/shell/test_admin_utils.py
index 6b708d4073..5635d9e2d7 100644
--- a/vmware_nsx/tests/unit/shell/test_admin_utils.py
+++ b/vmware_nsx/tests/unit/shell/test_admin_utils.py
@@ -27,22 +27,17 @@ from neutron.db import servicetype_db  # noqa
 from neutron.quota import resource_registry
 from neutron.tests import base
 from neutron_lib.callbacks import registry
-from neutron_lib.plugins import constants
 
 from vmware_nsx._i18n import _
 from vmware_nsx.common import config  # noqa
-from vmware_nsx.db import nsxv_db
-from vmware_nsx.dvs import dvs_utils
 from vmware_nsx.services.lbaas.octavia import octavia_listener
 from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
 from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as nsxp_utils
-from vmware_nsx.shell.admin.plugins.nsxv.resources import migration
 from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as nsxv_utils
 from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as nsxv3_utils
 from vmware_nsx.shell import resources
 from vmware_nsx.tests import unit as vmware
 from vmware_nsx.tests.unit.nsx_p import test_plugin as test_p_plugin
-from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin
 from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin
 from vmware_nsxlib.v3 import client as v3_client
 from vmware_nsxlib.v3 import core_resources
@@ -125,177 +120,6 @@ class AbstractTestAdminUtils(base.BaseTestCase, metaclass=abc.ABCMeta):
         return self._plugin.create_router(self.edgeapi.context,
                                           data)
 
 
-class TestNsxvAdminUtils(AbstractTestAdminUtils,
-                         test_v_plugin.NsxVPluginV2TestCase):
-
-    def _get_plugin_name(self):
-        return 'nsxv'
-
-    def _init_mock_plugin(self, *mocks):
-        super(TestNsxvAdminUtils, self)._init_mock_plugin()
-
-        # support the dvs manager:
-        mock.patch.object(dvs_utils, 'dvs_create_session').start()
-        # override metadata get-object
-        dummy_lb = {
-            'enabled': True,
-            'enableServiceInsertion': True,
-            'accelerationEnabled': True,
-            'virtualServer': [],
-            'applicationProfile': [],
-            'pool': [],
-            'applicationRule': []
-        }
-        mock.patch('vmware_nsx.plugins.nsx_v.vshield.nsxv_edge_cfg_obj.'
-                   'NsxvEdgeCfgObj.get_object',
-                   return_value=dummy_lb).start()
-
-        # Tests shouldn't wait for dummy spawn jobs to finish
-        mock.patch('vmware_nsx.shell.admin.plugins.nsxv.resources.utils.'
-                   'NsxVPluginWrapper.count_spawn_jobs',
-                   return_value=0).start()
-
-        self._plugin = nsxv_utils.NsxVPluginWrapper()
-
-        def get_plugin_mock(alias=constants.CORE):
-            if alias in (constants.CORE, constants.L3):
-                return self._plugin
-
-        mock.patch("neutron_lib.plugins.directory.get_plugin",
-                   side_effect=get_plugin_mock).start()
-
-        # Create a router to make sure we have deployed an edge
-        self.router = self._create_router()
-        self.dist_router = self._create_router(dist=True)
-        self.network = self._create_net()
-
-    def tearDown(self):
-        if self.router and self.router.get('id'):
-            self._plugin.delete_router(
-                self.edgeapi.context, self.router['id'])
-        if self.dist_router and self.dist_router.get('id'):
-            self._plugin.delete_router(
-                self.edgeapi.context, self.dist_router['id'])
-        if self.network and self.network.get('id'):
-            self._plugin.delete_network(
-                self.edgeapi.context, self.network['id'])
-        super(TestNsxvAdminUtils, self).tearDown()
-
-    def test_nsxv_resources(self):
-        self._test_resources(resources.nsxv_resources)
-
-    def _test_edge_nsx_update(self, edge_id, params):
-        args = {'property': ["edge-id=%s" % edge_id]}
-        args['property'].extend(params)
-        self._test_resource('edges', 'nsx-update', **args)
-
-    def _create_router(self, dist=False):
-        # Create an exclusive router (with an edge)
-        tenant_id = uuidutils.generate_uuid()
-        data = {'router': {'tenant_id': tenant_id}}
-        data['router']['name'] = 'dummy'
-        data['router']['admin_state_up'] = True
-        if dist:
-            data['router']['distributes'] = True
-        else:
-            data['router']['router_type'] = 'exclusive'
-
-        return self._plugin.create_router(self.edgeapi.context, data)
-
-    def _create_net(self):
-        tenant_id = uuidutils.generate_uuid()
-        data = {'network': {'tenant_id': tenant_id,
-                            'name': 'dummy',
-                            'admin_state_up': True,
-                            'shared': False}}
-        net = self._plugin.create_network(self.edgeapi.context, data)
-        data = {'subnet': {'tenant_id': tenant_id,
-                           'name': 'dummy',
-                           'admin_state_up': True,
-                           'network_id': net['id'],
-                           'cidr': '1.1.1.0/16',
-                           'enable_dhcp': True,
-                           'ip_version': 4,
-                           'dns_nameservers': None,
-                           'host_routes': None,
-                           'allocation_pools': None}}
-        self._plugin.create_subnet(self.edgeapi.context, data)
-        return net
-
-    def get_edge_id(self):
-        bindings = nsxv_db.get_nsxv_router_bindings(
-            self.edgeapi.context.session)
-        for binding in bindings:
-            if binding.edge_id:
-                return binding.edge_id
-        # use a dummy edge
-        return "edge-1"
-
-    def test_edge_nsx_updates(self):
-        """Test eges/nsx-update utility with different inputs."""
-        edge_id = self.get_edge_id()
-        self._test_edge_nsx_update(edge_id, ["appliances=true"])
-        self._test_edge_nsx_update(edge_id, ["size=compact"])
-        self._test_edge_nsx_update(edge_id, ["hostgroup=update"])
-        self._test_edge_nsx_update(edge_id, ["hostgroup=all"])
-        self._test_edge_nsx_update(edge_id, ["hostgroup=clean"])
-        self._test_edge_nsx_update(edge_id, ["highavailability=True"])
-        self._test_edge_nsx_update(edge_id, ["resource=cpu", "limit=100"])
-        self._test_edge_nsx_update(edge_id, ["syslog-server=1.1.1.1",
-                                             "syslog-proto=tcp",
-                                             "log-level=debug"])
-
-    def test_bad_args(self):
-        args = {'property': ["xxx"]}
-        errors = self._test_resource_with_errors(
-            'networks', 'nsx-update', **args)
-        self.assertEqual(1, len(errors))
-
-    def test_resources_with_common_args(self):
-        """Run all nsxv admin utilities with some common arguments
-
-        Using arguments like edge-id which many apis need
-        This improves the test coverage
-        """
-        edge_id = self.get_edge_id()
-        args = ["edge-id=%s" % edge_id,
-                "router-id=e5b9b249-0034-4729-8ab6-fe4dacaa3a12",
-                "policy-id=1",
-                "network_id=net-1",
-                "net-id=net-1",
-                "network=net-1",
-                "port=port-1",
-                "security-group-id=sg-1",
-                "dvs-id=dvs-1",
-                "moref=virtualwire-1",
-                "teamingpolicy=LACP_ACTIVE",
-                "log-allowed-traffic=true",
-                "az-name=default",
-                "transit-network=abc",
-                "moref=abc",
-                ]
-        self._test_resources_with_args(
-            resources.nsxv_resources, args)
-
-    def test_router_recreate(self):
-        # Testing router-recreate separately because it may change the edge-id
-        edge_id = self.get_edge_id()
-        args = {'property': ["edge-id=%s" % edge_id]}
-        self._test_resource('routers', 'nsx-recreate', **args)
-
-    def test_migration_validation(self):
-        # check that validation fails
-        payload = admin_utils.MetadataEventPayload(
-            {'property': ["transit-network=1.1.1.0/24"]})
-        try:
-            migration.validate_config_for_migration(
-                'nsx-migrate-v2t', 'validate', None, payload)
-        except SystemExit:
-            return
-        else:
-            self.fail()
-
-
 class TestNsxv3AdminUtils(AbstractTestAdminUtils,
                           test_v3_plugin.NsxV3PluginTestCaseMixin):