From 445384dde7299cd709fd095cd648081945ada5d4 Mon Sep 17 00:00:00 2001
From: Adit Sarfaty <asarfaty@vmware.com>
Date: Mon, 4 Sep 2017 15:27:20 +0300
Subject: [PATCH] NSX|v3: provider networks updates

- Deprecate the "vxlan" provider network type
- Add the "geneve" type, backed by an overlay transport zone
- Add the "nsx-net" type: attach an existing NSX logical switch
  (VLAN or overlay) to a neutron network

In addition, this patch adds unit tests covering all provider network types.
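
For illustration only (not part of this change): with standard
python-neutronclient calls, the new types are requested through the
provider-network API attributes. The auth URL, credentials and the
logical switch UUID below are placeholders.

    from keystoneauth1 import identity, session
    from neutronclient.v2_0 import client as neutron_client

    auth = identity.Password(auth_url='http://controller:5000/v3',
                             username='admin', password='secret',
                             project_name='admin',
                             user_domain_id='default',
                             project_domain_id='default')
    neutron = neutron_client.Client(session=session.Session(auth=auth))

    # 'geneve' replaces the deprecated 'vxlan' overlay type
    neutron.create_network(
        {'network': {'name': 'overlay-net',
                     'provider:network_type': 'geneve'}})

    # 'nsx-net' attaches an existing NSX logical switch to the neutron
    # network; physical_network carries the logical switch UUID
    neutron.create_network(
        {'network': {'name': 'nsx-backed-net',
                     'provider:network_type': 'nsx-net',
                     'provider:physical_network': '<logical-switch-uuid>'}})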

Change-Id: I48a35c913c08ea4afcca64ed2e13db41260b95a3
---
 ...pdate-provider-types-aa1c20e988878ffe.yaml |   8 +
 vmware_nsx/common/utils.py                    |   3 +-
 .../alembic_migrations/versions/CONTRACT_HEAD |   2 +-
 .../a1be06050b41_update_nsx_binding_types.py  |  60 ++++++
 vmware_nsx/db/nsx_models.py                   |   4 +-
 vmware_nsx/plugins/nsx_v3/plugin.py           | 128 +++++++++----
 vmware_nsx/tests/unit/nsx_v3/test_plugin.py   | 173 +++++++++++++++++-
 7 files changed, 333 insertions(+), 45 deletions(-)
 create mode 100644 releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml
 create mode 100644 vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.py
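
Reviewer note (illustration, not part of the change): the contract
migration keeps existing overlay networks usable across the upgrade.
upgrade() first extends the tz_network_bindings enum with the new
values, then rewrites the old rows with

    UPDATE tz_network_bindings SET binding_type='geneve'
        where binding_type='vxlan'

and only afterwards drops 'vxlan' from the enum, so networks created
with the deprecated type are reported with provider:network_type
'geneve' once the migration has run.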

diff --git a/releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml b/releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml
new file mode 100644
index 0000000000..24d9d239c2
--- /dev/null
+++ b/releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml
@@ -0,0 +1,8 @@
+---
+prelude: >
+    Adding support for Geneve and nsx-net provider networks.
+features:
+  - |
+    Deprecating the VXLAN provider network type.
+    Adding Geneve provider networks (with overlay transport zone).
+    Adding nsx-net provider networks backed by existing NSX logical switches.
diff --git a/vmware_nsx/common/utils.py b/vmware_nsx/common/utils.py
index 6fd2d034e3..f2dbbbfec9 100644
--- a/vmware_nsx/common/utils.py
+++ b/vmware_nsx/common/utils.py
@@ -67,7 +67,8 @@ class NsxV3NetworkTypes(object):
     """Allowed provider network types for the NSXv3 Plugin."""
     FLAT = 'flat'
     VLAN = 'vlan'
-    VXLAN = 'vxlan'
+    GENEVE = 'geneve'
+    NSX_NETWORK = 'nsx-net'
 
 
 def is_nsx_version_1_1_0(nsx_version):
diff --git a/vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD b/vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD
index 74d759e164..2da64c137b 100644
--- a/vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD
+++ b/vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD
@@ -1 +1 @@
-84ceffa27115
\ No newline at end of file
+a1be06050b41
\ No newline at end of file
diff --git a/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.py b/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.py
new file mode 100644
index 0000000000..76ec48792f
--- /dev/null
+++ b/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.py
@@ -0,0 +1,60 @@
+# Copyright 2017 VMware, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""update nsx binding types
+
+Revision ID: a1be06050b41
+Revises: 84ceffa27115
+Create Date: 2017-09-04 23:58:22.003350
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'a1be06050b41'
+down_revision = '84ceffa27115'
+depends_on = ('aede17d51d0f',)
+
+from alembic import op
+import sqlalchemy as sa
+
+from neutron.db import migration as neutron_op
+
+
+all_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
+                                   'vxlan', 'geneve', 'portgroup', 'nsx-net',
+                                   name='tz_network_bindings_binding_type')
+
+new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
+                                   'geneve', 'portgroup', 'nsx-net',
+                                   name='tz_network_bindings_binding_type')
+
+
+def upgrade():
+    # add the new network types to the enum
+    neutron_op.alter_enum_add_value(
+        'tz_network_bindings',
+        'binding_type',
+        all_tz_binding_type_enum,
+        False)
+
+    # change existing entries with type 'vxlan' to 'geneve'
+    op.execute("UPDATE tz_network_bindings SET binding_type='geneve' "
+               "where binding_type='vxlan'")
+
+    # remove 'vxlan' from the enum
+    op.alter_column(
+        'tz_network_bindings',
+        'binding_type',
+        type_=new_tz_binding_type_enum,
+        existing_type=all_tz_binding_type_enum,
+        existing_nullable=False)
diff --git a/vmware_nsx/db/nsx_models.py b/vmware_nsx/db/nsx_models.py
index 3e7cd354c1..a9dfff7616 100644
--- a/vmware_nsx/db/nsx_models.py
+++ b/vmware_nsx/db/nsx_models.py
@@ -43,9 +43,9 @@ class TzNetworkBinding(model_base.BASEV2, models.TimestampMixin):
     network_id = sa.Column(sa.String(36),
                            sa.ForeignKey('networks.id', ondelete="CASCADE"),
                            primary_key=True)
-    # 'flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', 'portgroup'
+    # 'flat', 'vlan', 'stt', 'gre', 'l3_ext', 'geneve', 'portgroup', 'nsx-net'
     binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
-                                     'vxlan', 'portgroup',
+                                     'geneve', 'portgroup', 'nsx-net',
                                      name='tz_network_bindings_binding_type'),
                              nullable=False, primary_key=True)
     phy_uuid = sa.Column(sa.String(36), primary_key=True, default='')
diff --git a/vmware_nsx/plugins/nsx_v3/plugin.py b/vmware_nsx/plugins/nsx_v3/plugin.py
index 28284537c7..49fdaff98d 100644
--- a/vmware_nsx/plugins/nsx_v3/plugin.py
+++ b/vmware_nsx/plugins/nsx_v3/plugin.py
@@ -693,12 +693,30 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     if bindings:
                         raise n_exc.VlanIdInUse(
                             vlan_id=vlan_id, physical_network=physical_net)
-            elif net_type == utils.NsxV3NetworkTypes.VXLAN:
+            elif net_type == utils.NsxV3NetworkTypes.GENEVE:
                 if vlan_id:
                     err_msg = (_("Segmentation ID cannot be specified with "
                                  "%s network type") %
-                               utils.NsxV3NetworkTypes.VXLAN)
+                               utils.NsxV3NetworkTypes.GENEVE)
                 tz_type = self.nsxlib.transport_zone.TRANSPORT_TYPE_OVERLAY
+            elif net_type == utils.NsxV3NetworkTypes.NSX_NETWORK:
+                # Linking neutron networks to an existing NSX logical switch
+                if physical_net is None:
+                    err_msg = (_("Physical network must be specified with "
+                                 "%s network type") % net_type)
+                # Validate the logical switch existence
+                try:
+                    self.nsxlib.logical_switch.get(physical_net)
+                except nsx_lib_exc.ResourceNotFound:
+                    err_msg = (_('Logical switch %s does not exist') %
+                               physical_net)
+                # make sure no other neutron network is using it
+                bindings = (
+                    nsx_db.get_network_bindings_by_vlanid_and_physical_net(
+                        context.elevated().session, 0, physical_net))
+                if bindings:
+                    err_msg = (_('Logical switch %s is already used by '
+                                 'another network') % physical_net)
             else:
                 err_msg = (_('%(net_type_param)s %(net_type_value)s not '
                              'supported') %
@@ -718,7 +736,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             physical_net = az._default_overlay_tz_uuid
 
         # validate the transport zone existence and type
-        if not err_msg and is_provider_net and physical_net:
+        if (not err_msg and is_provider_net and physical_net and
+            net_type != utils.NsxV3NetworkTypes.NSX_NETWORK):
             try:
                 backend_type = self.nsxlib.transport_zone.get_transport_type(
                     physical_net)
@@ -752,43 +771,68 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
     def _create_network_at_the_backend(self, context, net_data, az):
         is_provider_net, net_type, physical_net, vlan_id = (
             self._validate_provider_create(context, net_data, az))
-        neutron_net_id = net_data.get('id') or uuidutils.generate_uuid()
-        # To ensure that the correct tag will be set
-        net_data['id'] = neutron_net_id
-        # update the network name to indicate the neutron id too.
-        net_name = utils.get_name_and_uuid(net_data['name'] or 'network',
-                                           neutron_net_id)
-        tags = self.nsxlib.build_v3_tags_payload(
-            net_data, resource_type='os-neutron-net-id',
-            project_name=context.tenant_name)
 
-        admin_state = net_data.get('admin_state_up', True)
+        if is_provider_net and net_type == utils.NsxV3NetworkTypes.NSX_NETWORK:
+            # Network already exists on the NSX backend
+            nsx_id = physical_net
+        else:
+            # Create network on the backend
+            neutron_net_id = net_data.get('id') or uuidutils.generate_uuid()
+            # To ensure that the correct tag will be set
+            net_data['id'] = neutron_net_id
+            # update the network name to indicate the neutron id too.
+            net_name = utils.get_name_and_uuid(net_data['name'] or 'network',
+                                               neutron_net_id)
+            tags = self.nsxlib.build_v3_tags_payload(
+                net_data, resource_type='os-neutron-net-id',
+                project_name=context.tenant_name)
 
-        # Create network on the backend
-        LOG.debug('create_network: %(net_name)s, %(physical_net)s, '
-                  '%(tags)s, %(admin_state)s, %(vlan_id)s',
-                  {'net_name': net_name,
-                   'physical_net': physical_net,
-                   'tags': tags,
-                   'admin_state': admin_state,
-                   'vlan_id': vlan_id})
-        nsx_result = self.nsxlib.logical_switch.create(
-            net_name, physical_net, tags,
-            admin_state=admin_state,
-            vlan_id=vlan_id,
-            description=net_data.get('description'))
+            admin_state = net_data.get('admin_state_up', True)
+            LOG.debug('create_network: %(net_name)s, %(physical_net)s, '
+                      '%(tags)s, %(admin_state)s, %(vlan_id)s',
+                      {'net_name': net_name,
+                       'physical_net': physical_net,
+                       'tags': tags,
+                       'admin_state': admin_state,
+                       'vlan_id': vlan_id})
+            nsx_result = self.nsxlib.logical_switch.create(
+                net_name, physical_net, tags,
+                admin_state=admin_state,
+                vlan_id=vlan_id,
+                description=net_data.get('description'))
+            nsx_id = nsx_result['id']
 
         return (is_provider_net,
                 net_type,
                 physical_net,
                 vlan_id,
-                nsx_result['id'])
+                nsx_id)
 
     def _is_overlay_network(self, context, network_id):
+        """Return True if this is an overlay network
+
+        1. No binding ("normal" overlay networks will have no binding)
+        2. Geneve network
+        3. nsx network where the backend network is connected to an overlay TZ
+        """
         bindings = nsx_db.get_network_bindings(context.session, network_id)
         # With NSX plugin, "normal" overlay networks will have no binding
-        return (not bindings or
-                bindings[0].binding_type == utils.NsxV3NetworkTypes.VXLAN)
+        if not bindings:
+            return True
+        binding = bindings[0]
+        if binding.binding_type == utils.NsxV3NetworkTypes.GENEVE:
+            return True
+        if binding.binding_type == utils.NsxV3NetworkTypes.NSX_NETWORK:
+            # check the backend network
+            # TODO(asarfaty): Keep TZ type in DB to avoid going to the backend
+            ls = self.nsxlib.logical_switch.get(binding.phy_uuid)
+            tz = ls.get('transport_zone_id')
+            if tz:
+                backend_type = self.nsxlib.transport_zone.get_transport_type(
+                    tz)
+                return (backend_type ==
+                        self.nsxlib.transport_zone.TRANSPORT_TYPE_OVERLAY)
+        return False
 
     def _extend_network_dict_provider(self, context, network, bindings=None):
         if 'id' not in network:
@@ -824,6 +868,13 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         return super(NsxV3Plugin, self).get_subnets(
             context, filters, fields, sorts, limit, marker, page_reverse)
 
+    def _network_is_nsx_net(self, context, network_id):
+        bindings = nsx_db.get_network_bindings(context.session, network_id)
+        if not bindings:
+            return False
+        return (bindings[0].binding_type ==
+                utils.NsxV3NetworkTypes.NSX_NETWORK)
+
     def create_network(self, context, network):
         net_data = network['network']
         external = net_data.get(ext_net_extn.EXTERNAL)
@@ -979,17 +1030,27 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                     self._disable_native_dhcp(context, network_id)
 
         nsx_net_id = self._get_network_nsx_id(context, network_id)
+        is_nsx_net = self._network_is_nsx_net(context, network_id)
+        is_overlay_network = self._is_overlay_network(context, network_id)
         # First call DB operation for delete network as it will perform
         # checks on active ports
         self._retry_delete_network(context, network_id)
-        if not self._network_is_external(context, network_id):
+        if (not self._network_is_external(context, network_id) and
+            not is_nsx_net):
             # TODO(salv-orlando): Handle backend failure, possibly without
             # requiring us to un-delete the DB object. For instance, ignore
             # failures occurring if logical switch is not found
             self.nsxlib.logical_switch.delete(nsx_net_id)
         else:
+            if (cfg.CONF.nsx_v3.native_dhcp_metadata and is_nsx_net and
+                is_overlay_network):
+                # Delete the mdproxy port manually
+                port_id = self.nsxlib.get_id_by_resource_and_tag(
+                    self.nsxlib.logical_port.resource_type,
+                    'os-neutron-net-id', network_id)
+                if port_id:
+                    self.nsxlib.logical_port.delete(port_id)
             # TODO(berlin): delete subnets public announce on the network
-            pass
 
     def _get_network_nsx_id(self, context, neutron_id):
         # get the nsx switch id from the DB mapping
@@ -1010,6 +1071,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         # Neutron does not support changing provider network values
         providernet._raise_if_updates_provider_attributes(net_data)
         extern_net = self._network_is_external(context, id)
+        is_nsx_net = self._network_is_nsx_net(context, id)
         if extern_net:
             self._assert_on_external_net_with_qos(net_data)
         updated_net = super(NsxV3Plugin, self).update_network(context, id,
@@ -1022,7 +1084,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         self._process_l3_update(context, updated_net, network['network'])
         self._extend_network_dict_provider(context, updated_net)
 
-        if (not extern_net and
+        if (not extern_net and not is_nsx_net and
             ('name' in net_data or 'admin_state_up' in net_data or
              'description' in net_data)):
             try:
@@ -3186,7 +3248,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
     def _validate_multiple_subnets_routers(self, context, router_id, net_id):
         network = self.get_network(context, net_id)
         net_type = network.get(pnet.NETWORK_TYPE)
-        if (net_type and net_type != utils.NsxV3NetworkTypes.VXLAN):
+        if (net_type and net_type != utils.NsxV3NetworkTypes.GENEVE):
             err_msg = (_("Only overlay networks can be attached to a logical "
                          "router. Network %(net_id)s is a %(net_type)s based "
                          "network") % {'net_id': net_id, 'net_type': net_type})
diff --git a/vmware_nsx/tests/unit/nsx_v3/test_plugin.py b/vmware_nsx/tests/unit/nsx_v3/test_plugin.py
index ef179ffed1..10511cfbe1 100644
--- a/vmware_nsx/tests/unit/nsx_v3/test_plugin.py
+++ b/vmware_nsx/tests/unit/nsx_v3/test_plugin.py
@@ -77,6 +77,10 @@ def _mock_create_firewall_rules(*args):
         ]}
 
 
+def _return_id_key(*args, **kwargs):
+    return {'id': uuidutils.generate_uuid()}
+
+
 def _mock_nsx_backend_calls():
     mock.patch("vmware_nsxlib.v3.client.NSX3Client").start()
 
@@ -84,9 +88,6 @@ def _mock_nsx_backend_calls():
                     'resource_type': 'FakeResource',
                     'id': uuidutils.generate_uuid()}
 
-    def _return_id_key(*args, **kwargs):
-        return {'id': uuidutils.generate_uuid()}
-
     def _return_id(*args, **kwargs):
         return uuidutils.generate_uuid()
 
@@ -112,11 +113,6 @@ def _mock_nsx_backend_calls():
         "get_id_by_name_or_id",
         return_value=uuidutils.generate_uuid()).start()
 
-    mock.patch(
-        "vmware_nsxlib.v3.core_resources.NsxLibTransportZone."
-        "get_id_by_name_or_id",
-        return_value=uuidutils.generate_uuid()).start()
-
     mock.patch(
         "vmware_nsxlib.v3.core_resources.NsxLibBridgeEndpoint.create",
         side_effect=_return_id_key).start()
@@ -278,6 +274,167 @@ class TestNetworksV2(test_plugin.TestNetworksV2, NsxV3PluginTestCaseMixin):
             networks = self.plugin.get_networks(ctx)
             self.assertListEqual([], networks)
 
+    def test_create_provider_flat_network(self):
+        providernet_args = {pnet.NETWORK_TYPE: 'flat'}
+        with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
+                        'create', side_effect=_return_id_key) as nsx_create, \
+            mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
+                       'delete') as nsx_delete, \
+            mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
+                       'get_transport_type', return_value='VLAN'),\
+            self.network(name='flat_net',
+                         providernet_args=providernet_args,
+                         arg_list=(pnet.NETWORK_TYPE, )) as net:
+            self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE))
+            # make sure the network is created at the backend
+            nsx_create.assert_called_once()
+
+            # Delete the network and make sure it is deleted from the backend
+            req = self.new_delete_request('networks', net['network']['id'])
+            res = req.get_response(self.api)
+            self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+            nsx_delete.assert_called_once()
+
+    def test_create_provider_flat_network_with_physical_net(self):
+        physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID
+        providernet_args = {pnet.NETWORK_TYPE: 'flat',
+                            pnet.PHYSICAL_NETWORK: physical_network}
+        with mock.patch(
+            'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
+            'get_transport_type', return_value='VLAN'),\
+            self.network(name='flat_net',
+                         providernet_args=providernet_args,
+                         arg_list=(pnet.NETWORK_TYPE,
+                                   pnet.PHYSICAL_NETWORK)) as net:
+            self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE))
+
+    def test_create_provider_flat_network_with_vlan(self):
+        providernet_args = {pnet.NETWORK_TYPE: 'flat',
+                            pnet.SEGMENTATION_ID: 11}
+        with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
+                        'get_transport_type', return_value='VLAN'):
+            result = self._create_network(fmt='json', name='bad_flat_net',
+                                          admin_state_up=True,
+                                          providernet_args=providernet_args,
+                                          arg_list=(pnet.NETWORK_TYPE,
+                                                    pnet.SEGMENTATION_ID))
+            data = self.deserialize('json', result)
+            # should fail
+            self.assertEqual('InvalidInput', data['NeutronError']['type'])
+
+    def test_create_provider_geneve_network(self):
+        providernet_args = {pnet.NETWORK_TYPE: 'geneve'}
+        with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
+                        'create', side_effect=_return_id_key) as nsx_create, \
+            mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
+                       'delete') as nsx_delete, \
+            mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
+                       'get_transport_type', return_value='OVERLAY'),\
+            self.network(name='geneve_net',
+                         providernet_args=providernet_args,
+                         arg_list=(pnet.NETWORK_TYPE, )) as net:
+            self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE))
+            # make sure the network is created at the backend
+            nsx_create.assert_called_once()
+
+            # Delete the network and make sure it is deleted from the backend
+            req = self.new_delete_request('networks', net['network']['id'])
+            res = req.get_response(self.api)
+            self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+            nsx_delete.assert_called_once()
+
+    def test_create_provider_geneve_network_with_physical_net(self):
+        physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID
+        providernet_args = {pnet.NETWORK_TYPE: 'geneve',
+                            pnet.PHYSICAL_NETWORK: physical_network}
+        with mock.patch(
+            'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
+            'get_transport_type', return_value='OVERLAY'),\
+            self.network(name='geneve_net', providernet_args=providernet_args,
+                         arg_list=(pnet.NETWORK_TYPE,
+                                   pnet.PHYSICAL_NETWORK)) as net:
+            self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE))
+
+    def test_create_provider_geneve_network_with_vlan(self):
+        providernet_args = {pnet.NETWORK_TYPE: 'geneve',
+                            pnet.SEGMENTATION_ID: 11}
+        with mock.patch(
+            'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
+            'get_transport_type', return_value='OVERLAY'):
+            result = self._create_network(fmt='json', name='bad_geneve_net',
+                                          admin_state_up=True,
+                                          providernet_args=providernet_args,
+                                          arg_list=(pnet.NETWORK_TYPE,
+                                                    pnet.SEGMENTATION_ID))
+            data = self.deserialize('json', result)
+            # should fail
+            self.assertEqual('InvalidInput', data['NeutronError']['type'])
+
+    def test_create_provider_vlan_network(self):
+        providernet_args = {pnet.NETWORK_TYPE: 'vlan',
+                            pnet.SEGMENTATION_ID: 11}
+        with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
+                        'create', side_effect=_return_id_key) as nsx_create, \
+            mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
+                       'delete') as nsx_delete, \
+            mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.'
+                       'get_transport_type', return_value='VLAN'),\
+            self.network(name='vlan_net',
+                         providernet_args=providernet_args,
+                         arg_list=(pnet.NETWORK_TYPE,
+                                   pnet.SEGMENTATION_ID)) as net:
+            self.assertEqual('vlan', net['network'].get(pnet.NETWORK_TYPE))
+            # make sure the network is created at the backend
+            nsx_create.assert_called_once()
+
+            # Delete the network and make sure it is deleted from the backend
+            req = self.new_delete_request('networks', net['network']['id'])
+            res = req.get_response(self.api)
+            self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+            nsx_delete.assert_called_once()
+
+    def test_create_provider_nsx_network(self):
+        physical_network = 'Fake logical switch'
+        providernet_args = {pnet.NETWORK_TYPE: 'nsx-net',
+                            pnet.PHYSICAL_NETWORK: physical_network}
+
+        with mock.patch(
+            'vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.create',
+            side_effect=nsxlib_exc.ResourceNotFound) as nsx_create, \
+            mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.'
+                       'delete') as nsx_delete, \
+            self.network(name='nsx_net',
+                         providernet_args=providernet_args,
+                         arg_list=(pnet.NETWORK_TYPE,
+                                   pnet.PHYSICAL_NETWORK)) as net:
+            self.assertEqual('nsx-net', net['network'].get(pnet.NETWORK_TYPE))
+            self.assertEqual(physical_network,
+                             net['network'].get(pnet.PHYSICAL_NETWORK))
+            # make sure the network is NOT created at the backend
+            nsx_create.assert_not_called()
+
+            # Delete the network. It should NOT be deleted from the backend
+            req = self.new_delete_request('networks', net['network']['id'])
+            res = req.get_response(self.api)
+            self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+            nsx_delete.assert_not_called()
+
+    def test_create_provider_bad_nsx_network(self):
+        physical_network = 'Bad logical switch'
+        providernet_args = {pnet.NETWORK_TYPE: 'nsx-net',
+                            pnet.PHYSICAL_NETWORK: physical_network}
+        with mock.patch(
+            "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get",
+            side_effect=nsxlib_exc.ResourceNotFound):
+            result = self._create_network(fmt='json', name='bad_nsx_net',
+                                          admin_state_up=True,
+                                          providernet_args=providernet_args,
+                                          arg_list=(pnet.NETWORK_TYPE,
+                                                    pnet.PHYSICAL_NETWORK))
+            data = self.deserialize('json', result)
+            # should fail
+            self.assertEqual('InvalidInput', data['NeutronError']['type'])
+
 
 class TestSubnetsV2(test_plugin.TestSubnetsV2, NsxV3PluginTestCaseMixin):