From 9254b0aedab189b4f6ef7b5a85e26c88baf14163 Mon Sep 17 00:00:00 2001
From: Gary Kotton <gkotton@vmware.com>
Date: Sat, 20 Dec 2014 23:17:52 -0800
Subject: [PATCH] VMware: initial NSXv developments

Add the NSXv code to the repo.

Co-Authored-By: Kobi Samoray <ksamoray@vmware.com>

Change-Id: Iefc76e0d6bfab8136bd2e3300a8b3d4a3fdb3a46
---
 test-requirements.txt                         |    3 +-
 .../neutron/plugins/vmware/api_client/base.py |    2 +-
 .../plugins/vmware/api_client/client.py       |    8 +-
 .../vmware/api_client/eventlet_client.py      |    4 +-
 .../vmware/api_client/eventlet_request.py     |    2 +-
 .../plugins/vmware/api_client/request.py      |    2 +-
 .../plugins/vmware/check_nsx_config.py        |    6 +-
 .../neutron/plugins/vmware/common/config.py   |   70 +-
 .../plugins/vmware/common/exceptions.py       |   14 -
 .../plugins/vmware/common/nsx_utils.py        |   16 +-
 .../plugins/vmware/common/nsxv_constants.py   |   28 +
 .../plugins/vmware/common/securitygroups.py   |    2 +-
 .../neutron/plugins/vmware/common/sync.py     |    8 +-
 .../neutron/plugins/vmware/common/utils.py    |    9 +
 ...{servicerouter.py => distributedrouter.py} |   16 +-
 .../neutron/plugins/vmware/dbexts/lsn_db.py   |  132 --
 .../plugins/vmware/dbexts/maclearning.py      |   78 -
 .../neutron/plugins/vmware/dbexts/models.py   |  117 --
 .../plugins/vmware/dbexts/networkgw_db.py     |  521 -----
 .../plugins/vmware/dbexts/nsxrouter.py        |   66 +
 .../neutron/plugins/vmware/dbexts/nsxv_db.py  |  435 ++++
 .../neutron/plugins/vmware/dbexts/qos_db.py   |  302 ---
 .../neutron/plugins/vmware/dbexts/vcns_db.py  |  202 --
 .../plugins/vmware/dbexts/vcns_models.py      |   94 -
 .../plugins/vmware/dbexts/vnic_index_db.py    |   61 +
 .../plugins/vmware/dhcp_meta/combined.py      |    4 +-
 .../plugins/vmware/dhcp_meta/lsnmanager.py    |    8 +-
 .../plugins/vmware/dhcp_meta/migration.py     |    4 +-
 .../neutron/plugins/vmware/dhcp_meta/nsx.py   |    4 +-
 .../neutron/plugins/vmware/dhcp_meta/rpc.py   |    2 +-
 .../neutron/plugins/vmware/dhcpmeta_modes.py  |   14 +-
 .../vmware/extensions/distributedrouter.py    |   70 +
 .../vmware/extensions/metadata_providers.py   |   56 +
 .../plugins/vmware/extensions/networkgw.py    |    2 +-
 .../plugins/vmware/extensions/nvp_qos.py      |    2 +-
 .../plugins/vmware/extensions/vnic_index.py   |   61 +
 .../plugins/vmware/nsxlib/l2gateway.py        |    6 +-
 .../neutron/plugins/vmware/nsxlib/lsn.py      |    4 +-
 .../neutron/plugins/vmware/nsxlib/queue.py    |    4 +-
 .../neutron/plugins/vmware/nsxlib/router.py   |   10 +-
 .../neutron/plugins/vmware/nsxlib/secgroup.py |    4 +-
 .../neutron/plugins/vmware/nsxlib/switch.py   |    8 +-
 vmware_nsx/neutron/plugins/vmware/plugin.py   |    4 +-
 .../neutron/plugins/vmware/plugins/base.py    |   30 +-
 .../neutron/plugins/vmware/plugins/nsx_v.py   | 1855 +++++++++++++++++
 .../plugins/vmware/plugins/nsx_v_md_proxy.py  |  367 ++++
 .../neutron/plugins/vmware/shell/__init__.py  |    2 +-
 .../vmware/vshield/common/VcnsApiClient.py    |   57 +-
 .../vmware/vshield/common/constants.py        |   33 +
 .../vmware/vshield/common/exceptions.py       |    4 +
 .../vmware/vshield/edge_appliance_driver.py   |  603 ++++--
 .../vmware/vshield/edge_firewall_driver.py    |  152 +-
 .../vmware/vshield/edge_ipsecvpn_driver.py    |    2 +-
 .../vshield/edge_loadbalancer_driver.py       |   40 +-
 .../plugins/vmware/vshield/edge_utils.py      | 1349 ++++++++++++
 .../vmware/vshield/nsxv_edge_cfg_obj.py       |   67 +
 .../vmware/vshield/nsxv_loadbalancer.py       |  391 ++++
 .../vmware/vshield/securitygroup_utils.py     |  183 ++
 .../plugins/vmware/vshield/tasks/tasks.py     |   16 +-
 .../neutron/plugins/vmware/vshield/vcns.py    |  316 ++-
 .../plugins/vmware/vshield/vcns_driver.py     |   47 +
 .../neutron/tests/unit/vmware/__init__.py     |   22 +-
 .../unit/vmware/apiclient/test_api_common.py  |    2 +-
 .../apiclient/test_api_eventlet_request.py    |    6 +-
 .../tests/unit/vmware/db/test_nsx_db.py       |    2 +-
 .../tests/unit/vmware/etc/nsx.ini.test        |    7 +
 .../vmware/extensions/test_maclearning.py     |    4 +-
 .../unit/vmware/extensions/test_networkgw.py  |    6 +-
 .../vmware/extensions/test_portsecurity.py    |    2 +-
 .../unit/vmware/extensions/test_qosqueues.py  |    4 +-
 .../unit/vmware/extensions/test_vnic_index.py |  108 +
 .../tests/unit/vmware/nsx_v/__init__.py       |    0
 .../vmware/nsx_v/test_nsxv_loadbalancer.py    |   95 +
 .../neutron/tests/unit/vmware/nsxlib/base.py  |    8 +-
 .../unit/vmware/nsxlib/test_l2gateway.py      |    8 +-
 .../tests/unit/vmware/nsxlib/test_lsn.py      |    6 +-
 .../tests/unit/vmware/nsxlib/test_queue.py    |    4 +-
 .../tests/unit/vmware/nsxlib/test_router.py   |   11 +-
 .../tests/unit/vmware/nsxlib/test_secgroup.py |    4 +-
 .../tests/unit/vmware/nsxlib/test_switch.py   |    4 +-
 .../unit/vmware/nsxlib/test_versioning.py     |    7 +-
 .../tests/unit/vmware/test_agent_scheduler.py |    4 +-
 .../tests/unit/vmware/test_dhcpmeta.py        |   10 +-
 .../tests/unit/vmware/test_nsx_opts.py        |   12 +-
 .../tests/unit/vmware/test_nsx_plugin.py      |   11 +-
 .../tests/unit/vmware/test_nsx_sync.py        |   14 +-
 .../tests/unit/vmware/test_nsx_utils.py       |    6 +-
 .../tests/unit/vmware/test_nsx_v_plugin.py    | 1614 ++++++++++++++
 .../tests/unit/vmware/vshield/fake_vcns.py    |  382 +++-
 .../unit/vmware/vshield/test_vcns_driver.py   |   18 +-
 90 files changed, 8471 insertions(+), 1889 deletions(-)
 create mode 100644 vmware_nsx/neutron/plugins/vmware/common/nsxv_constants.py
 rename vmware_nsx/neutron/plugins/vmware/dbexts/{servicerouter.py => distributedrouter.py} (63%)
 delete mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/lsn_db.py
 delete mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/maclearning.py
 delete mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/models.py
 delete mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/networkgw_db.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/nsxrouter.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/nsxv_db.py
 delete mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/qos_db.py
 delete mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/vcns_db.py
 delete mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/vcns_models.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/dbexts/vnic_index_db.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/extensions/distributedrouter.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/extensions/metadata_providers.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/extensions/vnic_index.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/plugins/nsx_v.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/plugins/nsx_v_md_proxy.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/vshield/edge_utils.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/vshield/nsxv_edge_cfg_obj.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/vshield/nsxv_loadbalancer.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/vshield/securitygroup_utils.py
 create mode 100644 vmware_nsx/neutron/plugins/vmware/vshield/vcns_driver.py
 create mode 100644 vmware_nsx/neutron/tests/unit/vmware/extensions/test_vnic_index.py
 create mode 100644 vmware_nsx/neutron/tests/unit/vmware/nsx_v/__init__.py
 create mode 100644 vmware_nsx/neutron/tests/unit/vmware/nsx_v/test_nsxv_loadbalancer.py
 create mode 100644 vmware_nsx/neutron/tests/unit/vmware/test_nsx_v_plugin.py

diff --git a/test-requirements.txt b/test-requirements.txt
index bb51f87d26..48e334e7b4 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,8 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
--e git://git.openstack.org/openstack/neutron.git#egg=neutron
+# Temporary, till https://review.openstack.org/#/c/143949/ is merged
+-e git://github.com/gkotton/neutron.git#egg=neutron
 
 hacking>=0.9.2,<0.10
 
diff --git a/vmware_nsx/neutron/plugins/vmware/api_client/base.py b/vmware_nsx/neutron/plugins/vmware/api_client/base.py
index d7e2f768d0..1f6d04223b 100644
--- a/vmware_nsx/neutron/plugins/vmware/api_client/base.py
+++ b/vmware_nsx/neutron/plugins/vmware/api_client/base.py
@@ -23,7 +23,7 @@ from oslo.config import cfg
 
 from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware import api_client
+from vmware_nsx.neutron.plugins.vmware import api_client
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/api_client/client.py b/vmware_nsx/neutron/plugins/vmware/api_client/client.py
index 67ea8d2fd0..08f7763391 100644
--- a/vmware_nsx/neutron/plugins/vmware/api_client/client.py
+++ b/vmware_nsx/neutron/plugins/vmware/api_client/client.py
@@ -19,11 +19,11 @@ import httplib
 
 from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import base
-from neutron.plugins.vmware.api_client import eventlet_client
-from neutron.plugins.vmware.api_client import eventlet_request
 from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.api_client import version
+from vmware_nsx.neutron.plugins.vmware.api_client import base
+from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_client
+from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_request
+from vmware_nsx.neutron.plugins.vmware.api_client import version
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_client.py b/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_client.py
index 0eba2fdb56..54d809bd7b 100644
--- a/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_client.py
+++ b/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_client.py
@@ -22,8 +22,8 @@ eventlet.monkey_patch()
 
 from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import base
-from neutron.plugins.vmware.api_client import eventlet_request
+from vmware_nsx.neutron.plugins.vmware.api_client import base
+from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_request
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_request.py b/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_request.py
index 3402e4f908..b15347a582 100644
--- a/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_request.py
+++ b/vmware_nsx/neutron/plugins/vmware/api_client/eventlet_request.py
@@ -22,7 +22,7 @@ from oslo.serialization import jsonutils
 
 from neutron.i18n import _LI, _LW
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import request
+from vmware_nsx.neutron.plugins.vmware.api_client import request
 
 LOG = logging.getLogger(__name__)
 USER_AGENT = "Neutron eventlet client/2.0"
diff --git a/vmware_nsx/neutron/plugins/vmware/api_client/request.py b/vmware_nsx/neutron/plugins/vmware/api_client/request.py
index a488d7637f..3222a35131 100644
--- a/vmware_nsx/neutron/plugins/vmware/api_client/request.py
+++ b/vmware_nsx/neutron/plugins/vmware/api_client/request.py
@@ -27,7 +27,7 @@ import six.moves.urllib.parse as urlparse
 
 from neutron.i18n import _LI, _LW
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware import api_client
+from vmware_nsx.neutron.plugins.vmware import api_client
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/check_nsx_config.py b/vmware_nsx/neutron/plugins/vmware/check_nsx_config.py
index a063394c1b..cb9975817d 100644
--- a/vmware_nsx/neutron/plugins/vmware/check_nsx_config.py
+++ b/vmware_nsx/neutron/plugins/vmware/check_nsx_config.py
@@ -20,9 +20,9 @@ import sys
 from oslo.config import cfg
 
 from neutron.common import config
-from neutron.plugins.vmware.common import config as nsx_config  # noqa
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.common import config as nsx_config  # noqa
+from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 
 config.setup_logging()
 
diff --git a/vmware_nsx/neutron/plugins/vmware/common/config.py b/vmware_nsx/neutron/plugins/vmware/common/config.py
index 01735ae101..df7153b072 100644
--- a/vmware_nsx/neutron/plugins/vmware/common/config.py
+++ b/vmware_nsx/neutron/plugins/vmware/common/config.py
@@ -12,10 +12,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import logging
+
 from oslo.config import cfg
 
+from neutron.i18n import _LW
 from neutron.plugins.vmware.common import exceptions as nsx_exc
 
+LOG = logging.getLogger(__name__)
+
 
 class AgentModes:
     AGENT = 'agent'
@@ -153,40 +158,87 @@ cluster_opts = [
 ]
 
 DEFAULT_STATUS_CHECK_INTERVAL = 2000
+DEFAULT_MINIMUM_POOLED_EDGES = 1
+DEFAULT_MAXIMUM_POOLED_EDGES = 3
+DEFAULT_MAXIMUM_TUNNELS_PER_VNIC = 20
 
-vcns_opts = [
+nsxv_opts = [
     cfg.StrOpt('user',
                default='admin',
+               deprecated_group="vcns",
                help=_('User name for vsm')),
     cfg.StrOpt('password',
                default='default',
+               deprecated_group="vcns",
                secret=True,
                help=_('Password for vsm')),
     cfg.StrOpt('manager_uri',
+               deprecated_group="vcns",
                help=_('uri for vsm')),
+    cfg.ListOpt('cluster_moid',
+                default=[],
+                help=_('Parameter listing the IDs of the clusters '
+                       'which are used by OpenStack.')),
     cfg.StrOpt('datacenter_moid',
+               deprecated_group="vcns",
                help=_('Optional parameter identifying the ID of datacenter '
                       'to deploy NSX Edges')),
     cfg.StrOpt('deployment_container_id',
+               deprecated_group="vcns",
                help=_('Optional parameter identifying the ID of datastore to '
                       'deploy NSX Edges')),
     cfg.StrOpt('resource_pool_id',
+               deprecated_group="vcns",
                help=_('Optional parameter identifying the ID of resource to '
                       'deploy NSX Edges')),
     cfg.StrOpt('datastore_id',
+               deprecated_group="vcns",
                help=_('Optional parameter identifying the ID of datastore to '
                       'deploy NSX Edges')),
     cfg.StrOpt('external_network',
+               deprecated_group="vcns",
                help=_('Network ID for physical network connectivity')),
     cfg.IntOpt('task_status_check_interval',
                default=DEFAULT_STATUS_CHECK_INTERVAL,
-               help=_("Task status check interval"))
+               deprecated_group="vcns",
+               help=_("Task status check interval")),
+    cfg.StrOpt('vdn_scope_id',
+               help=_('Network scope ID for VXLAN virtual wires')),
+    cfg.StrOpt('dvs_id',
+               help=_('DVS ID for VLANs')),
+    cfg.IntOpt('maximum_tunnels_per_vnic',
+               default=DEFAULT_MAXIMUM_TUNNELS_PER_VNIC,
+               help=_('Maximum number of sub interfaces supported '
+                      'per vnic in edge. The value should be in 1-110.')),
+    cfg.ListOpt('backup_edge_pool',
+                default=['service:large:4:10',
+                         'service:compact:4:10',
+                         'vdr:large:4:10'],
+                help=_('Defines edge pool using the format: '
+                       '<edge_type>:[edge_size]:<min_edges>:<max_edges>.'
+                       'edge_type: service,vdr. '
+                       'edge_size: compact, large, xlarge, quadlarge '
+                       'and default is large.')),
+    cfg.IntOpt('retries',
+               default=10,
+               help=_('Maximum number of API retries on endpoint.')),
+    cfg.StrOpt('mgt_net_moid',
+               help=_('Network ID for management network connectivity')),
+    cfg.ListOpt('mgt_net_proxy_ips',
+                help=_('Management network IP address for metadata proxy')),
+    cfg.StrOpt('mgt_net_proxy_netmask',
+               help=_('Management network netmask for metadata proxy')),
+    cfg.ListOpt('nova_metadata_ips',
+                help=_('IP addresses used by Nova metadata service')),
+    cfg.IntOpt('nova_metadata_port',
+               default=8775,
+               help=_("TCP Port used by Nova metadata server."))
 ]
 
 # Register the configuration options
 cfg.CONF.register_opts(connection_opts)
 cfg.CONF.register_opts(cluster_opts)
-cfg.CONF.register_opts(vcns_opts, group="vcns")
+cfg.CONF.register_opts(nsxv_opts, group="nsxv")
 cfg.CONF.register_opts(base_opts, group="NSX")
 cfg.CONF.register_opts(sync_opts, group="NSX_SYNC")
 
@@ -197,3 +249,15 @@ def validate_config_options():
         error = (_("Invalid replication_mode: %s") %
                  cfg.CONF.NSX.replication_mode)
         raise nsx_exc.NsxPluginException(err_msg=error)
+
+
+def validate_nsxv_config_options():
+    if (cfg.CONF.nsxv.manager_uri is None or
+        cfg.CONF.nsxv.user is None or
+        cfg.CONF.nsxv.password is None):
+        error = _("manager_uri, user and passwork be configured!")
+        raise nsx_exc.NsxPluginException(err_msg=error)
+    if cfg.CONF.nsxv.dvs_id is None:
+        LOG.warning(_LW("dvs_id must be configured to support VLAN's!"))
+    if cfg.CONF.nsxv.vdn_scope_id is None:
+        LOG.warning(_LW("vdn_scope_id must be configured to support VXLAN's!"))
diff --git a/vmware_nsx/neutron/plugins/vmware/common/exceptions.py b/vmware_nsx/neutron/plugins/vmware/common/exceptions.py
index 3f435bd531..94cfdb27ad 100644
--- a/vmware_nsx/neutron/plugins/vmware/common/exceptions.py
+++ b/vmware_nsx/neutron/plugins/vmware/common/exceptions.py
@@ -76,20 +76,6 @@ class ServiceOverQuota(n_exc.Conflict):
     message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s")
 
 
-class RouterInUseByLBService(n_exc.InUse):
-    message = _("Router %(router_id)s is in use by Loadbalancer Service "
-                "%(vip_id)s")
-
-
-class RouterInUseByFWService(n_exc.InUse):
-    message = _("Router %(router_id)s is in use by firewall Service "
-                "%(firewall_id)s")
-
-
-class VcnsDriverException(NsxPluginException):
-    message = _("Error happened in NSX VCNS Driver: %(err_msg)s")
-
-
 class ServiceClusterUnavailable(NsxPluginException):
     message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
                 "check NSX setup and/or configuration")
diff --git a/vmware_nsx/neutron/plugins/vmware/common/nsx_utils.py b/vmware_nsx/neutron/plugins/vmware/common/nsx_utils.py
index 754e75d0fe..cdbef33226 100644
--- a/vmware_nsx/neutron/plugins/vmware/common/nsx_utils.py
+++ b/vmware_nsx/neutron/plugins/vmware/common/nsx_utils.py
@@ -19,16 +19,16 @@ from neutron.extensions import multiprovidernet as mpnet
 from neutron.extensions import providernet as pnet
 from neutron.i18n import _LW
 from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import client
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import utils as vmw_utils
-from neutron.plugins.vmware.dbexts import db as nsx_db
 from neutron.plugins.vmware.dbexts import networkgw_db
-from neutron.plugins.vmware import nsx_cluster
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
+from vmware_nsx.neutron.plugins.vmware.api_client import client
+from vmware_nsx.neutron.plugins.vmware.common import utils as vmw_utils
+from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
+from vmware_nsx.neutron.plugins.vmware import nsx_cluster
+from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
 
 LOG = log.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/common/nsxv_constants.py b/vmware_nsx/neutron/plugins/vmware/common/nsxv_constants.py
new file mode 100644
index 0000000000..e0b9d5a804
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/common/nsxv_constants.py
@@ -0,0 +1,28 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Edge size
+COMPACT = 'compact'
+LARGE = 'large'
+XLARGE = 'xlarge'
+QUADLARGE = 'quadlarge'
+
+
+# Edge type
+SERVICE_EDGE = 'service'
+VDR_EDGE = 'vdr'
+
+# Internal element purpose
+INTER_EDGE_PURPOSE = 'inter_edge_net'
diff --git a/vmware_nsx/neutron/plugins/vmware/common/securitygroups.py b/vmware_nsx/neutron/plugins/vmware/common/securitygroups.py
index db61b72a85..bb59accdd5 100644
--- a/vmware_nsx/neutron/plugins/vmware/common/securitygroups.py
+++ b/vmware_nsx/neutron/plugins/vmware/common/securitygroups.py
@@ -14,7 +14,7 @@
 #    under the License.
 
 from neutron.openstack.common import log
-from neutron.plugins.vmware.common import nsx_utils
+from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
 
 LOG = log.getLogger(__name__)
 # Protocol number look up for supported protocols
diff --git a/vmware_nsx/neutron/plugins/vmware/common/sync.py b/vmware_nsx/neutron/plugins/vmware/common/sync.py
index d5aaaf96b4..4deed66851 100644
--- a/vmware_nsx/neutron/plugins/vmware/common/sync.py
+++ b/vmware_nsx/neutron/plugins/vmware/common/sync.py
@@ -30,10 +30,10 @@ from neutron.openstack.common import log
 from neutron.openstack.common import loopingcall
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
+from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
 
 # Maximum page size for a single request
 # NOTE(salv-orlando): This might become a version-dependent map should the
diff --git a/vmware_nsx/neutron/plugins/vmware/common/utils.py b/vmware_nsx/neutron/plugins/vmware/common/utils.py
index 49f5cf01d9..58497a000d 100644
--- a/vmware_nsx/neutron/plugins/vmware/common/utils.py
+++ b/vmware_nsx/neutron/plugins/vmware/common/utils.py
@@ -36,6 +36,15 @@ class NetworkTypes:
     BRIDGE = 'bridge'
 
 
+# Allowed network types for the NSX-v Plugin
+class NsxVNetworkTypes:
+    """Allowed provider network types for the NSX-v Plugin."""
+    FLAT = 'flat'
+    VLAN = 'vlan'
+    VXLAN = 'vxlan'
+    PORTGROUP = 'portgroup'
+
+
 def get_tags(**kwargs):
     tags = ([dict(tag=value, scope=key)
             for key, value in kwargs.iteritems()])
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/servicerouter.py b/vmware_nsx/neutron/plugins/vmware/dbexts/distributedrouter.py
similarity index 63%
rename from vmware_nsx/neutron/plugins/vmware/dbexts/servicerouter.py
rename to vmware_nsx/neutron/plugins/vmware/dbexts/distributedrouter.py
index cdbc4dce61..27b2072b7e 100644
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/servicerouter.py
+++ b/vmware_nsx/neutron/plugins/vmware/dbexts/distributedrouter.py
@@ -1,5 +1,6 @@
 # Copyright 2013 VMware, Inc.  All rights reserved.
 #
+#
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
@@ -13,15 +14,16 @@
 #    under the License.
 #
 
-from neutron.db import l3_dvr_db
-from vmware_nsx.neutron.plugins.vmware.extensions import servicerouter
+from vmware_nsx.neutron.plugins.vmware.dbexts import nsxrouter
+from vmware_nsx.neutron.plugins.vmware.extensions import (
+    distributedrouter as dist_rtr)
 
 
-class ServiceRouter_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin):
-    """Mixin class to enable service router support."""
+class DistributedRouter_mixin(nsxrouter.NsxRouterMixin):
+    """Mixin class to enable distributed router support."""
 
-    extra_attributes = (
-        l3_dvr_db.L3_NAT_with_dvr_db_mixin.extra_attributes + [{
-            'name': servicerouter.SERVICE_ROUTER,
+    nsx_attributes = (
+        nsxrouter.NsxRouterMixin.nsx_attributes + [{
+            'name': dist_rtr.DISTRIBUTED,
             'default': False
         }])
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/lsn_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/lsn_db.py
deleted file mode 100644
index a4865bdfc1..0000000000
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/lsn_db.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2014 VMware, Inc.
-#
-# All Rights Reserved
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-from oslo.db import exception as d_exc
-from sqlalchemy import Column
-from sqlalchemy import ForeignKey
-from sqlalchemy import orm
-from sqlalchemy import String
-
-from neutron.db import models_v2
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import exceptions as p_exc
-
-
-LOG = logging.getLogger(__name__)
-
-
-class LsnPort(models_v2.model_base.BASEV2):
-
-    __tablename__ = 'lsn_port'
-
-    lsn_port_id = Column(String(36), primary_key=True)
-
-    lsn_id = Column(String(36), ForeignKey('lsn.lsn_id', ondelete="CASCADE"),
-                    nullable=False)
-    sub_id = Column(String(36), nullable=False, unique=True)
-    mac_addr = Column(String(32), nullable=False, unique=True)
-
-    def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id):
-        self.lsn_port_id = lsn_port_id
-        self.lsn_id = lsn_id
-        self.sub_id = subnet_id
-        self.mac_addr = mac_address
-
-
-class Lsn(models_v2.model_base.BASEV2):
-    __tablename__ = 'lsn'
-
-    lsn_id = Column(String(36), primary_key=True)
-    net_id = Column(String(36), nullable=False)
-
-    def __init__(self, net_id, lsn_id):
-        self.net_id = net_id
-        self.lsn_id = lsn_id
-
-
-def lsn_add(context, network_id, lsn_id):
-    """Add Logical Service Node information to persistent datastore."""
-    with context.session.begin(subtransactions=True):
-        lsn = Lsn(network_id, lsn_id)
-        context.session.add(lsn)
-
-
-def lsn_remove(context, lsn_id):
-    """Remove Logical Service Node information from datastore given its id."""
-    with context.session.begin(subtransactions=True):
-        context.session.query(Lsn).filter_by(lsn_id=lsn_id).delete()
-
-
-def lsn_remove_for_network(context, network_id):
-    """Remove information about the Logical Service Node given its network."""
-    with context.session.begin(subtransactions=True):
-        context.session.query(Lsn).filter_by(net_id=network_id).delete()
-
-
-def lsn_get_for_network(context, network_id, raise_on_err=True):
-    """Retrieve LSN information given its network id."""
-    query = context.session.query(Lsn)
-    try:
-        return query.filter_by(net_id=network_id).one()
-    except (orm.exc.NoResultFound, d_exc.DBError):
-        msg = _('Unable to find Logical Service Node for network %s')
-        if raise_on_err:
-            LOG.error(msg, network_id)
-            raise p_exc.LsnNotFound(entity='network',
-                                    entity_id=network_id)
-        else:
-            LOG.warn(msg, network_id)
-
-
-def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):
-    """Add Logical Service Node Port information to persistent datastore."""
-    with context.session.begin(subtransactions=True):
-        lsn_port = LsnPort(lsn_port_id, subnet_id, mac, lsn_id)
-        context.session.add(lsn_port)
-
-
-def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True):
-    """Return Logical Service Node Port information given its subnet id."""
-    with context.session.begin(subtransactions=True):
-        try:
-            return (context.session.query(LsnPort).
-                    filter_by(sub_id=subnet_id).one())
-        except (orm.exc.NoResultFound, d_exc.DBError):
-            if raise_on_err:
-                raise p_exc.LsnPortNotFound(lsn_id=None,
-                                            entity='subnet',
-                                            entity_id=subnet_id)
-
-
-def lsn_port_get_for_mac(context, mac_address, raise_on_err=True):
-    """Return Logical Service Node Port information given its mac address."""
-    with context.session.begin(subtransactions=True):
-        try:
-            return (context.session.query(LsnPort).
-                    filter_by(mac_addr=mac_address).one())
-        except (orm.exc.NoResultFound, d_exc.DBError):
-            if raise_on_err:
-                raise p_exc.LsnPortNotFound(lsn_id=None,
-                                            entity='mac',
-                                            entity_id=mac_address)
-
-
-def lsn_port_remove(context, lsn_port_id):
-    """Remove Logical Service Node port from the given Logical Service Node."""
-    with context.session.begin(subtransactions=True):
-        (context.session.query(LsnPort).
-         filter_by(lsn_port_id=lsn_port_id).delete())
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/maclearning.py b/vmware_nsx/neutron/plugins/vmware/dbexts/maclearning.py
deleted file mode 100644
index 6a5f73acdf..0000000000
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/maclearning.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-
-from neutron.api.v2 import attributes
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.extensions import maclearning as mac
-
-LOG = logging.getLogger(__name__)
-
-
-class MacLearningState(model_base.BASEV2):
-
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False)
-
-    # Add a relationship to the Port model using the backref attribute.
-    # This will instruct SQLAlchemy to eagerly load this association.
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("mac_learning_state", lazy='joined',
-                            uselist=False, cascade='delete'))
-
-
-class MacLearningDbMixin(object):
-    """Mixin class for mac learning."""
-
-    def _make_mac_learning_state_dict(self, port, fields=None):
-        res = {'port_id': port['port_id'],
-               mac.MAC_LEARNING: port[mac.MAC_LEARNING]}
-        return self._fields(res, fields)
-
-    def _extend_port_mac_learning_state(self, port_res, port_db):
-        state = port_db.mac_learning_state
-        if state and state.mac_learning_enabled:
-            port_res[mac.MAC_LEARNING] = state.mac_learning_enabled
-
-    # Register dict extend functions for ports
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attributes.PORTS, ['_extend_port_mac_learning_state'])
-
-    def _update_mac_learning_state(self, context, port_id, enabled):
-        try:
-            query = self._model_query(context, MacLearningState)
-            state = query.filter(MacLearningState.port_id == port_id).one()
-            state.update({mac.MAC_LEARNING: enabled})
-        except exc.NoResultFound:
-            self._create_mac_learning_state(context,
-                                            {'id': port_id,
-                                             mac.MAC_LEARNING: enabled})
-
-    def _create_mac_learning_state(self, context, port):
-        with context.session.begin(subtransactions=True):
-            enabled = port[mac.MAC_LEARNING]
-            state = MacLearningState(port_id=port['id'],
-                                     mac_learning_enabled=enabled)
-            context.session.add(state)
-        return self._make_mac_learning_state_dict(state)
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/models.py b/vmware_nsx/neutron/plugins/vmware/dbexts/models.py
deleted file mode 100644
index 3337c9f254..0000000000
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/models.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-from sqlalchemy import Column, Enum, ForeignKey, Integer, String
-
-from neutron.db import model_base
-
-
-class TzNetworkBinding(model_base.BASEV2):
-    """Represents a binding of a virtual network with a transport zone.
-
-    This model class associates a Neutron network with a transport zone;
-    optionally a vlan ID might be used if the binding type is 'bridge'
-    """
-    __tablename__ = 'tz_network_bindings'
-
-    # TODO(arosen) - it might be worth while refactoring the how this data
-    # is stored later so every column does not need to be a primary key.
-    network_id = Column(String(36),
-                        ForeignKey('networks.id', ondelete="CASCADE"),
-                        primary_key=True)
-    # 'flat', 'vlan', stt' or 'gre'
-    binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
-                               name='tz_network_bindings_binding_type'),
-                          nullable=False, primary_key=True)
-    phy_uuid = Column(String(36), primary_key=True, default='')
-    vlan_id = Column(Integer, primary_key=True, autoincrement=False, default=0)
-
-    def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
-        self.network_id = network_id
-        self.binding_type = binding_type
-        self.phy_uuid = phy_uuid
-        self.vlan_id = vlan_id
-
-    def __repr__(self):
-        return "<NetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
-                                                  self.binding_type,
-                                                  self.phy_uuid,
-                                                  self.vlan_id)
-
-
-class NeutronNsxNetworkMapping(model_base.BASEV2):
-    """Maps neutron network identifiers to NSX identifiers.
-
-    Because of chained logical switches more than one mapping might exist
-    for a single Neutron network.
-    """
-    __tablename__ = 'neutron_nsx_network_mappings'
-    neutron_id = Column(String(36),
-                        ForeignKey('networks.id', ondelete='CASCADE'),
-                        primary_key=True)
-    nsx_id = Column(String(36), primary_key=True)
-
-
-class NeutronNsxSecurityGroupMapping(model_base.BASEV2):
-    """Backend mappings for Neutron Security Group identifiers.
-
-    This class maps a neutron security group identifier to the corresponding
-    NSX security profile identifier.
-    """
-
-    __tablename__ = 'neutron_nsx_security_group_mappings'
-    neutron_id = Column(String(36),
-                        ForeignKey('securitygroups.id', ondelete="CASCADE"),
-                        primary_key=True)
-    nsx_id = Column(String(36), primary_key=True)
-
-
-class NeutronNsxPortMapping(model_base.BASEV2):
-    """Represents the mapping between neutron and nsx port uuids."""
-
-    __tablename__ = 'neutron_nsx_port_mappings'
-    neutron_id = Column(String(36),
-                        ForeignKey('ports.id', ondelete="CASCADE"),
-                        primary_key=True)
-    nsx_switch_id = Column(String(36))
-    nsx_port_id = Column(String(36), nullable=False)
-
-    def __init__(self, neutron_id, nsx_switch_id, nsx_port_id):
-        self.neutron_id = neutron_id
-        self.nsx_switch_id = nsx_switch_id
-        self.nsx_port_id = nsx_port_id
-
-
-class NeutronNsxRouterMapping(model_base.BASEV2):
-    """Maps neutron router identifiers to NSX identifiers."""
-    __tablename__ = 'neutron_nsx_router_mappings'
-    neutron_id = Column(String(36),
-                        ForeignKey('routers.id', ondelete='CASCADE'),
-                        primary_key=True)
-    nsx_id = Column(String(36))
-
-
-class MultiProviderNetworks(model_base.BASEV2):
-    """Networks provisioned through multiprovider extension."""
-
-    __tablename__ = 'multi_provider_networks'
-    network_id = Column(String(36),
-                        ForeignKey('networks.id', ondelete="CASCADE"),
-                        primary_key=True)
-
-    def __init__(self, network_id):
-        self.network_id = network_id
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/networkgw_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/networkgw_db.py
deleted file mode 100644
index a790893ccc..0000000000
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/networkgw_db.py
+++ /dev/null
@@ -1,521 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import sqlalchemy as sa
-
-from sqlalchemy import orm
-from sqlalchemy.orm import exc as sa_orm_exc
-
-from neutron.api.v2 import attributes
-from neutron.common import exceptions
-from neutron.common import utils
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.openstack.common import log as logging
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.extensions import networkgw
-
-
-LOG = logging.getLogger(__name__)
-DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
-NETWORK_ID = 'network_id'
-SEGMENTATION_TYPE = 'segmentation_type'
-SEGMENTATION_ID = 'segmentation_id'
-ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
-                                     SEGMENTATION_TYPE,
-                                     SEGMENTATION_ID))
-# Constants for gateway device operational status
-STATUS_UNKNOWN = "UNKNOWN"
-STATUS_ERROR = "ERROR"
-STATUS_ACTIVE = "ACTIVE"
-STATUS_DOWN = "DOWN"
-
-
-class GatewayInUse(exceptions.InUse):
-    message = _("Network Gateway '%(gateway_id)s' still has active mappings "
-                "with one or more neutron networks.")
-
-
-class GatewayNotFound(exceptions.NotFound):
-    message = _("Network Gateway %(gateway_id)s could not be found")
-
-
-class GatewayDeviceInUse(exceptions.InUse):
-    message = _("Network Gateway Device '%(device_id)s' is still used by "
-                "one or more network gateways.")
-
-
-class GatewayDeviceNotFound(exceptions.NotFound):
-    message = _("Network Gateway Device %(device_id)s could not be found.")
-
-
-class GatewayDevicesNotFound(exceptions.NotFound):
-    message = _("One or more Network Gateway Devices could not be found: "
-                "%(device_ids)s.")
-
-
-class NetworkGatewayPortInUse(exceptions.InUse):
-    message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
-                "therefore cannot be deleted directly via the port API.")
-
-
-class GatewayConnectionInUse(exceptions.InUse):
-    message = _("The specified mapping '%(mapping)s' is already in use on "
-                "network gateway '%(gateway_id)s'.")
-
-
-class MultipleGatewayConnections(exceptions.Conflict):
-    message = _("Multiple network connections found on '%(gateway_id)s' "
-                "with provided criteria.")
-
-
-class GatewayConnectionNotFound(exceptions.NotFound):
-    message = _("The connection %(network_mapping_info)s was not found on the "
-                "network gateway '%(network_gateway_id)s'")
-
-
-class NetworkGatewayUnchangeable(exceptions.InUse):
-    message = _("The network gateway %(gateway_id)s "
-                "cannot be updated or deleted")
-
-
-class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
-    """Defines a connection between a network gateway and a network."""
-    # We use port_id as the primary key as one can connect a gateway
-    # to a network in multiple ways (and we cannot use the same port form
-    # more than a single gateway)
-    network_gateway_id = sa.Column(sa.String(36),
-                                   sa.ForeignKey('networkgateways.id',
-                                                 ondelete='CASCADE'))
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey('networks.id', ondelete='CASCADE'))
-    segmentation_type = sa.Column(
-        sa.Enum('flat', 'vlan',
-                name='networkconnections_segmentation_type'))
-    segmentation_id = sa.Column(sa.Integer)
-    __table_args__ = (sa.UniqueConstraint(network_gateway_id,
-                                          segmentation_type,
-                                          segmentation_id),)
-    # Also, storing port id comes back useful when disconnecting a network
-    # from a gateway
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey('ports.id', ondelete='CASCADE'),
-                        primary_key=True)
-
-
-class NetworkGatewayDeviceReference(model_base.BASEV2):
-    id = sa.Column(sa.String(36), primary_key=True)
-    network_gateway_id = sa.Column(sa.String(36),
-                                   sa.ForeignKey('networkgateways.id',
-                                                 ondelete='CASCADE'),
-                                   primary_key=True)
-    interface_name = sa.Column(sa.String(64), primary_key=True)
-
-
-class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
-                           models_v2.HasTenant):
-    nsx_id = sa.Column(sa.String(36))
-    # Optional name for the gateway device
-    name = sa.Column(sa.String(255))
-    # Transport connector type. Not using enum as range of
-    # connector types might vary with backend version
-    connector_type = sa.Column(sa.String(10))
-    # Transport connector IP Address
-    connector_ip = sa.Column(sa.String(64))
-    # operational status
-    status = sa.Column(sa.String(16))
-
-
-class NetworkGateway(model_base.BASEV2, models_v2.HasId,
-                     models_v2.HasTenant):
-    """Defines the data model for a network gateway."""
-    name = sa.Column(sa.String(255))
-    # Tenant id is nullable for this resource
-    tenant_id = sa.Column(sa.String(36))
-    default = sa.Column(sa.Boolean())
-    devices = orm.relationship(NetworkGatewayDeviceReference,
-                               backref='networkgateways',
-                               cascade='all,delete')
-    network_connections = orm.relationship(NetworkConnection, lazy='joined')
-
-
-class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
-
-    gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
-    device_resource = networkgw.DEVICE_RESOURCE_NAME
-
-    def _get_network_gateway(self, context, gw_id):
-        try:
-            gw = self._get_by_id(context, NetworkGateway, gw_id)
-        except sa_orm_exc.NoResultFound:
-            raise GatewayNotFound(gateway_id=gw_id)
-        return gw
-
-    def _make_gw_connection_dict(self, gw_conn):
-        return {'port_id': gw_conn['port_id'],
-                'segmentation_type': gw_conn['segmentation_type'],
-                'segmentation_id': gw_conn['segmentation_id']}
-
-    def _make_network_gateway_dict(self, network_gateway, fields=None):
-        device_list = []
-        for d in network_gateway['devices']:
-            device_list.append({'id': d['id'],
-                                'interface_name': d['interface_name']})
-        res = {'id': network_gateway['id'],
-               'name': network_gateway['name'],
-               'default': network_gateway['default'],
-               'devices': device_list,
-               'tenant_id': network_gateway['tenant_id']}
-        # Query gateway connections only if needed
-        if not fields or 'ports' in fields:
-            res['ports'] = [self._make_gw_connection_dict(conn)
-                            for conn in network_gateway.network_connections]
-        return self._fields(res, fields)
-
-    def _set_mapping_info_defaults(self, mapping_info):
-        if not mapping_info.get('segmentation_type'):
-            mapping_info['segmentation_type'] = 'flat'
-        if not mapping_info.get('segmentation_id'):
-            mapping_info['segmentation_id'] = 0
-
-    def _validate_network_mapping_info(self, network_mapping_info):
-        self._set_mapping_info_defaults(network_mapping_info)
-        network_id = network_mapping_info.get(NETWORK_ID)
-        if not network_id:
-            raise exceptions.InvalidInput(
-                error_message=_("A network identifier must be specified "
-                                "when connecting a network to a network "
-                                "gateway. Unable to complete operation"))
-        connection_attrs = set(network_mapping_info.keys())
-        if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
-            raise exceptions.InvalidInput(
-                error_message=(_("Invalid keys found among the ones provided "
-                                 "in request body: %(connection_attrs)s."),
-                               connection_attrs))
-        seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
-        seg_id = network_mapping_info.get(SEGMENTATION_ID)
-        # The NSX plugin accepts 0 as a valid vlan tag
-        seg_id_valid = seg_id == 0 or utils.is_valid_vlan_tag(seg_id)
-        if seg_type.lower() == 'flat' and seg_id:
-            msg = _("Cannot specify a segmentation id when "
-                    "the segmentation type is flat")
-            raise exceptions.InvalidInput(error_message=msg)
-        elif (seg_type.lower() == 'vlan' and not seg_id_valid):
-            msg = _("Invalid segmentation id (%d) for "
-                    "vlan segmentation type") % seg_id
-            raise exceptions.InvalidInput(error_message=msg)
-        return network_id
-
-    def _retrieve_gateway_connections(self, context, gateway_id,
-                                      mapping_info={}, only_one=False):
-        filters = {'network_gateway_id': [gateway_id]}
-        for k, v in mapping_info.iteritems():
-            if v and k != NETWORK_ID:
-                filters[k] = [v]
-        query = self._get_collection_query(context,
-                                           NetworkConnection,
-                                           filters)
-        return query.one() if only_one else query.all()
-
-    def _unset_default_network_gateways(self, context):
-        with context.session.begin(subtransactions=True):
-            context.session.query(NetworkGateway).update(
-                {NetworkGateway.default: False})
-
-    def _set_default_network_gateway(self, context, gw_id):
-        with context.session.begin(subtransactions=True):
-            gw = (context.session.query(NetworkGateway).
-                  filter_by(id=gw_id).one())
-            gw['default'] = True
-
-    def prevent_network_gateway_port_deletion(self, context, port):
-        """Pre-deletion check.
-
-        Ensures a port will not be deleted if is being used by a network
-        gateway. In that case an exception will be raised.
-        """
-        if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF:
-            raise NetworkGatewayPortInUse(port_id=port['id'],
-                                          device_owner=port['device_owner'])
-
-    def _validate_device_list(self, context, tenant_id, gateway_data):
-        device_query = self._query_gateway_devices(
-            context, filters={'id': [device['id']
-                                     for device in gateway_data['devices']]})
-        retrieved_device_ids = set()
-        for device in device_query:
-            retrieved_device_ids.add(device['id'])
-            if device['tenant_id'] != tenant_id:
-                raise GatewayDeviceNotFound(device_id=device['id'])
-        missing_device_ids = (
-            set(device['id'] for device in gateway_data['devices']) -
-            retrieved_device_ids)
-        if missing_device_ids:
-            raise GatewayDevicesNotFound(
-                device_ids=",".join(missing_device_ids))
-
-    def create_network_gateway(self, context, network_gateway,
-            validate_device_list=True):
-        gw_data = network_gateway[self.gateway_resource]
-        tenant_id = self._get_tenant_id_for_create(context, gw_data)
-        with context.session.begin(subtransactions=True):
-            gw_db = NetworkGateway(
-                id=gw_data.get('id', uuidutils.generate_uuid()),
-                tenant_id=tenant_id,
-                name=gw_data.get('name'))
-            # Device list is guaranteed to be a valid list, but some devices
-            # might still either not exist or belong to a different tenant
-            if validate_device_list:
-                self._validate_device_list(context, tenant_id, gw_data)
-            gw_db.devices.extend([NetworkGatewayDeviceReference(**device)
-                                  for device in gw_data['devices']])
-            context.session.add(gw_db)
-        LOG.debug("Created network gateway with id:%s", gw_db['id'])
-        return self._make_network_gateway_dict(gw_db)
-
-    def update_network_gateway(self, context, id, network_gateway):
-        gw_data = network_gateway[self.gateway_resource]
-        with context.session.begin(subtransactions=True):
-            gw_db = self._get_network_gateway(context, id)
-            if gw_db.default:
-                raise NetworkGatewayUnchangeable(gateway_id=id)
-            # Ensure there is something to update before doing it
-            if any([gw_db[k] != gw_data[k] for k in gw_data]):
-                gw_db.update(gw_data)
-        LOG.debug("Updated network gateway with id:%s", id)
-        return self._make_network_gateway_dict(gw_db)
-
-    def get_network_gateway(self, context, id, fields=None):
-        gw_db = self._get_network_gateway(context, id)
-        return self._make_network_gateway_dict(gw_db, fields)
-
-    def delete_network_gateway(self, context, id):
-        with context.session.begin(subtransactions=True):
-            gw_db = self._get_network_gateway(context, id)
-            if gw_db.network_connections:
-                raise GatewayInUse(gateway_id=id)
-            if gw_db.default:
-                raise NetworkGatewayUnchangeable(gateway_id=id)
-            context.session.delete(gw_db)
-        LOG.debug("Network gateway '%s' was destroyed.", id)
-
-    def get_network_gateways(self, context, filters=None, fields=None,
-                             sorts=None, limit=None, marker=None,
-                             page_reverse=False):
-        marker_obj = self._get_marker_obj(
-            context, 'network_gateway', limit, marker)
-        return self._get_collection(context, NetworkGateway,
-                                    self._make_network_gateway_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts, limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def connect_network(self, context, network_gateway_id,
-                        network_mapping_info):
-        network_id = self._validate_network_mapping_info(network_mapping_info)
-        LOG.debug("Connecting network '%(network_id)s' to gateway "
-                  "'%(network_gateway_id)s'",
-                  {'network_id': network_id,
-                   'network_gateway_id': network_gateway_id})
-        with context.session.begin(subtransactions=True):
-            gw_db = self._get_network_gateway(context, network_gateway_id)
-            tenant_id = self._get_tenant_id_for_create(context, gw_db)
-            # TODO(salvatore-orlando): Leverage unique constraint instead
-            # of performing another query!
-            if self._retrieve_gateway_connections(context,
-                                                  network_gateway_id,
-                                                  network_mapping_info):
-                raise GatewayConnectionInUse(mapping=network_mapping_info,
-                                             gateway_id=network_gateway_id)
-            # TODO(salvatore-orlando): Creating a port will give it an IP,
-            # but we actually do not need any. Instead of wasting an IP we
-            # should have a way to say a port shall not be associated with
-            # any subnet
-            try:
-                # We pass the segmentation type and id too - the plugin
-                # might find them useful as the network connection object
-                # does not exist yet.
-                # NOTE: they're not extended attributes, rather extra data
-                # passed in the port structure to the plugin
-                # TODO(salvatore-orlando): Verify optimal solution for
-                # ownership of the gateway port
-                port = self.create_port(context, {
-                    'port':
-                    {'tenant_id': tenant_id,
-                     'network_id': network_id,
-                     'mac_address': attributes.ATTR_NOT_SPECIFIED,
-                     'admin_state_up': True,
-                     'fixed_ips': [],
-                     'device_id': network_gateway_id,
-                     'device_owner': DEVICE_OWNER_NET_GW_INTF,
-                     'name': '',
-                     'gw:segmentation_type':
-                     network_mapping_info.get('segmentation_type'),
-                     'gw:segmentation_id':
-                     network_mapping_info.get('segmentation_id')}})
-            except exceptions.NetworkNotFound:
-                err_msg = (_("Requested network '%(network_id)s' not found."
-                             "Unable to create network connection on "
-                             "gateway '%(network_gateway_id)s") %
-                           {'network_id': network_id,
-                            'network_gateway_id': network_gateway_id})
-                LOG.error(err_msg)
-                raise exceptions.InvalidInput(error_message=err_msg)
-            port_id = port['id']
-            LOG.debug("Gateway port for '%(network_gateway_id)s' "
-                      "created on network '%(network_id)s':%(port_id)s",
-                      {'network_gateway_id': network_gateway_id,
-                       'network_id': network_id,
-                       'port_id': port_id})
-            # Create NetworkConnection record
-            network_mapping_info['port_id'] = port_id
-            network_mapping_info['tenant_id'] = tenant_id
-            gw_db.network_connections.append(
-                NetworkConnection(**network_mapping_info))
-            port_id = port['id']
-            # now deallocate and recycle ip from the port
-            for fixed_ip in port.get('fixed_ips', []):
-                self._delete_ip_allocation(context, network_id,
-                                           fixed_ip['subnet_id'],
-                                           fixed_ip['ip_address'])
-            LOG.debug("Ensured no Ip addresses are configured on port %s",
-                      port_id)
-            return {'connection_info':
-                    {'network_gateway_id': network_gateway_id,
-                     'network_id': network_id,
-                     'port_id': port_id}}
-
-    def disconnect_network(self, context, network_gateway_id,
-                           network_mapping_info):
-        network_id = self._validate_network_mapping_info(network_mapping_info)
-        LOG.debug("Disconnecting network '%(network_id)s' from gateway "
-                  "'%(network_gateway_id)s'",
-                  {'network_id': network_id,
-                   'network_gateway_id': network_gateway_id})
-        with context.session.begin(subtransactions=True):
-            # Uniquely identify connection, otherwise raise
-            try:
-                net_connection = self._retrieve_gateway_connections(
-                    context, network_gateway_id,
-                    network_mapping_info, only_one=True)
-            except sa_orm_exc.NoResultFound:
-                raise GatewayConnectionNotFound(
-                    network_mapping_info=network_mapping_info,
-                    network_gateway_id=network_gateway_id)
-            except sa_orm_exc.MultipleResultsFound:
-                raise MultipleGatewayConnections(
-                    gateway_id=network_gateway_id)
-            # Remove gateway port from network
-            # FIXME(salvatore-orlando): Ensure state of port in NSX is
-            # consistent with outcome of transaction
-            self.delete_port(context, net_connection['port_id'],
-                             nw_gw_port_check=False)
-            # Remove NetworkConnection record
-            context.session.delete(net_connection)
-
-    def _make_gateway_device_dict(self, gateway_device, fields=None,
-                                  include_nsx_id=False):
-        res = {'id': gateway_device['id'],
-               'name': gateway_device['name'],
-               'status': gateway_device['status'],
-               'connector_type': gateway_device['connector_type'],
-               'connector_ip': gateway_device['connector_ip'],
-               'tenant_id': gateway_device['tenant_id']}
-        if include_nsx_id:
-            # Return the NSX mapping as well. This attribute will not be
-            # returned in the API response anyway. Ensure it will not be
-            # filtered out in field selection.
-            if fields:
-                fields.append('nsx_id')
-            res['nsx_id'] = gateway_device['nsx_id']
-        return self._fields(res, fields)
-
-    def _get_gateway_device(self, context, device_id):
-        try:
-            return self._get_by_id(context, NetworkGatewayDevice, device_id)
-        except sa_orm_exc.NoResultFound:
-            raise GatewayDeviceNotFound(device_id=device_id)
-
-    def _is_device_in_use(self, context, device_id):
-        query = self._get_collection_query(
-            context, NetworkGatewayDeviceReference, {'id': [device_id]})
-        return query.first()
-
-    def get_gateway_device(self, context, device_id, fields=None,
-                           include_nsx_id=False):
-        return self._make_gateway_device_dict(
-            self._get_gateway_device(context, device_id),
-            fields, include_nsx_id)
-
-    def _query_gateway_devices(self, context,
-                               filters=None, sorts=None,
-                               limit=None, marker=None,
-                               page_reverse=None):
-        marker_obj = self._get_marker_obj(
-            context, 'gateway_device', limit, marker)
-        return self._get_collection_query(context,
-                                          NetworkGatewayDevice,
-                                          filters=filters,
-                                          sorts=sorts,
-                                          limit=limit,
-                                          marker_obj=marker_obj,
-                                          page_reverse=page_reverse)
-
-    def get_gateway_devices(self, context, filters=None, fields=None,
-                            sorts=None, limit=None, marker=None,
-                            page_reverse=False, include_nsx_id=False):
-        query = self._query_gateway_devices(context, filters, sorts, limit,
-                                            marker, page_reverse)
-        return [self._make_gateway_device_dict(row, fields, include_nsx_id)
-                for row in query]
-
-    def create_gateway_device(self, context, gateway_device,
-                              initial_status=STATUS_UNKNOWN):
-        device_data = gateway_device[self.device_resource]
-        tenant_id = self._get_tenant_id_for_create(context, device_data)
-        with context.session.begin(subtransactions=True):
-            device_db = NetworkGatewayDevice(
-                id=device_data.get('id', uuidutils.generate_uuid()),
-                tenant_id=tenant_id,
-                name=device_data.get('name'),
-                connector_type=device_data['connector_type'],
-                connector_ip=device_data['connector_ip'],
-                status=initial_status)
-            context.session.add(device_db)
-        LOG.debug("Created network gateway device: %s", device_db['id'])
-        return self._make_gateway_device_dict(device_db)
-
-    def update_gateway_device(self, context, gateway_device_id,
-                              gateway_device, include_nsx_id=False):
-        device_data = gateway_device[self.device_resource]
-        with context.session.begin(subtransactions=True):
-            device_db = self._get_gateway_device(context, gateway_device_id)
-            # Ensure there is something to update before doing it
-            if any([device_db[k] != device_data[k] for k in device_data]):
-                device_db.update(device_data)
-        LOG.debug("Updated network gateway device: %s",
-                  gateway_device_id)
-        return self._make_gateway_device_dict(
-            device_db, include_nsx_id=include_nsx_id)
-
-    def delete_gateway_device(self, context, device_id):
-        with context.session.begin(subtransactions=True):
-            # A gateway device should not be deleted
-            # if it is used in any network gateway service
-            if self._is_device_in_use(context, device_id):
-                raise GatewayDeviceInUse(device_id=device_id)
-            device_db = self._get_gateway_device(context, device_id)
-            context.session.delete(device_db)
-        LOG.debug("Deleted network gateway device: %s.", device_id)
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/nsxrouter.py b/vmware_nsx/neutron/plugins/vmware/dbexts/nsxrouter.py
new file mode 100644
index 0000000000..d9f8df5e2a
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/dbexts/nsxrouter.py
@@ -0,0 +1,66 @@
+# Copyright 2013 VMware, Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+from neutron.db import db_base_plugin_v2
+from neutron.extensions import l3
+from neutron.openstack.common import log as logging
+from neutron.plugins.vmware.dbexts import nsxv_models
+
+LOG = logging.getLogger(__name__)
+
+
+class NsxRouterMixin(object):
+    """Mixin class to enable nsx router support."""
+
+    nsx_attributes = []
+
+    def _extend_nsx_router_dict(self, router_res, router_db):
+        nsx_attrs = router_db['nsx_attributes']
+        # Return False if nsx attributes are not definied for this
+        # neutron router
+        for attr in self.nsx_attributes:
+            name = attr['name']
+            default = attr['default']
+            router_res[name] = (
+                nsx_attrs and nsx_attrs[name] or default)
+
+    def _process_nsx_router_create(
+        self, context, router_db, router_req):
+        if not router_db['nsx_attributes']:
+            kwargs = {}
+            for attr in self.nsx_attributes:
+                name = attr['name']
+                default = attr['default']
+                kwargs[name] = router_req.get(name, default)
+            nsx_attributes = nsxv_models.NsxvRouterExtAttributes(
+                router_id=router_db['id'], **kwargs)
+            context.session.add(nsx_attributes)
+            router_db['nsx_attributes'] = nsx_attributes
+        else:
+            # The situation where the record already exists will
+            # be likely once the NSXRouterExtAttributes model
+            # will allow for defining several attributes pertaining
+            # to different extensions
+            for attr in self.nsx_attributes:
+                name = attr['name']
+                default = attr['default']
+                router_db['nsx_attributes'][name] = router_req.get(
+                    name, default)
+        LOG.debug("Nsx router extension successfully processed "
+                  "for router:%s", router_db['id'])
+
+    # Register dict extend functions for ports
+    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
+        l3.ROUTERS, ['_extend_nsx_router_dict'])
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/nsxv_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/nsxv_db.py
new file mode 100644
index 0000000000..0e73ca9318
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/dbexts/nsxv_db.py
@@ -0,0 +1,435 @@
+# Copyright 2013 VMware, Inc.
+#
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo.db import exception as db_exc
+from oslo.utils import excutils
+from sqlalchemy.orm import exc
+
+import neutron.db.api as db
+from neutron.i18n import _, _LE
+from neutron.openstack.common import log as logging
+from neutron.plugins.vmware.common import exceptions as nsx_exc
+from neutron.plugins.vmware.dbexts import nsxv_models
+from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
+from vmware_nsx.neutron.plugins.vmware.vshield.common import constants
+
+
+LOG = logging.getLogger(__name__)
+
+
+def add_nsxv_router_binding(session, router_id, vse_id, lswitch_id, status,
+                            appliance_size=nsxv_constants.LARGE,
+                            edge_type=nsxv_constants.SERVICE_EDGE):
+    with session.begin(subtransactions=True):
+        binding = nsxv_models.NsxvRouterBinding(
+            router_id=router_id,
+            edge_id=vse_id,
+            lswitch_id=lswitch_id,
+            status=status,
+            appliance_size=appliance_size,
+            edge_type=edge_type)
+        session.add(binding)
+        return binding
+
+
+def get_nsxv_router_binding(session, router_id):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvRouterBinding).
+                filter_by(router_id=router_id).first())
+
+
+def get_nsxv_router_binding_by_edge(session, edge_id):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvRouterBinding).
+                filter_by(edge_id=edge_id).first())
+
+
+def get_nsxv_router_bindings(session):
+    with session.begin(subtransactions=True):
+        return session.query(nsxv_models.NsxvRouterBinding).all()
+
+
+def update_nsxv_router_binding(session, router_id, **kwargs):
+    with session.begin(subtransactions=True):
+        binding = (session.query(nsxv_models.NsxvRouterBinding).
+                   filter_by(router_id=router_id).one())
+        for key, value in kwargs.iteritems():
+            binding[key] = value
+
+
+def delete_nsxv_router_binding(session, router_id):
+    with session.begin(subtransactions=True):
+        binding = (session.query(nsxv_models.NsxvRouterBinding).
+                   filter_by(router_id=router_id).one())
+        session.delete(binding)
+
+
+def get_edge_vnic_binding(session, edge_id, network_id):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                filter_by(edge_id=edge_id, network_id=network_id).first())
+
+
+def get_edge_vnic_bindings_by_edge(session, edge_id):
+    query = session.query(nsxv_models.NsxvEdgeVnicBinding)
+    query = query.filter(
+        nsxv_models.NsxvEdgeVnicBinding.edge_id == edge_id,
+        nsxv_models.NsxvEdgeVnicBinding.network_id is not None)
+    return query.all()
+
+
+def get_edge_vnic_bindings_by_int_lswitch(session, lswitch_id):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                filter_by(network_id=lswitch_id).all())
+
+
+def create_edge_vnic_binding(session, edge_id, vnic_index,
+                             network_id, tunnel_index=-1):
+    with session.begin(subtransactions=True):
+        binding = nsxv_models.NsxvEdgeVnicBinding(
+            edge_id=edge_id,
+            vnic_index=vnic_index,
+            tunnel_index=tunnel_index,
+            network_id=network_id)
+        session.add(binding)
+
+
+def delete_edge_vnic_binding_by_network(session, edge_id, network_id):
+    with session.begin(subtransactions=True):
+        binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                   filter_by(edge_id=edge_id, network_id=network_id).one())
+        session.delete(binding)
+
+
+def init_edge_vnic_binding(session, edge_id):
+    """Init edge vnic binding to preallocated 10 available edge vnics."""
+
+    with session.begin(subtransactions=True):
+        for vnic_index in range(constants.MAX_VNIC_NUM)[1:]:
+            start = (vnic_index - 1) * constants.MAX_TUNNEL_NUM
+            stop = vnic_index * constants.MAX_TUNNEL_NUM
+            for tunnel_index in range(start, stop):
+                binding = nsxv_models.NsxvEdgeVnicBinding(
+                    edge_id=edge_id,
+                    vnic_index=vnic_index,
+                    tunnel_index=tunnel_index + 1)
+                session.add(binding)
+
+
+def clean_edge_vnic_binding(session, edge_id):
+    """Clean edge vnic binding."""
+
+    with session.begin(subtransactions=True):
+        (session.query(nsxv_models.NsxvEdgeVnicBinding).
+         filter_by(edge_id=edge_id).delete())
+
+
+def allocate_edge_vnic(session, edge_id, network_id):
+    """Allocate an avaliable edge vnic to network."""
+
+    with session.begin(subtransactions=True):
+        bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                    filter_by(edge_id=edge_id, network_id=None).all())
+        for binding in bindings:
+            if binding['tunnel_index'] % constants.MAX_TUNNEL_NUM == 1:
+                binding['network_id'] = network_id
+                session.add(binding)
+                return binding
+    msg = (_("Failed to allocate one available vnic on edge_id: "
+             ":%(edge_id)s to network_id: %(network_id)s") %
+           {'edge_id': edge_id, 'network_id': network_id})
+    LOG.exception(msg)
+    raise nsx_exc.NsxPluginException(err_msg=msg)
+
+
+def allocate_edge_vnic_with_tunnel_index(session, edge_id, network_id):
+    """Allocate an avaliable edge vnic with tunnel index to network."""
+
+    with session.begin(subtransactions=True):
+        binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                   filter_by(edge_id=edge_id, network_id=None).first())
+        if not binding:
+            msg = (_("Failed to allocate one available vnic on edge_id: "
+                     ":%(edge_id)s to network_id: %(network_id)s") %
+                   {'edge_id': edge_id, 'network_id': network_id})
+            LOG.exception(msg)
+            raise nsx_exc.NsxPluginException(err_msg=msg)
+        binding['network_id'] = network_id
+        session.add(binding)
+    return binding
+
+
+def allocate_specific_edge_vnic(session, edge_id, vnic_index,
+                                tunnel_index, network_id):
+    """Allocate an specific edge vnic to network."""
+
+    with session.begin(subtransactions=True):
+        binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                   filter_by(edge_id=edge_id,
+                             vnic_index=vnic_index,
+                             tunnel_index=tunnel_index).one())
+        binding['network_id'] = network_id
+        session.add(binding)
+    return binding
+
+
+def get_dhcp_edge_network_binding(session, network_id):
+    with session.begin(subtransactions=True):
+        dhcp_router_edges = [binding['edge_id']
+                             for binding in get_nsxv_router_bindings(session)
+                             if binding['router_id'].startswith(
+                                 constants.DHCP_EDGE_PREFIX)]
+        bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                    filter_by(network_id=network_id))
+        for binding in bindings:
+            edge_id = binding['edge_id']
+            if edge_id in dhcp_router_edges:
+                return binding
+
+
+def free_edge_vnic_by_network(session, edge_id, network_id):
+    """Free an edge vnic."""
+
+    with session.begin(subtransactions=True):
+        binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
+                   filter_by(edge_id=edge_id, network_id=network_id).one())
+        binding['network_id'] = None
+        session.add(binding)
+    return binding
+
+
+def create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id):
+    with session.begin(subtransactions=True):
+        binding = nsxv_models.NsxvEdgeDhcpStaticBinding(
+            edge_id=edge_id,
+            mac_address=mac_address,
+            binding_id=binding_id)
+        session.add(binding)
+
+
+def get_edge_dhcp_static_binding(session, edge_id, mac_address):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).
+                filter_by(edge_id=edge_id, mac_address=mac_address).first())
+
+
+def delete_edge_dhcp_static_binding(session, edge_id, mac_address):
+    with session.begin(subtransactions=True):
+        session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by(
+            edge_id=edge_id, mac_address=mac_address).delete()
+
+
+def clean_edge_dhcp_static_bindings_by_edge(session, edge_id):
+    with session.begin(subtransactions=True):
+        session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by(
+            edge_id=edge_id).delete()
+
+
+def create_nsxv_internal_network(session, network_purpose, network_id):
+    with session.begin(subtransactions=True):
+        try:
+            network = nsxv_models.NsxvInternalNetworks(
+                network_purpose=network_purpose,
+                network_id=network_id)
+            session.add(network)
+        except db_exc.DBDuplicateEntry:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Duplicate internal network for purpose %s"),
+                              network_purpose)
+
+
+def get_nsxv_internal_network(session, network_purpose):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvInternalNetworks).
+                filter_by(network_purpose=network_purpose).all())
+
+
+def delete_nsxv_internal_network(session, network_purpose):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvInternalNetworks).
+                filter_by(network_purpose=network_purpose).delete())
+
+
+def create_nsxv_internal_edge(session, ext_ip_address, purpose, router_id):
+    with session.begin(subtransactions=True):
+        try:
+            internal_edge = nsxv_models.NsxvInternalEdges(
+                ext_ip_address=ext_ip_address,
+                purpose=purpose,
+                router_id=router_id)
+            session.add(internal_edge)
+        except db_exc.DBDuplicateEntry:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Duplicate internal Edge IP %s"),
+                              ext_ip_address)
+
+
+def get_nsxv_internal_edge(session, ext_ip_address):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvInternalEdges).
+                filter_by(ext_ip_address=ext_ip_address).all())
+
+
+def get_nsxv_internal_edges_by_purpose(session, purpose):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvInternalEdges).
+                filter_by(purpose=purpose).all())
+
+
+def delete_nsxv_internal_edge(session, ext_ip_address):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvInternalEdges).
+                filter_by(ext_ip_address=ext_ip_address).delete())
+
+
+def add_neutron_nsx_section_mapping(session, neutron_id, ip_section_id,
+                                    mac_section_id=None):
+    with session.begin(subtransactions=True):
+        mapping = nsxv_models.NsxvSectionMapping(
+            neutron_id=neutron_id, ip_section_id=ip_section_id,
+            mac_section_id=mac_section_id)
+        session.add(mapping)
+    return mapping
+
+
+def add_neutron_nsx_rule_mapping(session, neutron_id, nsx_rule_id):
+    with session.begin(subtransactions=True):
+        mapping = nsxv_models.NsxvRuleMapping(neutron_id=neutron_id,
+                                              nsx_rule_id=nsx_rule_id)
+        session.add(mapping)
+    return mapping
+
+
+def add_neutron_nsx_port_vnic_mapping(session, neutron_id, nsx_id):
+    with session.begin(subtransactions=True):
+        mapping = nsxv_models.NsxvPortVnicMapping(
+            neutron_id=neutron_id, nsx_id=nsx_id)
+        session.add(mapping)
+    return mapping
+
+
+def get_nsx_section(session, neutron_id):
+    try:
+        mapping = (session.query(nsxv_models.NsxvSectionMapping).
+                   filter_by(neutron_id=neutron_id).
+                   one())
+        return mapping
+    except exc.NoResultFound:
+        LOG.debug("NSX identifiers for neutron security group %s not yet "
+                  "stored in Neutron DB", neutron_id)
+
+
+def get_nsx_rule_id(session, neutron_id):
+    try:
+        mapping = (session.query(nsxv_models.NsxvRuleMapping).
+                   filter_by(neutron_id=neutron_id).
+                   one())
+        return mapping['nsx_rule_id']
+    except exc.NoResultFound:
+        LOG.debug("NSX identifiers for neutron rule %s not yet "
+                  "stored in Neutron DB", neutron_id)
+
+
+def get_nsx_vnic_id(session, neutron_id):
+    try:
+        mapping = (session.query(nsxv_models.NsxvPortVnicMapping).
+                   filter_by(neutron_id=neutron_id).
+                   one())
+        return mapping['nsx_id']
+    except exc.NoResultFound:
+        LOG.debug("NSX identifiers for neutron port %s not yet "
+                  "stored in Neutron DB", neutron_id)
+
+
+def get_network_bindings(session, network_id):
+    session = session or db.get_session()
+    return (session.query(nsxv_models.NsxvTzNetworkBinding).
+            filter_by(network_id=network_id).
+            all())
+
+
+def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id,
+                                                    phy_uuid):
+    session = session or db.get_session()
+    return (session.query(nsxv_models.NsxvTzNetworkBinding).
+            filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid).
+            all())
+
+
+def delete_network_bindings(session, network_id):
+    return (session.query(nsxv_models.NsxvTzNetworkBinding).
+            filter_by(network_id=network_id).delete())
+
+
+def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
+    with session.begin(subtransactions=True):
+        binding = nsxv_models.NsxvTzNetworkBinding(network_id, binding_type,
+                                                   phy_uuid, vlan_id)
+        session.add(binding)
+    return binding
+
+
+def get_network_bindings_by_vlanid(session, vlan_id):
+    session = session or db.get_session()
+    return (session.query(nsxv_models.NsxvTzNetworkBinding).
+            filter_by(vlan_id=vlan_id).
+            all())
+
+
+#
+# Edge Firewall binding methods
+#
+def add_nsxv_edge_firewallrule_binding(session, map_info):
+    with session.begin(subtransactions=True):
+        binding = nsxv_models.NsxvEdgeFirewallRuleBinding(
+            rule_id=map_info['rule_id'],
+            rule_vseid=map_info['rule_vseid'],
+            edge_id=map_info['edge_id'])
+        session.add(binding)
+    return binding
+
+
+def delete_nsxv_edge_firewallrule_binding(session, id):
+    with session.begin(subtransactions=True):
+        if not (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding).
+                filter_by(rule_id=id).delete()):
+            msg = _("Rule Resource binding with id:%s not found!") % id
+            raise nsx_exc.NsxPluginException(err_msg=msg)
+
+
+def get_nsxv_edge_firewallrule_binding(session, id, edge_id):
+    with session.begin(subtransactions=True):
+        return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding).
+                filter_by(rule_id=id, edge_id=edge_id).first())
+
+
+def get_nsxv_edge_firewallrule_binding_by_vseid(
+        session, edge_id, rule_vseid):
+    with session.begin(subtransactions=True):
+        try:
+            return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding).
+                    filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one())
+        except exc.NoResultFound:
+            msg = _("Rule Resource binding not found!")
+            raise nsx_exc.NsxPluginException(err_msg=msg)
+
+
+def cleanup_nsxv_edge_firewallrule_binding(session, edge_id):
+    with session.begin(subtransactions=True):
+        session.query(
+            nsxv_models.NsxvEdgeFirewallRuleBinding).filter_by(
+                edge_id=edge_id).delete()
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/qos_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/qos_db.py
deleted file mode 100644
index f5f56413f3..0000000000
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/qos_db.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright 2013 VMware, Inc.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-from sqlalchemy import sql
-
-from neutron.api.v2 import attributes as attr
-from neutron.db import db_base_plugin_v2
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.i18n import _LI
-from neutron.openstack.common import log
-from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.extensions import qos
-
-
-LOG = log.getLogger(__name__)
-
-
-class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
-    name = sa.Column(sa.String(255))
-    default = sa.Column(sa.Boolean, default=False, server_default=sql.false())
-    min = sa.Column(sa.Integer, nullable=False)
-    max = sa.Column(sa.Integer, nullable=True)
-    qos_marking = sa.Column(sa.Enum('untrusted', 'trusted',
-                                    name='qosqueues_qos_marking'))
-    dscp = sa.Column(sa.Integer)
-
-
-class PortQueueMapping(model_base.BASEV2):
-    port_id = sa.Column(sa.String(36),
-                        sa.ForeignKey("ports.id", ondelete="CASCADE"),
-                        primary_key=True)
-
-    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"),
-                         primary_key=True)
-
-    # Add a relationship to the Port model adding a backref which will
-    # allow SQLAlchemy for eagerly load the queue binding
-    port = orm.relationship(
-        models_v2.Port,
-        backref=orm.backref("qos_queue", uselist=False,
-                            cascade='delete', lazy='joined'))
-
-
-class NetworkQueueMapping(model_base.BASEV2):
-    network_id = sa.Column(sa.String(36),
-                           sa.ForeignKey("networks.id", ondelete="CASCADE"),
-                           primary_key=True)
-
-    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id",
-                                                      ondelete="CASCADE"))
-
-    # Add a relationship to the Network model adding a backref which will
-    # allow SQLAlcremy for eagerly load the queue binding
-    network = orm.relationship(
-        models_v2.Network,
-        backref=orm.backref("qos_queue", uselist=False,
-                            cascade='delete', lazy='joined'))
-
-
-class QoSDbMixin(qos.QueuePluginBase):
-    """Mixin class to add queues."""
-
-    def create_qos_queue(self, context, qos_queue):
-        q = qos_queue['qos_queue']
-        with context.session.begin(subtransactions=True):
-            qos_queue = QoSQueue(id=q.get('id', uuidutils.generate_uuid()),
-                                 name=q.get('name'),
-                                 tenant_id=q['tenant_id'],
-                                 default=q.get('default'),
-                                 min=q.get('min'),
-                                 max=q.get('max'),
-                                 qos_marking=q.get('qos_marking'),
-                                 dscp=q.get('dscp'))
-            context.session.add(qos_queue)
-        return self._make_qos_queue_dict(qos_queue)
-
-    def get_qos_queue(self, context, queue_id, fields=None):
-        return self._make_qos_queue_dict(
-            self._get_qos_queue(context, queue_id), fields)
-
-    def _get_qos_queue(self, context, queue_id):
-        try:
-            return self._get_by_id(context, QoSQueue, queue_id)
-        except exc.NoResultFound:
-            raise qos.QueueNotFound(id=queue_id)
-
-    def get_qos_queues(self, context, filters=None, fields=None, sorts=None,
-                       limit=None, marker=None, page_reverse=False):
-        marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker)
-        return self._get_collection(context, QoSQueue,
-                                    self._make_qos_queue_dict,
-                                    filters=filters, fields=fields,
-                                    sorts=sorts, limit=limit,
-                                    marker_obj=marker_obj,
-                                    page_reverse=page_reverse)
-
-    def delete_qos_queue(self, context, queue_id):
-        qos_queue = self._get_qos_queue(context, queue_id)
-        with context.session.begin(subtransactions=True):
-            context.session.delete(qos_queue)
-
-    def _process_port_queue_mapping(self, context, port_data, queue_id):
-        port_data[qos.QUEUE] = queue_id
-        if not queue_id:
-            return
-        with context.session.begin(subtransactions=True):
-            context.session.add(PortQueueMapping(port_id=port_data['id'],
-                                queue_id=queue_id))
-
-    def _get_port_queue_bindings(self, context, filters=None, fields=None):
-        return self._get_collection(context, PortQueueMapping,
-                                    self._make_port_queue_binding_dict,
-                                    filters=filters, fields=fields)
-
-    def _delete_port_queue_mapping(self, context, port_id):
-        query = self._model_query(context, PortQueueMapping)
-        try:
-            binding = query.filter(PortQueueMapping.port_id == port_id).one()
-        except exc.NoResultFound:
-            # return since this can happen if we are updating a port that
-            # did not already have a queue on it. There is no need to check
-            # if there is one before deleting if we return here.
-            return
-        with context.session.begin(subtransactions=True):
-            context.session.delete(binding)
-
-    def _process_network_queue_mapping(self, context, net_data, queue_id):
-        net_data[qos.QUEUE] = queue_id
-        if not queue_id:
-            return
-        with context.session.begin(subtransactions=True):
-            context.session.add(
-                NetworkQueueMapping(network_id=net_data['id'],
-                                    queue_id=queue_id))
-
-    def _get_network_queue_bindings(self, context, filters=None, fields=None):
-        return self._get_collection(context, NetworkQueueMapping,
-                                    self._make_network_queue_binding_dict,
-                                    filters=filters, fields=fields)
-
-    def _delete_network_queue_mapping(self, context, network_id):
-        query = self._model_query(context, NetworkQueueMapping)
-        with context.session.begin(subtransactions=True):
-            binding = query.filter_by(network_id=network_id).first()
-            if binding:
-                context.session.delete(binding)
-
-    def _extend_dict_qos_queue(self, obj_res, obj_db):
-        queue_mapping = obj_db['qos_queue']
-        if queue_mapping:
-            obj_res[qos.QUEUE] = queue_mapping.get('queue_id')
-        return obj_res
-
-    def _extend_port_dict_qos_queue(self, port_res, port_db):
-        self._extend_dict_qos_queue(port_res, port_db)
-
-    def _extend_network_dict_qos_queue(self, network_res, network_db):
-        self._extend_dict_qos_queue(network_res, network_db)
-
-    # Register dict extend functions for networks and ports
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attr.NETWORKS, ['_extend_network_dict_qos_queue'])
-    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
-        attr.PORTS, ['_extend_port_dict_qos_queue'])
-
-    def _make_qos_queue_dict(self, queue, fields=None):
-        res = {'id': queue['id'],
-               'name': queue.get('name'),
-               'default': queue.get('default'),
-               'tenant_id': queue['tenant_id'],
-               'min': queue.get('min'),
-               'max': queue.get('max'),
-               'qos_marking': queue.get('qos_marking'),
-               'dscp': queue.get('dscp')}
-        return self._fields(res, fields)
-
-    def _make_port_queue_binding_dict(self, queue, fields=None):
-        res = {'port_id': queue['port_id'],
-               'queue_id': queue['queue_id']}
-        return self._fields(res, fields)
-
-    def _make_network_queue_binding_dict(self, queue, fields=None):
-        res = {'network_id': queue['network_id'],
-               'queue_id': queue['queue_id']}
-        return self._fields(res, fields)
-
-    def _check_for_queue_and_create(self, context, port):
-        """Check for queue and create.
-
-        This function determines if a port should be associated with a
-        queue. It works by first querying NetworkQueueMapping to determine
-        if the network is associated with a queue. If so, then it queries
-        NetworkQueueMapping for all the networks that are associated with
-        this queue. Next, it queries against all the ports on these networks
-        with the port device_id. Finally it queries PortQueueMapping. If that
-        query returns a queue_id that is returned. Otherwise a queue is
-        created that is the size of the queue associated with the network and
-        that queue_id is returned.
-
-        If the network is not associated with a queue we then query to see
-        if there is a default queue in the system. If so, a copy of that is
-        created and the queue_id is returned.
-
-        Otherwise None is returned. None is also returned if the port does not
-        have a device_id or if the device_owner is network:
-        """
-
-        queue_to_create = None
-        # If there is no device_id don't create a queue. The queue will be
-        # created on update port when the device_id is present. Also don't
-        # apply QoS to network ports.
-        if (not port.get('device_id') or
-            port['device_owner'].startswith('network:')):
-            return
-
-        # Check if there is a queue associated with the network
-        filters = {'network_id': [port['network_id']]}
-        network_queue_id = self._get_network_queue_bindings(
-            context, filters, ['queue_id'])
-        if network_queue_id:
-            # get networks that queue is associated with
-            filters = {'queue_id': [network_queue_id[0]['queue_id']]}
-            networks_with_same_queue = self._get_network_queue_bindings(
-                context, filters)
-
-            # get the ports on these networks with the same_queue and device_id
-            filters = {'device_id': [port.get('device_id')],
-                       'network_id': [network['network_id'] for
-                                      network in networks_with_same_queue]}
-            query = self._model_query(context, models_v2.Port.id)
-            query = self._apply_filters_to_query(query, models_v2.Port,
-                                                 filters)
-            ports_ids = [p[0] for p in query]
-            if ports_ids:
-                # shared queue already exists find the queue id
-                queues = self._get_port_queue_bindings(context,
-                                                       {'port_id': ports_ids},
-                                                       ['queue_id'])
-                if queues:
-                    return queues[0]['queue_id']
-
-            # get the size of the queue we want to create
-            queue_to_create = self._get_qos_queue(
-                context, network_queue_id[0]['queue_id'])
-
-        else:
-            # check for default queue
-            filters = {'default': [True]}
-            # context is elevated since default queue is owned by admin
-            queue_to_create = self.get_qos_queues(context.elevated(), filters)
-            if not queue_to_create:
-                return
-            queue_to_create = queue_to_create[0]
-
-        # create the queue
-        tenant_id = self._get_tenant_id_for_create(context, port)
-        if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'):
-            queue_to_create['max'] *= int(port[qos.RXTX_FACTOR])
-        queue = {'qos_queue': {'name': queue_to_create.get('name'),
-                               'min': queue_to_create.get('min'),
-                               'max': queue_to_create.get('max'),
-                               'dscp': queue_to_create.get('dscp'),
-                               'qos_marking':
-                               queue_to_create.get('qos_marking'),
-                               'tenant_id': tenant_id}}
-        return self.create_qos_queue(context, queue, False)['id']
-
-    def _validate_qos_queue(self, context, qos_queue):
-        if qos_queue.get('default'):
-            if context.is_admin:
-                if self.get_qos_queues(context, filters={'default': [True]}):
-                    raise qos.DefaultQueueAlreadyExists()
-            else:
-                raise qos.DefaultQueueCreateNotAdmin()
-        if qos_queue.get('qos_marking') == 'trusted':
-            dscp = qos_queue.pop('dscp')
-            if dscp:
-                # must raise because a non-zero dscp was provided
-                raise qos.QueueInvalidMarking()
-            LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
-                         "marking"), dscp)
-        max = qos_queue.get('max')
-        min = qos_queue.get('min')
-        # Max can be None
-        if max and min > max:
-            raise qos.QueueMinGreaterMax()
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_db.py
deleted file mode 100644
index 24b3e5b8ae..0000000000
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_db.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from sqlalchemy.orm import exc
-
-from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.dbexts import vcns_models
-from neutron.plugins.vmware.vshield.common import (
-    exceptions as vcns_exc)
-
-LOG = logging.getLogger(__name__)
-
-
-def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status):
-    with session.begin(subtransactions=True):
-        binding = vcns_models.VcnsRouterBinding(
-            router_id=router_id,
-            edge_id=vse_id,
-            lswitch_id=lswitch_id,
-            status=status)
-        session.add(binding)
-        return binding
-
-
-def get_vcns_router_binding(session, router_id):
-    with session.begin(subtransactions=True):
-        return (session.query(vcns_models.VcnsRouterBinding).
-                filter_by(router_id=router_id).first())
-
-
-def update_vcns_router_binding(session, router_id, **kwargs):
-    with session.begin(subtransactions=True):
-        binding = (session.query(vcns_models.VcnsRouterBinding).
-                   filter_by(router_id=router_id).one())
-        for key, value in kwargs.iteritems():
-            binding[key] = value
-
-
-def delete_vcns_router_binding(session, router_id):
-    with session.begin(subtransactions=True):
-        binding = (session.query(vcns_models.VcnsRouterBinding).
-                   filter_by(router_id=router_id).one())
-        session.delete(binding)
-
-
-#
-# Edge Firewall binding methods
-#
-def add_vcns_edge_firewallrule_binding(session, map_info):
-    with session.begin(subtransactions=True):
-        binding = vcns_models.VcnsEdgeFirewallRuleBinding(
-            rule_id=map_info['rule_id'],
-            rule_vseid=map_info['rule_vseid'],
-            edge_id=map_info['edge_id'])
-        session.add(binding)
-        return binding
-
-
-def delete_vcns_edge_firewallrule_binding(session, id, edge_id):
-    with session.begin(subtransactions=True):
-        if not (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
-                filter_by(rule_id=id, edge_id=edge_id).delete()):
-            msg = _("Rule Resource binding with id:%s not found!") % id
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-
-
-def get_vcns_edge_firewallrule_binding(session, id, edge_id):
-    with session.begin(subtransactions=True):
-        return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
-                filter_by(rule_id=id, edge_id=edge_id).first())
-
-
-def get_vcns_edge_firewallrule_binding_by_vseid(
-        session, edge_id, rule_vseid):
-    with session.begin(subtransactions=True):
-        try:
-            return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
-                    filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one())
-        except exc.NoResultFound:
-            msg = _("Rule Resource binding not found!")
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-
-
-def cleanup_vcns_edge_firewallrule_binding(session, edge_id):
-    with session.begin(subtransactions=True):
-        session.query(
-            vcns_models.VcnsEdgeFirewallRuleBinding).filter_by(
-                edge_id=edge_id).delete()
-
-
-def add_vcns_edge_vip_binding(session, map_info):
-    with session.begin(subtransactions=True):
-        binding = vcns_models.VcnsEdgeVipBinding(
-            vip_id=map_info['vip_id'],
-            edge_id=map_info['edge_id'],
-            vip_vseid=map_info['vip_vseid'],
-            app_profileid=map_info['app_profileid'])
-        session.add(binding)
-
-    return binding
-
-
-def get_vcns_edge_vip_binding(session, id):
-    with session.begin(subtransactions=True):
-        try:
-            qry = session.query(vcns_models.VcnsEdgeVipBinding)
-            return qry.filter_by(vip_id=id).one()
-        except exc.NoResultFound:
-            msg = _("VIP Resource binding with id:%s not found!") % id
-            LOG.exception(msg)
-            raise vcns_exc.VcnsNotFound(
-                resource='router_service_binding', msg=msg)
-
-
-def delete_vcns_edge_vip_binding(session, id):
-    with session.begin(subtransactions=True):
-        qry = session.query(vcns_models.VcnsEdgeVipBinding)
-        if not qry.filter_by(vip_id=id).delete():
-            msg = _("VIP Resource binding with id:%s not found!") % id
-            LOG.exception(msg)
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-
-
-def add_vcns_edge_pool_binding(session, map_info):
-    with session.begin(subtransactions=True):
-        binding = vcns_models.VcnsEdgePoolBinding(
-            pool_id=map_info['pool_id'],
-            edge_id=map_info['edge_id'],
-            pool_vseid=map_info['pool_vseid'])
-        session.add(binding)
-
-    return binding
-
-
-def get_vcns_edge_pool_binding(session, id, edge_id):
-    with session.begin(subtransactions=True):
-        return (session.query(vcns_models.VcnsEdgePoolBinding).
-                filter_by(pool_id=id, edge_id=edge_id).first())
-
-
-def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid):
-    with session.begin(subtransactions=True):
-        try:
-            qry = session.query(vcns_models.VcnsEdgePoolBinding)
-            binding = qry.filter_by(edge_id=edge_id,
-                                    pool_vseid=pool_vseid).one()
-        except exc.NoResultFound:
-            msg = (_("Pool Resource binding with edge_id:%(edge_id)s "
-                     "pool_vseid:%(pool_vseid)s not found!") %
-                   {'edge_id': edge_id, 'pool_vseid': pool_vseid})
-            LOG.exception(msg)
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-        return binding
-
-
-def delete_vcns_edge_pool_binding(session, id, edge_id):
-    with session.begin(subtransactions=True):
-        qry = session.query(vcns_models.VcnsEdgePoolBinding)
-        if not qry.filter_by(pool_id=id, edge_id=edge_id).delete():
-            msg = _("Pool Resource binding with id:%s not found!") % id
-            LOG.exception(msg)
-            raise nsx_exc.NsxPluginException(err_msg=msg)
-
-
-def add_vcns_edge_monitor_binding(session, map_info):
-    with session.begin(subtransactions=True):
-        binding = vcns_models.VcnsEdgeMonitorBinding(
-            monitor_id=map_info['monitor_id'],
-            edge_id=map_info['edge_id'],
-            monitor_vseid=map_info['monitor_vseid'])
-        session.add(binding)
-
-    return binding
-
-
-def get_vcns_edge_monitor_binding(session, id, edge_id):
-    with session.begin(subtransactions=True):
-        return (session.query(vcns_models.VcnsEdgeMonitorBinding).
-                filter_by(monitor_id=id, edge_id=edge_id).first())
-
-
-def delete_vcns_edge_monitor_binding(session, id, edge_id):
-    with session.begin(subtransactions=True):
-        qry = session.query(vcns_models.VcnsEdgeMonitorBinding)
-        if not qry.filter_by(monitor_id=id, edge_id=edge_id).delete():
-            msg = _("Monitor Resource binding with id:%s not found!") % id
-            LOG.exception(msg)
-            raise nsx_exc.NsxPluginException(err_msg=msg)
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_models.py b/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_models.py
deleted file mode 100644
index 41fe2c10b0..0000000000
--- a/vmware_nsx/neutron/plugins/vmware/dbexts/vcns_models.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2013 VMware, Inc.
-#
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-
-import sqlalchemy as sa
-
-from neutron.db import model_base
-from neutron.db import models_v2
-
-
-class VcnsRouterBinding(model_base.BASEV2, models_v2.HasStatusDescription):
-    """Represents the mapping between neutron router and vShield Edge."""
-
-    __tablename__ = 'vcns_router_bindings'
-
-    # no ForeignKey to routers.id because for now, a router can be removed
-    # from routers when delete_router is executed, but the binding is only
-    # removed after the Edge is deleted
-    router_id = sa.Column(sa.String(36),
-                          primary_key=True)
-    edge_id = sa.Column(sa.String(16),
-                        nullable=True)
-    lswitch_id = sa.Column(sa.String(36),
-                           nullable=False)
-
-
-#
-# VCNS Edge FW mapping tables
-#
-class VcnsEdgeFirewallRuleBinding(model_base.BASEV2):
-    """1:1 mapping between firewall rule and edge firewall rule_id."""
-
-    __tablename__ = 'vcns_firewall_rule_bindings'
-
-    rule_id = sa.Column(sa.String(36),
-                        # TODO(dougw) unbreak this link
-                        #sa.ForeignKey("firewall_rules.id"),
-                        primary_key=True)
-    edge_id = sa.Column(sa.String(36), primary_key=True)
-    rule_vseid = sa.Column(sa.String(36))
-
-
-class VcnsEdgePoolBinding(model_base.BASEV2):
-    """Represents the mapping between neutron pool and Edge pool."""
-
-    __tablename__ = 'vcns_edge_pool_bindings'
-
-    pool_id = sa.Column(sa.String(36),
-                        # TODO(dougw) unbreak this link
-                        #sa.ForeignKey("pools.id", ondelete="CASCADE"),
-                        primary_key=True)
-    edge_id = sa.Column(sa.String(36), primary_key=True)
-    pool_vseid = sa.Column(sa.String(36))
-
-
-class VcnsEdgeVipBinding(model_base.BASEV2):
-    """Represents the mapping between neutron vip and Edge vip."""
-
-    __tablename__ = 'vcns_edge_vip_bindings'
-
-    vip_id = sa.Column(sa.String(36),
-                       # TODO(dougw) unbreak this link
-                       #sa.ForeignKey("vips.id", ondelete="CASCADE"),
-                       primary_key=True)
-    edge_id = sa.Column(sa.String(36))
-    vip_vseid = sa.Column(sa.String(36))
-    app_profileid = sa.Column(sa.String(36))
-
-
-class VcnsEdgeMonitorBinding(model_base.BASEV2):
-    """Represents the mapping between neutron monitor and Edge monitor."""
-
-    __tablename__ = 'vcns_edge_monitor_bindings'
-
-    monitor_id = sa.Column(sa.String(36),
-                           # TODO(dougw) unbreak this link
-                           #sa.ForeignKey("healthmonitors.id",
-                           #              ondelete="CASCADE"),
-                           primary_key=True)
-    edge_id = sa.Column(sa.String(36), primary_key=True)
-    monitor_vseid = sa.Column(sa.String(36))
diff --git a/vmware_nsx/neutron/plugins/vmware/dbexts/vnic_index_db.py b/vmware_nsx/neutron/plugins/vmware/dbexts/vnic_index_db.py
new file mode 100644
index 0000000000..cf51fd66a7
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/dbexts/vnic_index_db.py
@@ -0,0 +1,61 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy.orm import exc
+
+from neutron.api.v2 import attributes as attr
+from neutron.db import db_base_plugin_v2
+from neutron.openstack.common import log as logging
+from neutron.plugins.vmware.dbexts import nsxv_models
+from vmware_nsx.neutron.plugins.vmware.extensions import vnic_index as vnicidx
+
+LOG = logging.getLogger(__name__)
+
+
+class VnicIndexDbMixin(object):
+
+    def _extend_port_vnic_index_binding(self, port_res, port_db):
+        state = port_db.vnic_index
+        port_res[vnicidx.VNIC_INDEX] = state.index if state else None
+
+    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
+        attr.PORTS, ['_extend_port_vnic_index_binding'])
+
+    def _get_port_vnic_index(self, context, port_id):
+        """Returns the vnic index for the given port.
+        If the port is not associated with any vnic then return None
+        """
+        session = context.session
+        try:
+            mapping = (session.query(nsxv_models.NsxvPortIndexMapping).
+                       filter_by(port_id=port_id).one())
+            return mapping['index']
+        except exc.NoResultFound:
+            LOG.debug("No record in DB for vnic-index of port %s", port_id)
+
+    def _set_port_vnic_index_mapping(self, context, port_id, device_id, index):
+        """Save the port vnic-index to DB."""
+        session = context.session
+        with session.begin(subtransactions=True):
+            index_mapping_model = nsxv_models.NsxvPortIndexMapping(
+                port_id=port_id, device_id=device_id, index=index)
+            session.add(index_mapping_model)
+
+    def _delete_port_vnic_index_mapping(self, context, port_id):
+        """Delete the port vnic-index association."""
+        session = context.session
+        query = (session.query(nsxv_models.NsxvPortIndexMapping).
+                 filter_by(port_id=port_id))
+        query.delete()
diff --git a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/combined.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/combined.py
index 36ba563e81..f1b7032e06 100644
--- a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/combined.py
+++ b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/combined.py
@@ -18,8 +18,8 @@
 from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
 from neutron.common import constants as const
 from neutron.common import topics
-from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
-from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
 
 
 class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI):
diff --git a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py
index df6ffc3aac..ddcbd0931c 100644
--- a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py
+++ b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/lsnmanager.py
@@ -24,11 +24,11 @@ from neutron.i18n import _LE, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.common import nsx_utils
 from neutron.plugins.vmware.dbexts import lsn_db
-from neutron.plugins.vmware.dhcp_meta import constants as const
-from neutron.plugins.vmware.nsxlib import lsn as lsn_api
-from neutron.plugins.vmware.nsxlib import switch as switch_api
+from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import constants as const
+from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsn_api
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switch_api
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/migration.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/migration.py
index ea6fa7b15c..8fb8ff1090 100644
--- a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/migration.py
+++ b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/migration.py
@@ -21,8 +21,8 @@ from neutron.extensions import external_net
 from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.dhcp_meta import nsx
-from neutron.plugins.vmware.dhcp_meta import rpc
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/nsx.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/nsx.py
index 817a1e163d..89ea45563e 100644
--- a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/nsx.py
+++ b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/nsx.py
@@ -27,8 +27,8 @@ from neutron.extensions import external_net
 from neutron.i18n import _LE, _LI
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.common import exceptions as p_exc
-from neutron.plugins.vmware.dhcp_meta import constants as d_const
-from neutron.plugins.vmware.nsxlib import lsn as lsn_api
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import constants as d_const
+from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsn_api
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/rpc.py b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/rpc.py
index dec1a17c7d..1011055a2d 100644
--- a/vmware_nsx/neutron/plugins/vmware/dhcp_meta/rpc.py
+++ b/vmware_nsx/neutron/plugins/vmware/dhcp_meta/rpc.py
@@ -28,8 +28,8 @@ from neutron.db import models_v2
 from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log as logging
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import config
 from neutron.plugins.vmware.common import exceptions as nsx_exc
+from vmware_nsx.neutron.plugins.vmware.common import config
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/dhcpmeta_modes.py b/vmware_nsx/neutron/plugins/vmware/dhcpmeta_modes.py
index 78820235b8..da00fd4af2 100644
--- a/vmware_nsx/neutron/plugins/vmware/dhcpmeta_modes.py
+++ b/vmware_nsx/neutron/plugins/vmware/dhcpmeta_modes.py
@@ -27,14 +27,14 @@ from neutron.common import topics
 from neutron.db import agents_db
 from neutron.i18n import _LW
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import config
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.dhcp_meta import combined
-from neutron.plugins.vmware.dhcp_meta import lsnmanager
-from neutron.plugins.vmware.dhcp_meta import migration
-from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
-from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
-from neutron.plugins.vmware.extensions import lsn
+from vmware_nsx.neutron.plugins.vmware.common import config
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import combined
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import lsnmanager
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import migration
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
+from vmware_nsx.neutron.plugins.vmware.extensions import lsn
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/extensions/distributedrouter.py b/vmware_nsx/neutron/plugins/vmware/extensions/distributedrouter.py
new file mode 100644
index 0000000000..aa6949b826
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/extensions/distributedrouter.py
@@ -0,0 +1,70 @@
+# Copyright 2013 VMware, Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from neutron.api.v2 import attributes
+
+
+def convert_to_boolean_if_not_none(data):
+    if data is not None:
+        return attributes.convert_to_boolean(data)
+    return data
+
+
+DISTRIBUTED = 'distributed'
+EXTENDED_ATTRIBUTES_2_0 = {
+    'routers': {
+        DISTRIBUTED: {'allow_post': True, 'allow_put': False,
+                      'convert_to': convert_to_boolean_if_not_none,
+                      'default': attributes.ATTR_NOT_SPECIFIED,
+                      'is_visible': True},
+    }
+}
+
+
+class Distributedrouter(object):
+    """Extension class supporting distributed router."""
+
+    @classmethod
+    def get_name(cls):
+        return "Distributed Router"
+
+    @classmethod
+    def get_alias(cls):
+        return "dist-router"
+
+    @classmethod
+    def get_description(cls):
+        return "Enables configuration of NSX Distributed routers."
+
+    @classmethod
+    def get_namespace(cls):
+        return "http://docs.openstack.org/ext/dist-router/api/v1.0"
+
+    @classmethod
+    def get_updated(cls):
+        return "2013-08-1T10:00:00-00:00"
+
+    def get_required_extensions(self):
+        return ["router"]
+
+    @classmethod
+    def get_resources(cls):
+        """Returns Ext Resources."""
+        return []
+
+    def get_extended_resources(self, version):
+        if version == "2.0":
+            return EXTENDED_ATTRIBUTES_2_0
+        else:
+            return {}
diff --git a/vmware_nsx/neutron/plugins/vmware/extensions/metadata_providers.py b/vmware_nsx/neutron/plugins/vmware/extensions/metadata_providers.py
new file mode 100644
index 0000000000..e3a76b3c5f
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/extensions/metadata_providers.py
@@ -0,0 +1,56 @@
+# Copyright 2014 VMware, Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+# Attribute Map
+METADATA_PROVIDERS = 'metadata_providers'
+
+
+EXTENDED_ATTRIBUTES_2_0 = {
+    'subnets': {
+        METADATA_PROVIDERS:
+        {'allow_post': False,
+         'allow_put': False,
+         'is_visible': True,
+         'default': None}}}
+
+
+class Metadata_providers(object):
+    @classmethod
+    def get_name(cls):
+        return "Metadata Providers"
+
+    @classmethod
+    def get_alias(cls):
+        return "metadata-providers"
+
+    @classmethod
+    def get_description(cls):
+        return ("Id of the metadata providers attached to the subnet")
+
+    @classmethod
+    def get_namespace(cls):
+        return(
+            "http://docs.openstack.org/ext/neutron/metadata_providers/api/v1.0"
+        )
+
+    @classmethod
+    def get_updated(cls):
+        return "2014-12-11T12:00:00-00:00"
+
+    def get_extended_resources(self, version):
+        if version == "2.0":
+            return EXTENDED_ATTRIBUTES_2_0
+        else:
+            return {}
diff --git a/vmware_nsx/neutron/plugins/vmware/extensions/networkgw.py b/vmware_nsx/neutron/plugins/vmware/extensions/networkgw.py
index 28df908907..8045d27a2c 100644
--- a/vmware_nsx/neutron/plugins/vmware/extensions/networkgw.py
+++ b/vmware_nsx/neutron/plugins/vmware/extensions/networkgw.py
@@ -19,7 +19,7 @@ from oslo.config import cfg
 
 from neutron.api.v2 import attributes
 from neutron.api.v2 import resource_helper
-from neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware.common import utils
 
 GATEWAY_RESOURCE_NAME = "network_gateway"
 DEVICE_RESOURCE_NAME = "gateway_device"
diff --git a/vmware_nsx/neutron/plugins/vmware/extensions/nvp_qos.py b/vmware_nsx/neutron/plugins/vmware/extensions/nvp_qos.py
index 470f267b5a..450f19f0d4 100644
--- a/vmware_nsx/neutron/plugins/vmware/extensions/nvp_qos.py
+++ b/vmware_nsx/neutron/plugins/vmware/extensions/nvp_qos.py
@@ -17,7 +17,7 @@
 # TODO(arosen): This is deprecated in Juno, and
 # to be removed in Kxxxx.
 
-from neutron.plugins.vmware.extensions import qos
+from vmware_nsx.neutron.plugins.vmware.extensions import qos
 
 
 class Nvp_qos(qos.Qos):
diff --git a/vmware_nsx/neutron/plugins/vmware/extensions/vnic_index.py b/vmware_nsx/neutron/plugins/vmware/extensions/vnic_index.py
new file mode 100644
index 0000000000..a1a9ae4868
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/extensions/vnic_index.py
@@ -0,0 +1,61 @@
+# Copyright 2013 VMware, Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from neutron.api.v2 import attributes
+
+# Attribute Map
+VNIC_INDEX = 'vnic_index'
+
+
+def convert_to_int_if_not_none(data):
+    if data is not None:
+        return attributes.convert_to_int(data)
+    return data
+
+EXTENDED_ATTRIBUTES_2_0 = {
+    'ports': {
+        VNIC_INDEX:
+        {'allow_post': True,
+         'allow_put': True,
+         'is_visible': True,
+         'default': None,
+         'convert_to': convert_to_int_if_not_none}}}
+
+
+class Vnic_index(object):
+    @classmethod
+    def get_name(cls):
+        return "VNIC Index"
+
+    @classmethod
+    def get_alias(cls):
+        return "vnic-index"
+
+    @classmethod
+    def get_description(cls):
+        return ("Enable a port to be associated with a VNIC index")
+
+    @classmethod
+    def get_namespace(cls):
+        return "http://docs.openstack.org/ext/neutron/vnic_index/api/v1.0"
+
+    @classmethod
+    def get_updated(cls):
+        return "2014-09-15T12:00:00-00:00"
+
+    def get_extended_resources(self, version):
+        if version == "2.0":
+            return EXTENDED_ATTRIBUTES_2_0
+        else:
+            return {}
diff --git a/vmware_nsx/neutron/plugins/vmware/nsxlib/l2gateway.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/l2gateway.py
index 170ea35538..3b1a9a1c1b 100644
--- a/vmware_nsx/neutron/plugins/vmware/nsxlib/l2gateway.py
+++ b/vmware_nsx/neutron/plugins/vmware/nsxlib/l2gateway.py
@@ -19,9 +19,9 @@ from oslo.serialization import jsonutils
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import switch
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch
 
 HTTP_GET = "GET"
 HTTP_POST = "POST"
diff --git a/vmware_nsx/neutron/plugins/vmware/nsxlib/lsn.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/lsn.py
index fda319a698..9ea84942dc 100644
--- a/vmware_nsx/neutron/plugins/vmware/nsxlib/lsn.py
+++ b/vmware_nsx/neutron/plugins/vmware/nsxlib/lsn.py
@@ -19,8 +19,8 @@ from neutron.common import exceptions as exception
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 
 HTTP_GET = "GET"
 HTTP_POST = "POST"
diff --git a/vmware_nsx/neutron/plugins/vmware/nsxlib/queue.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/queue.py
index ac2f4df4d6..22e5a8d50e 100644
--- a/vmware_nsx/neutron/plugins/vmware/nsxlib/queue.py
+++ b/vmware_nsx/neutron/plugins/vmware/nsxlib/queue.py
@@ -20,8 +20,8 @@ from neutron.api.v2 import attributes as attr
 from neutron.common import exceptions as exception
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 
 HTTP_POST = "POST"
 HTTP_DELETE = "DELETE"
diff --git a/vmware_nsx/neutron/plugins/vmware/nsxlib/router.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/router.py
index 1b26dfc367..be9d57a1d9 100644
--- a/vmware_nsx/neutron/plugins/vmware/nsxlib/router.py
+++ b/vmware_nsx/neutron/plugins/vmware/nsxlib/router.py
@@ -22,10 +22,10 @@ from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import switch
-from neutron.plugins.vmware.nsxlib import versioning
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch
+from vmware_nsx.neutron.plugins.vmware.nsxlib import versioning
 
 # @versioning.versioned decorator makes the apparent function body
 # totally unrelated to the real function.  This confuses pylint :(
@@ -630,7 +630,7 @@ def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
         raise nsx_exc.NsxPluginException(err_msg=msg)
     except api_exc.NsxApiException as e:
         msg = _("An exception occurred while updating IP addresses on a "
-                "router logical port:%s") % str(e)
+                "router logical port:%s") % e
         LOG.exception(msg)
         raise nsx_exc.NsxPluginException(err_msg=msg)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/nsxlib/secgroup.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/secgroup.py
index 5c0dd5c92d..d6dcd80553 100644
--- a/vmware_nsx/neutron/plugins/vmware/nsxlib/secgroup.py
+++ b/vmware_nsx/neutron/plugins/vmware/nsxlib/secgroup.py
@@ -20,8 +20,8 @@ from neutron.common import constants
 from neutron.common import exceptions
 from neutron.i18n import _LW
 from neutron.openstack.common import log
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 
 HTTP_GET = "GET"
 HTTP_POST = "POST"
diff --git a/vmware_nsx/neutron/plugins/vmware/nsxlib/switch.py b/vmware_nsx/neutron/plugins/vmware/nsxlib/switch.py
index 2a0e692456..73a26d0515 100644
--- a/vmware_nsx/neutron/plugins/vmware/nsxlib/switch.py
+++ b/vmware_nsx/neutron/plugins/vmware/nsxlib/switch.py
@@ -23,8 +23,8 @@ from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 
 HTTP_GET = "GET"
 HTTP_POST = "POST"
@@ -185,8 +185,8 @@ def delete_port(cluster, switch, port):
     uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
     try:
         nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
-    except exception.NotFound:
-        LOG.exception(_LE("Port or Network not found"))
+    except exception.NotFound as e:
+        LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
         raise exception.PortNotFoundOnNetwork(
             net_id=switch, port_id=port)
     except api_exc.NsxApiException:
diff --git a/vmware_nsx/neutron/plugins/vmware/plugin.py b/vmware_nsx/neutron/plugins/vmware/plugin.py
index abe346876c..6526995bc3 100644
--- a/vmware_nsx/neutron/plugins/vmware/plugin.py
+++ b/vmware_nsx/neutron/plugins/vmware/plugin.py
@@ -15,6 +15,8 @@
 #    under the License.
 #
 
-from neutron.plugins.vmware.plugins import base
+from vmware_nsx.neutron.plugins.vmware.plugins import base
+from vmware_nsx.neutron.plugins.vmware.plugins import nsx_v
 
 NsxPlugin = base.NsxPluginV2
+NsxVPlugin = nsx_v.NsxVPluginV2
diff --git a/vmware_nsx/neutron/plugins/vmware/plugins/base.py b/vmware_nsx/neutron/plugins/vmware/plugins/base.py
index e6387fe584..e4c67b00f6 100644
--- a/vmware_nsx/neutron/plugins/vmware/plugins/base.py
+++ b/vmware_nsx/neutron/plugins/vmware/plugins/base.py
@@ -55,27 +55,27 @@ from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import lockutils
 from neutron.openstack.common import log as logging
 from neutron.plugins.common import constants as plugin_const
-from neutron.plugins import vmware
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.common import config  # noqa
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware.common import securitygroups as sg_utils
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.common import utils as c_utils
-from neutron.plugins.vmware.dbexts import db as nsx_db
 from neutron.plugins.vmware.dbexts import maclearning as mac_db
 from neutron.plugins.vmware.dbexts import networkgw_db
 from neutron.plugins.vmware.dbexts import qos_db
-from neutron.plugins.vmware import dhcpmeta_modes
 from neutron.plugins.vmware.extensions import maclearning as mac_ext
-from neutron.plugins.vmware.extensions import networkgw
-from neutron.plugins.vmware.extensions import qos
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
-from neutron.plugins.vmware.nsxlib import queue as queuelib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
+from vmware_nsx.neutron.plugins import vmware
+from vmware_nsx.neutron.plugins.vmware.common import config  # noqa
+from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
+from vmware_nsx.neutron.plugins.vmware.common import securitygroups as sg_utils
+from vmware_nsx.neutron.plugins.vmware.common import sync
+from vmware_nsx.neutron.plugins.vmware.common import utils as c_utils
+from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
+from vmware_nsx.neutron.plugins.vmware import dhcpmeta_modes
+from vmware_nsx.neutron.plugins.vmware.extensions import networkgw
+from vmware_nsx.neutron.plugins.vmware.extensions import qos
+from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import queue as queuelib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
 
 LOG = logging.getLogger(__name__)
 
diff --git a/vmware_nsx/neutron/plugins/vmware/plugins/nsx_v.py b/vmware_nsx/neutron/plugins/vmware/plugins/nsx_v.py
new file mode 100644
index 0000000000..adbdb9368e
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/plugins/nsx_v.py
@@ -0,0 +1,1855 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import uuid
+
+import netaddr
+from oslo.config import cfg
+from oslo.utils import excutils
+from sqlalchemy.orm import exc as sa_exc
+
+from neutron.api import extensions as neutron_extensions
+from neutron.api.v2 import attributes as attr
+from neutron.common import constants
+from neutron.common import exceptions as n_exc
+from neutron.common import utils
+from neutron import context as neutron_context
+from neutron.db import allowedaddresspairs_db as addr_pair_db
+from neutron.db import db_base_plugin_v2
+from neutron.db import external_net_db
+from neutron.db import extraroute_db
+from neutron.db import l3_db
+from neutron.db import l3_gwmode_db
+from neutron.db import models_v2
+from neutron.db import portbindings_db
+from neutron.db import portsecurity_db
+from neutron.db import quota_db  # noqa
+from neutron.db import securitygroups_db
+from neutron.extensions import allowedaddresspairs as addr_pair
+from neutron.extensions import external_net as ext_net_extn
+from neutron.extensions import l3
+from neutron.extensions import multiprovidernet as mpnet
+from neutron.extensions import portbindings as pbin
+from neutron.extensions import portsecurity as psec
+from neutron.extensions import providernet as pnet
+from neutron.extensions import securitygroup as ext_sg
+from neutron.i18n import _LE, _LW
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import uuidutils
+from neutron.plugins.vmware.common import exceptions as nsx_exc
+from neutron.plugins.vmware.dbexts import networkgw_db
+from vmware_nsx.neutron.plugins import vmware
+from vmware_nsx.neutron.plugins.vmware.common import config  # noqa
+from vmware_nsx.neutron.plugins.vmware.common import utils as c_utils
+from vmware_nsx.neutron.plugins.vmware.dbexts import (
+    distributedrouter as dist_rtr)
+from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
+from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
+from vmware_nsx.neutron.plugins.vmware.dbexts import vnic_index_db
+from vmware_nsx.neutron.plugins.vmware.extensions import (
+    metadata_providers as subnet_md)
+from vmware_nsx.neutron.plugins.vmware.extensions import (
+    vnic_index as ext_vnic_idx)
+from vmware_nsx.neutron.plugins.vmware.plugins import nsx_v_md_proxy
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
+    constants as vcns_const)
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
+    exceptions as vsh_exc)
+from vmware_nsx.neutron.plugins.vmware.vshield import edge_utils
+from vmware_nsx.neutron.plugins.vmware.vshield import securitygroup_utils
+from vmware_nsx.neutron.plugins.vmware.vshield import vcns_driver
+
+LOG = logging.getLogger(__name__)
+PORTGROUP_PREFIX = 'dvportgroup'
+
+
+class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
+                   db_base_plugin_v2.NeutronDbPluginV2,
+                   dist_rtr.DistributedRouter_mixin,
+                   external_net_db.External_net_db_mixin,
+                   extraroute_db.ExtraRoute_db_mixin,
+                   l3_gwmode_db.L3_NAT_db_mixin,
+                   networkgw_db.NetworkGatewayMixin,
+                   portbindings_db.PortBindingMixin,
+                   portsecurity_db.PortSecurityDbMixin,
+                   securitygroups_db.SecurityGroupDbMixin,
+                   vnic_index_db.VnicIndexDbMixin):
+
+    supported_extension_aliases = ["allowed-address-pairs",
+                                   "binding",
+                                   "dist-router",
+                                   "multi-provider",
+                                   "port-security",
+                                   "provider",
+                                   "quotas",
+                                   "external-net",
+                                   "extraroute",
+                                   "router",
+                                   "security-group",
+                                   "vnic-index",
+                                   "metadata-providers"]
+
+    __native_bulk_support = True
+    __native_pagination_support = True
+    __native_sorting_support = True
+
+    def __init__(self):
+        super(NsxVPluginV2, self).__init__()
+        config.validate_nsxv_config_options()
+        neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH])
+
+        self.base_binding_dict = {
+            pbin.VNIC_TYPE: pbin.VNIC_NORMAL,
+            pbin.VIF_TYPE: pbin.VIF_TYPE_DVS,
+            pbin.VIF_DETAILS: {
+                # TODO(rkukura): Replace with new VIF security details
+                pbin.CAP_PORT_FILTER:
+                'security-group' in self.supported_extension_aliases}}
+        # Create the client to interface with the NSX-v
+        _nsx_v_callbacks = edge_utils.NsxVCallbacks(self)
+        self.nsx_v = vcns_driver.VcnsDriver(_nsx_v_callbacks)
+        self.edge_manager = edge_utils.EdgeManager(self.nsx_v)
+        self.vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id
+        self.dvs_id = cfg.CONF.nsxv.dvs_id
+        self.nsx_sg_utils = securitygroup_utils.NsxSecurityGroupUtils(
+            self.nsx_v)
+        self._validate_config()
+        self._create_cluster_default_fw_rules()
+
+        has_metadata_cfg = (cfg.CONF.nsxv.nova_metadata_ips is not None
+                            and cfg.CONF.nsxv.mgt_net_moid is not None
+                            and cfg.CONF.nsxv.mgt_net_proxy_ips is not None)
+        self.metadata_proxy_handler = (
+            nsx_v_md_proxy.NsxVMetadataProxyHandler(self)
+            if has_metadata_cfg else None)
+
+    def _create_cluster_default_fw_rules(self):
+        # default cluster rules
+        rules = [{'name': 'Default DHCP rule for OS Security Groups',
+                  'action': 'allow',
+                  'services': [('17', '67', None, None),
+                               ('17', '68', None, None)]},
+                 {'name': 'ICMPv6 neighbor protocol for Security Groups',
+                  'action': 'allow',
+                  'services': [('58', None, '135', None),
+                               ('58', None, '136', None)]},
+                 {'name': 'Block All',
+                  'action': 'deny',
+                  'services': []}]
+
+        rule_list = []
+        for cluster_moid in cfg.CONF.nsxv.cluster_moid:
+            for rule in rules:
+                rule_config = self.nsx_sg_utils.get_rule_config(
+                    cluster_moid, rule['name'], rule['action'],
+                    'ClusterComputeResource', services=rule['services'])
+                rule_list.append(rule_config)
+
+        if rule_list:
+            section_name = 'OS Cluster Security Group section'
+            section_id = self.nsx_v.vcns.get_section_id(section_name)
+            section = self.nsx_sg_utils.get_section_with_rules(
+                section_name, rule_list)
+            if section_id:
+                section.attrib['id'] = section_id
+                self.nsx_v.vcns.update_section_by_id(
+                    section_id, 'ip', self.nsx_sg_utils.to_xml_string(section))
+            else:
+                try:
+                    self.nsx_v.vcns.create_section(
+                        'ip', self.nsx_sg_utils.to_xml_string(section))
+                except vsh_exc.RequestBad as e:
+                    # Section already exists, log-it and return
+                    LOG.debug("Could not create NSX fw section for cluster"
+                              " %s: %s", cluster_moid, e.response)
+
+    def _create_dhcp_static_binding(self, context, neutron_port_db):
+
+        network_id = neutron_port_db['network_id']
+        device_owner = neutron_port_db['device_owner']
+        if device_owner.startswith("compute"):
+            s_bindings = self._create_static_binding(context,
+                                                     neutron_port_db)
+            edge_utils.create_dhcp_bindings(context, self.nsx_v,
+                                            network_id, s_bindings)
+
+    def _delete_dhcp_static_binding(self, context, neutron_port_db):
+
+        network_id = neutron_port_db['network_id']
+        device_owner = neutron_port_db['device_owner']
+        if device_owner.startswith("compute"):
+            edge_utils.delete_dhcp_binding(context, self.nsx_v, network_id,
+                                           neutron_port_db['mac_address'])
+
+    def _validate_provider_create(self, context, network):
+        if not attr.is_attr_set(network.get(mpnet.SEGMENTS)):
+            return
+
+        external = network.get(ext_net_extn.EXTERNAL)
+        for segment in network[mpnet.SEGMENTS]:
+            network_type = segment.get(pnet.NETWORK_TYPE)
+            physical_network = segment.get(pnet.PHYSICAL_NETWORK)
+            segmentation_id = segment.get(pnet.SEGMENTATION_ID)
+            network_type_set = attr.is_attr_set(network_type)
+            segmentation_id_set = attr.is_attr_set(segmentation_id)
+            physical_network_set = attr.is_attr_set(physical_network)
+
+            err_msg = None
+            if not network_type_set:
+                err_msg = _("%s required") % pnet.NETWORK_TYPE
+            elif (attr.is_attr_set(external) and external and
+                  network_type != c_utils.NsxVNetworkTypes.PORTGROUP):
+                    err_msg = _("portgroup only supported on external "
+                                "networks")
+            elif network_type == c_utils.NsxVNetworkTypes.FLAT:
+                if segmentation_id_set:
+                    err_msg = _("Segmentation ID cannot be specified with "
+                                "flat network type")
+            elif network_type == c_utils.NsxVNetworkTypes.VLAN:
+                if not segmentation_id_set:
+                    err_msg = _("Segmentation ID must be specified with "
+                                "vlan network type")
+                elif (segmentation_id_set and
+                      not utils.is_valid_vlan_tag(segmentation_id)):
+                    err_msg = (_("%(segmentation_id)s out of range "
+                                 "(%(min_id)s through %(max_id)s)") %
+                               {'segmentation_id': segmentation_id,
+                                'min_id': constants.MIN_VLAN_TAG,
+                                'max_id': constants.MAX_VLAN_TAG})
+                else:
+                    # Verify segment is not already allocated
+                    bindings = nsxv_db.get_network_bindings_by_vlanid(
+                        context.session, segmentation_id)
+                    if bindings:
+                        phy_uuid = (physical_network if physical_network_set
+                                    else self.dvs_id)
+                        for binding in bindings:
+                            if binding['phy_uuid'] == phy_uuid:
+                                raise n_exc.VlanIdInUse(
+                                    vlan_id=segmentation_id,
+                                    physical_network=phy_uuid)
+
+            elif network_type == c_utils.NsxVNetworkTypes.VXLAN:
+                # Currently unable to set the segmentation id
+                if segmentation_id_set:
+                    err_msg = _("Segmentation ID cannot be set with VXLAN")
+            elif network_type == c_utils.NsxVNetworkTypes.PORTGROUP:
+                if segmentation_id_set:
+                    err_msg = _("Segmentation ID cannot be set with portgroup")
+                if (not attr.is_attr_set(external) or
+                    attr.is_attr_set(external) and not external):
+                    err_msg = _("portgroup only supported on external "
+                                "networks")
+                physical_net_set = attr.is_attr_set(physical_network)
+                if (physical_net_set
+                    and not self.nsx_v.vcns.validate_network(
+                        physical_network)):
+                    err_msg = _("Physical network doesn't exist")
+            else:
+                err_msg = (_("%(net_type_param)s %(net_type_value)s not "
+                             "supported") %
+                           {'net_type_param': pnet.NETWORK_TYPE,
+                            'net_type_value': network_type})
+            if err_msg:
+                raise n_exc.InvalidInput(error_message=err_msg)
+            # TODO(salvatore-orlando): Validate tranport zone uuid
+            # which should be specified in physical_network
+
+    def _extend_network_dict_provider(self, context, network,
+                                      multiprovider=None, bindings=None):
+        if not bindings:
+            bindings = nsxv_db.get_network_bindings(context.session,
+                                                    network['id'])
+        if not multiprovider:
+            multiprovider = nsx_db.is_multiprovider_network(context.session,
+                                                            network['id'])
+        # With NSX plugin 'normal' overlay networks will have no binding
+        # TODO(salvatore-orlando) make sure users can specify a distinct
+        # phy_uuid as 'provider network' for STT net type
+        if bindings:
+            if not multiprovider:
+                # network came in through provider networks api
+                network[pnet.NETWORK_TYPE] = bindings[0].binding_type
+                network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
+                network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
+            else:
+                # network come in though multiprovider networks api
+                network[mpnet.SEGMENTS] = [
+                    {pnet.NETWORK_TYPE: binding.binding_type,
+                     pnet.PHYSICAL_NETWORK: binding.phy_uuid,
+                     pnet.SEGMENTATION_ID: binding.vlan_id}
+                    for binding in bindings]
+
+    def _get_name(self, id, name):
+        if name is not None:
+            return '%s (%s)' % (name, id)
+        return id
+
+    def _get_subnet_md_providers(self, context, subnet):
+        net_id = subnet.get('network_id')
+        if net_id is None:
+            net_id = self.get_subnet(context, subnet['id']).get('network_id')
+        md_provider_data = nsxv_db.get_edge_vnic_bindings_by_int_lswitch(
+            context.session, net_id)
+
+        md_providers = [mdp['edge_id'] for mdp in md_provider_data]
+        return md_providers
+
+    def get_subnet(self, context, id, fields=None):
+        subnet = super(NsxVPluginV2, self).get_subnet(context, id, fields)
+        if context.is_admin:
+            subnet[subnet_md.METADATA_PROVIDERS] = (
+                self._get_subnet_md_providers(context, subnet))
+        return subnet
+
+    def get_subnets(self, context, filters=None, fields=None, sorts=None,
+                    limit=None, marker=None, page_reverse=False):
+        subnets = super(NsxVPluginV2, self).get_subnets(context, filters,
+                                                        fields, sorts, limit,
+                                                        marker, page_reverse)
+
+        if not context.is_admin:
+            return subnets
+
+        new_subnets = []
+        if (not fields
+            or subnet_md.METADATA_PROVIDERS in fields
+            or (filters and filters.get(subnet_md.METADATA_PROVIDERS))):
+
+            # We only deal metadata provider field when:
+            # - All fields are retrieved
+            # - metadata_provider is explicitly retrieved
+            # - metadata_provider is used in a filter
+            for subnet in subnets:
+                md_provider = self._get_subnet_md_providers(context, subnet)
+                md_filter = (None if filters is None
+                             else filters.get('metadata_providers'))
+
+                if md_filter is None or len(set(md_provider) & set(md_filter)):
+                    # Include metadata_providers only if requested in results
+                    if not fields or subnet_md.METADATA_PROVIDERS in fields:
+                        subnet[subnet_md.METADATA_PROVIDERS] = md_provider
+
+                    new_subnets.append(subnet)
+        else:
+            # No need to handle metadata providers field
+            return subnets
+
+        return new_subnets
+
+    def _convert_to_transport_zones_dict(self, network):
+        """Converts the provider request body to multiprovider.
+        Returns: True if request is multiprovider False if provider
+        and None if neither.
+        """
+        if any(attr.is_attr_set(network.get(f))
+               for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
+                         pnet.SEGMENTATION_ID)):
+            if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
+                raise mpnet.SegmentsSetInConjunctionWithProviders()
+            # convert to transport zone list
+            network[mpnet.SEGMENTS] = [
+                {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
+                 pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
+                 pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
+            del network[pnet.NETWORK_TYPE]
+            del network[pnet.PHYSICAL_NETWORK]
+            del network[pnet.SEGMENTATION_ID]
+            return False
+        if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
+            return True
+
+    def _delete_backend_network(self, moref):
+        """Deletes the backend NSX network.
+
+        This can either be a VXLAN or a VLAN network. The type is determined
+        by the prefix of the moref.
+        """
+        if moref.startswith(PORTGROUP_PREFIX):
+            self.nsx_v.delete_port_group(self.dvs_id, moref)
+        else:
+            self.nsx_v.delete_virtual_wire(moref)
+
+    def _get_vlan_network_name(self, net_data):
+        if net_data['name'] == '':
+            return net_data['id']
+        else:
+            # Maximum name length is 80 characters. 'id' length is 36
+            # maximum prefix for name is 43
+            return '%s-%s' % (net_data['name'][:43], net_data['id'])
+
+    def _get_default_security_group(self, context, tenant_id):
+        return self._ensure_default_security_group(context, tenant_id)
+
+    def _add_security_groups_port_mapping(self, session, vnic_id,
+                                          added_sgids):
+        if vnic_id is None or added_sgids is None:
+            return
+        for add_sg in added_sgids:
+            nsx_sg_id = nsx_db.get_nsx_security_group_id(session, add_sg)
+            if nsx_sg_id is None:
+                LOG.warning(_LW("NSX security group not found for %s"), add_sg)
+            else:
+                self.nsx_sg_utils.add_port_to_security_group(nsx_sg_id,
+                                                             vnic_id)
+
+    def _delete_security_groups_port_mapping(self, session, vnic_id,
+                                             deleted_sgids):
+        if vnic_id is None or deleted_sgids is None:
+            return
+        # Remove vnic from delete security groups binding
+        for del_sg in deleted_sgids:
+            nsx_sg_id = nsx_db.get_nsx_security_group_id(session, del_sg)
+            if nsx_sg_id is None:
+                LOG.warning(_LW("NSX security group not found for %s"), del_sg)
+            else:
+                try:
+                    h, c = self.nsx_v.vcns.remove_member_from_security_group(
+                        nsx_sg_id, vnic_id)
+                except Exception:
+                    LOG.debug("NSX security group %(nsx_sg_id)s member "
+                              "delete failed %(vnic_id)s",
+                              {'nsx_sg_id': nsx_sg_id,
+                               'vnic_id': vnic_id})
+
+    def _update_security_groups_port_mapping(self, session, port_id,
+                                             vnic_id, current_sgids,
+                                             new_sgids):
+
+        new_sgids = new_sgids or []
+        current_sgids = current_sgids or []
+        # If no vnic binding is found, nothing can be done, so return
+        if vnic_id is None:
+            return
+        deleted_sgids = set()
+        added_sgids = set()
+        # Find all delete security group from port binding
+        for curr_sg in current_sgids:
+            if curr_sg not in new_sgids:
+                deleted_sgids.add(curr_sg)
+        # Find all added security group from port binding
+        for new_sg in new_sgids:
+            if new_sg not in current_sgids:
+                added_sgids.add(new_sg)
+
+        self._delete_security_groups_port_mapping(session, vnic_id,
+                                                  deleted_sgids)
+        self._add_security_groups_port_mapping(session, vnic_id,
+                                               added_sgids)
+
+    def _get_port_vnic_id(self, port_index, device_id):
+        # The vnic-id format which is expected by NSXv
+        return '%s.%03d' % (device_id, port_index)
+
+    def create_network(self, context, network):
+        net_data = network['network']
+        tenant_id = self._get_tenant_id_for_create(context, net_data)
+        self._ensure_default_security_group(context, tenant_id)
+        # Process the provider network extension
+        provider_type = self._convert_to_transport_zones_dict(net_data)
+        self._validate_provider_create(context, net_data)
+        net_data['id'] = str(uuid.uuid4())
+
+        external = net_data.get(ext_net_extn.EXTERNAL)
+        backend_network = (not attr.is_attr_set(external) or
+                           attr.is_attr_set(external) and not external)
+        if backend_network:
+            network_type = None
+            if provider_type is not None:
+                segment = net_data[mpnet.SEGMENTS][0]
+                network_type = segment.get(pnet.NETWORK_TYPE)
+
+            if (provider_type is None or
+                network_type == c_utils.NsxVNetworkTypes.VXLAN):
+                virtual_wire = {"name": net_data['id'],
+                                "tenantId": "virtual wire tenant"}
+                config_spec = {"virtualWireCreateSpec": virtual_wire}
+                h, c = self.nsx_v.vcns.create_virtual_wire(self.vdn_scope_id,
+                                                           config_spec)
+                net_moref = c
+            else:
+                network_name = self._get_vlan_network_name(net_data)
+                vlan_tag = 0
+                segment = net_data[mpnet.SEGMENTS][0]
+                if (segment.get(pnet.NETWORK_TYPE) ==
+                    c_utils.NsxVNetworkTypes.VLAN):
+                    vlan_tag = segment.get(pnet.SEGMENTATION_ID, 0)
+                physical_network = segment.get(pnet.PHYSICAL_NETWORK)
+                dvs_id = (physical_network if attr.is_attr_set(
+                    physical_network) else self.dvs_id)
+                portgroup = {'vlanId': vlan_tag,
+                             'networkBindingType': 'Static',
+                             'networkName': network_name,
+                             'networkType': 'Isolation'}
+                config_spec = {'networkSpec': portgroup}
+                try:
+                    h, c = self.nsx_v.vcns.create_port_group(dvs_id,
+                                                             config_spec)
+                    net_moref = c
+                except Exception as e:
+                    LOG.debug("Failed to create port group: %s",
+                              e.response)
+                    err_msg = (_("Physical network %s is not an existing DVS")
+                               % dvs_id)
+                    raise n_exc.InvalidInput(error_message=err_msg)
+        try:
+            with context.session.begin(subtransactions=True):
+                new_net = super(NsxVPluginV2, self).create_network(context,
+                                                                   network)
+                # Process port security extension
+                self._process_network_port_security_create(
+                    context, net_data, new_net)
+                # DB Operations for setting the network as external
+                self._process_l3_create(context, new_net, net_data)
+                if (net_data.get(mpnet.SEGMENTS) and
+                    isinstance(provider_type, bool)):
+                    net_bindings = []
+                    for tz in net_data[mpnet.SEGMENTS]:
+                        network_type = tz.get(pnet.NETWORK_TYPE)
+                        segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0)
+                        segmentation_id_set = attr.is_attr_set(segmentation_id)
+                        if not segmentation_id_set:
+                            segmentation_id = 0
+                        physical_network = tz.get(pnet.PHYSICAL_NETWORK, '')
+                        physical_net_set = attr.is_attr_set(physical_network)
+                        if not physical_net_set:
+                            physical_network = self.dvs_id
+                        net_bindings.append(nsxv_db.add_network_binding(
+                            context.session, new_net['id'],
+                            network_type,
+                            physical_network,
+                            segmentation_id))
+                    if provider_type:
+                        nsx_db.set_multiprovider_network(context.session,
+                                                         new_net['id'])
+                    self._extend_network_dict_provider(context, new_net,
+                                                       provider_type,
+                                                       net_bindings)
+                if backend_network:
+                    # Save moref in the DB for future access
+                    nsx_db.add_neutron_nsx_network_mapping(
+                        context.session, new_net['id'],
+                        net_moref)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                # Delete the backend network
+                if backend_network:
+                    self._delete_backend_network(net_moref)
+                LOG.exception(_LE('Failed to create network'))
+
+        return new_net
+
+    def delete_network(self, context, id):
+        mappings = nsx_db.get_nsx_switch_ids(
+            context.session, id)
+        bindings = nsxv_db.get_network_bindings(context.session,
+                                                id)
+
+        with context.session.begin(subtransactions=True):
+            super(NsxVPluginV2, self).delete_network(context, id)
+
+        self.edge_manager.delete_dhcp_edge_service(context, id)
+
+        # Do not delete a predefined port group that was attached to
+        # an external network
+        if (bindings and
+            bindings[0].binding_type == c_utils.NsxVNetworkTypes.PORTGROUP):
+            return
+
+        # Delete the backend network if necessary. This is done after
+        # the base operation as that may throw an exception in the case
+        # that there are ports defined on the network.
+        if mappings:
+            self._delete_backend_network(mappings[0])
+
+    def get_network(self, context, id, fields=None):
+        with context.session.begin(subtransactions=True):
+            # goto to the plugin DB and fetch the network
+            network = self._get_network(context, id)
+            # Don't do field selection here otherwise we won't be able
+            # to add provider networks fields
+            net_result = self._make_network_dict(network)
+            self._extend_network_dict_provider(context, net_result)
+        return self._fields(net_result, fields)
+
+    def get_networks(self, context, filters=None, fields=None,
+                     sorts=None, limit=None, marker=None,
+                     page_reverse=False):
+        filters = filters or {}
+        with context.session.begin(subtransactions=True):
+            networks = (
+                super(NsxVPluginV2, self).get_networks(
+                    context, filters, fields, sorts,
+                    limit, marker, page_reverse))
+            for net in networks:
+                self._extend_network_dict_provider(context, net)
+        return [self._fields(network, fields) for network in networks]
+
+    def update_network(self, context, id, network):
+        pnet._raise_if_updates_provider_attributes(network['network'])
+        if network["network"].get("admin_state_up") is False:
+            raise NotImplementedError(_("admin_state_up=False networks "
+                                        "are not supported."))
+        with context.session.begin(subtransactions=True):
+            net = super(NsxVPluginV2, self).update_network(context, id,
+                                                           network)
+            self._process_l3_update(context, net, network['network'])
+            self._extend_network_dict_provider(context, net)
+        return net
+
+    def create_port(self, context, port):
+        # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
+        # then we pass the port to the policy engine. The reason why we don't
+        # pass the value to the policy engine when the port is
+        # ATTR_NOT_SPECIFIED is for the case where a port is created on a
+        # shared network that is not owned by the tenant.
+        port_data = port['port']
+        with context.session.begin(subtransactions=True):
+            # First we allocate port in neutron database
+            neutron_db = super(NsxVPluginV2, self).create_port(context, port)
+            # Update fields obtained from neutron db (eg: MAC address)
+            port["port"].update(neutron_db)
+            # port security extension checks
+            (port_security, has_ip) = self._determine_port_security_and_has_ip(
+                context, port_data)
+            port_data[psec.PORTSECURITY] = port_security
+            self._process_port_port_security_create(
+                context, port_data, neutron_db)
+            # allowed address pair checks
+            if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)):
+                if not port_security:
+                    raise addr_pair.AddressPairAndPortSecurityRequired()
+                else:
+                    self._process_create_allowed_address_pairs(
+                        context, neutron_db,
+                        port_data[addr_pair.ADDRESS_PAIRS])
+            else:
+                # remove ATTR_NOT_SPECIFIED
+                port_data[addr_pair.ADDRESS_PAIRS] = None
+
+            # security group extension checks
+            if port_security and has_ip:
+                self._ensure_default_security_group_on_port(context, port)
+            elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
+                raise psec.PortSecurityAndIPRequiredForSecurityGroups()
+            port_data[ext_sg.SECURITYGROUPS] = (
+                self._get_security_groups_on_port(context, port))
+            self._process_port_create_security_group(
+                context, port_data, port_data[ext_sg.SECURITYGROUPS])
+            self._process_portbindings_create_and_update(context,
+                                                         port['port'],
+                                                         port_data)
+
+        try:
+            # Configure NSX - this should not be done in the DB transaction
+            # Configure the DHCP Edge service
+            self._create_dhcp_static_binding(context, neutron_db)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE('Failed to create network'))
+                # Revert what we have created and raise the exception
+                self.delete_port(context, port_data['id'])
+        return port_data
+
+    def update_port(self, context, id, port):
+        port_data = port['port']
+        current_port = super(NsxVPluginV2, self).get_port(context, id)
+        device_id = current_port['device_id']
+        comp_owner_update = ('device_owner' in port_data and
+                             port_data['device_owner'].startswith('compute:'))
+
+        # Process update for vnic-index
+        vnic_idx = port_data.get(ext_vnic_idx.VNIC_INDEX)
+        # Only set the vnic index for a compute VM
+        if attr.is_attr_set(vnic_idx) and self._is_compute_port(current_port):
+            # Update database only if vnic index was changed
+            if current_port.get(ext_vnic_idx.VNIC_INDEX) != vnic_idx:
+                self._set_port_vnic_index_mapping(
+                    context, id, device_id, vnic_idx)
+            vnic_id = self._get_port_vnic_id(vnic_idx, device_id)
+            self._add_security_groups_port_mapping(
+                context.session, vnic_id, current_port.get('security_groups'))
+
+        delete_security_groups = self._check_update_deletes_security_groups(
+            port)
+        has_security_groups = self._check_update_has_security_groups(port)
+        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
+            port)
+        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
+
+        with context.session.begin(subtransactions=True):
+            ret_port = super(NsxVPluginV2, self).update_port(
+                context, id, port)
+            # copy values over - except fixed_ips as
+            # they've already been processed
+            port['port'].pop('fixed_ips', None)
+            ret_port.update(port['port'])
+            # populate port_security setting
+            if psec.PORTSECURITY not in port['port']:
+                ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
+                    context, id)
+            has_ip = self._ip_on_port(ret_port)
+            # validate port security and allowed address pairs
+            if not ret_port[psec.PORTSECURITY]:
+                #  has address pairs in request
+                if has_addr_pairs:
+                    raise addr_pair.AddressPairAndPortSecurityRequired()
+                elif not delete_addr_pairs:
+                    # check if address pairs are in db
+                    ret_port[addr_pair.ADDRESS_PAIRS] = (
+                        self.get_allowed_address_pairs(context, id))
+                    if ret_port[addr_pair.ADDRESS_PAIRS]:
+                        raise addr_pair.AddressPairAndPortSecurityRequired()
+
+            if (delete_addr_pairs or has_addr_pairs):
+                # delete address pairs and read them in
+                self._delete_allowed_address_pairs(context, id)
+                self._process_create_allowed_address_pairs(
+                    context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS])
+            # checks if security groups were updated adding/modifying
+            # security groups, port security is set and port has ip
+            if not (has_ip and ret_port[psec.PORTSECURITY]):
+                if has_security_groups:
+                    raise psec.PortSecurityAndIPRequiredForSecurityGroups()
+                # Update did not have security groups passed in. Check
+                # that port does not have any security groups already on it.
+                filters = {'port_id': [id]}
+                security_groups = (
+                    super(NsxVPluginV2,
+                          self)._get_port_security_group_bindings(context,
+                                                                  filters)
+                )
+                if security_groups and not delete_security_groups:
+                    raise psec.PortSecurityPortHasSecurityGroup()
+
+            if delete_security_groups or has_security_groups:
+                # delete the port binding and read it with the new rules.
+                self._delete_port_security_group_bindings(context, id)
+                new_sgids = self._get_security_groups_on_port(context, port)
+                self._process_port_create_security_group(context, ret_port,
+                                                         new_sgids)
+
+            if psec.PORTSECURITY in port['port']:
+                self._process_port_port_security_update(
+                    context, port['port'], ret_port)
+
+            LOG.debug("Updating port: %s", port)
+            self._process_portbindings_create_and_update(context,
+                                                         port['port'],
+                                                         ret_port)
+
+        if comp_owner_update:
+            # Create dhcp bindings, the port is now owned by an instance
+            self._create_dhcp_static_binding(context, ret_port)
+
+        # Updating NSXv Security Group membership for vNic
+        vnic_idx = current_port.get(ext_vnic_idx.VNIC_INDEX)
+        if attr.is_attr_set(vnic_idx):
+            vnic_id = self._get_port_vnic_id(vnic_idx, device_id)
+            curr_sgids = current_port.get(ext_sg.SECURITYGROUPS)
+            if ret_port['device_id'] != device_id:
+                # Update change device_id - remove port-vnic assosiation and
+                # delete security-groups memberships for the vnic
+                self._delete_security_groups_port_mapping(
+                    context.session, vnic_id, curr_sgids)
+                self._delete_port_vnic_index_mapping(context, id)
+            elif delete_security_groups or has_security_groups:
+                # Update security-groups,
+                # calculate differences and update vnic membership accordingly.
+                self._update_security_groups_port_mapping(
+                    context.session, id, vnic_id, curr_sgids, new_sgids)
+
+        return ret_port
+
+    def delete_port(self, context, id, l3_port_check=True,
+                    nw_gw_port_check=True):
+        """Deletes a port on a specified Virtual Network.
+
+        If the port contains a remote interface attachment, the remote
+        interface is first un-plugged and then the port is deleted.
+
+        :returns: None
+        :raises: exception.PortInUse
+        :raises: exception.PortNotFound
+        :raises: exception.NetworkNotFound
+        """
+        # if needed, check to see if this is a port owned by
+        # a l3 router.  If so, we should prevent deletion here
+        if l3_port_check:
+            self.prevent_l3_port_deletion(context, id)
+        neutron_db_port = self.get_port(context, id)
+
+        # If this port is attached to a device, remove the corresponding vnic
+        # from all NSXv Security-Groups
+        port_index = neutron_db_port.get(ext_vnic_idx.VNIC_INDEX)
+        if attr.is_attr_set(port_index):
+            vnic_id = self._get_port_vnic_id(port_index,
+                                             neutron_db_port['device_id'])
+            sgids = neutron_db_port.get(ext_sg.SECURITYGROUPS)
+            self._delete_security_groups_port_mapping(
+                context.session, vnic_id, sgids)
+
+        self.disassociate_floatingips(context, id)
+        with context.session.begin(subtransactions=True):
+            super(NsxVPluginV2, self).delete_port(context, id)
+
+        self._delete_dhcp_static_binding(context, neutron_db_port)
+
+    def delete_subnet(self, context, id):
+        subnet = self._get_subnet(context, id)
+        filters = {'fixed_ips': {'subnet_id': [id]}}
+        ports = self.get_ports(context, filters=filters)
+
+        with context.session.begin(subtransactions=True):
+            super(NsxVPluginV2, self).delete_subnet(context, id)
+
+        if subnet['enable_dhcp'] and len(ports) == 1:
+            port = ports.pop()
+            self._delete_port(context, port['id'])
+            network_id = subnet['network_id']
+            # Delete the DHCP edge service
+            filters = {'network_id': [network_id]}
+            remaining_subnets = self.get_subnets(context, filters=filters)
+            if len(remaining_subnets) == 0:
+                LOG.debug("Delete the DHCP Edge for network %s", network_id)
+                self.edge_manager.delete_dhcp_edge_service(context,
+                                                           network_id)
+            else:
+                # Update address group and delete the DHCP port only
+                address_groups = self._create_network_dhcp_address_group(
+                    context, network_id)
+                self.edge_manager.update_dhcp_edge_service(
+                    context, network_id, address_groups=address_groups)
+
+    def create_subnet(self, context, subnet):
+        """Create subnet on nsx_v provider network.
+
+        If the subnet is created with DHCP enabled, and the network which
+        the subnet is attached is not bound to an DHCP Edge, nsx_v will
+        create the Edge and make sure the network is bound to the Edge
+        """
+        if subnet['subnet']['enable_dhcp']:
+            filters = {'id': [subnet['subnet']['network_id']],
+                       'router:external': [True]}
+            nets = self.get_networks(context, filters=filters)
+            if len(nets) > 0:
+                err_msg = _("Can not enable DHCP on external network")
+                raise n_exc.InvalidInput(error_message=err_msg)
+            if netaddr.IPNetwork(subnet['subnet']['cidr']) == 6:
+                err_msg = _("No support for DHCP for IPv6")
+                raise n_exc.InvalidInput(error_message=err_msg)
+
+        with context.session.begin(subtransactions=True):
+            s = super(NsxVPluginV2, self).create_subnet(context, subnet)
+
+        if s['enable_dhcp']:
+            try:
+                self._update_dhcp_service_with_subnet(context, s)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    self.delete_subnet(context, s['id'])
+        return s
+
+    def _update_dhcp_service_with_subnet(self, context, subnet):
+        network_id = subnet['network_id']
+        # Create DHCP port
+        port_dict = {'name': '',
+                     'admin_state_up': True,
+                     'network_id': network_id,
+                     'tenant_id': subnet['tenant_id'],
+                     'fixed_ips': [{'subnet_id': subnet['id']}],
+                     'device_owner': constants.DEVICE_OWNER_DHCP,
+                     'device_id': '',
+                     'mac_address': attr.ATTR_NOT_SPECIFIED
+                     }
+        self.create_port(context, {'port': port_dict})
+        # The DHCP for network with different physical network can not be used
+        # The flat network should be located in different DHCP
+        conflicting_networks = []
+        network_ids = self.get_networks(neutron_context.get_admin_context(),
+                                        fields=['id'])
+        phy_net = nsxv_db.get_network_bindings(context.session, network_id)
+        if phy_net:
+            binding_type = phy_net[0]['binding_type']
+            phy_uuid = phy_net[0]['phy_uuid']
+            for net_id in network_ids:
+                p_net = nsxv_db.get_network_bindings(context.session,
+                                                    net_id['id'])
+                if (p_net and binding_type == p_net[0]['binding_type']
+                    and binding_type == c_utils.NsxVNetworkTypes.FLAT):
+                    conflicting_networks.append(net_id['id'])
+                elif (p_net and phy_uuid != p_net[0]['phy_uuid']):
+                    conflicting_networks.append(net_id['id'])
+        # Query all networks with overlap subnet
+        if cfg.CONF.allow_overlapping_ips:
+            # Query all subnet first to get the conflict networks
+            fields = ['id', 'network_id', 'cidr']
+            subnets = self.get_subnets(neutron_context.get_admin_context(),
+                                       fields=fields)
+            subnet_set = netaddr.IPSet([subnet['cidr']])
+            for s in subnets:
+                s_set = netaddr.IPSet([s['cidr']])
+                if (s['id'] != subnet['id'] and subnet_set & s_set and
+                    s['network_id'] not in conflicting_networks):
+                    conflicting_networks.append(s['network_id'])
+
+        try:
+            resource_id = self.edge_manager.create_dhcp_edge_service(
+                context, network_id, conflicting_networks)
+            # Create all dhcp ports within the network
+            address_groups = self._create_network_dhcp_address_group(
+                context, network_id)
+            self.edge_manager.update_dhcp_edge_service(
+                context, network_id, address_groups=address_groups)
+
+            if resource_id and self.metadata_proxy_handler:
+                self.metadata_proxy_handler.configure_router_edge(resource_id)
+                fw_rules = {
+                    'firewall_rule_list':
+                    self.metadata_proxy_handler.get_router_fw_rules()}
+                edge_utils.update_firewall(
+                    self.nsx_v, context, resource_id, fw_rules,
+                    allow_external=False)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Failed to update DHCP for subnet %s"),
+                              subnet['id'])
+
+    def _create_network_dhcp_address_group(self, context, network_id):
+        """Create dhcp address group for subnets attached to the network."""
+
+        filters = {'network_id': [network_id],
+                   'device_owner': [constants.DEVICE_OWNER_DHCP]}
+        ports = self.get_ports(context, filters=filters)
+
+        filters = {'network_id': [network_id], 'enable_dhcp': [True]}
+        subnets = self.get_subnets(context, filters=filters)
+
+        address_groups = []
+        for subnet in subnets:
+            address_group = {}
+            net = netaddr.IPNetwork(subnet['cidr'])
+            address_group['subnetPrefixLength'] = str(net.prefixlen)
+            for port in ports:
+                fixed_ips = port['fixed_ips']
+                for fip in fixed_ips:
+                    s_id = fip['subnet_id']
+                    ip_addr = fip['ip_address']
+                    if s_id == subnet['id'] and self._is_valid_ip(ip_addr):
+                        address_group['primaryAddress'] = ip_addr
+                        break
+            address_groups.append(address_group)
+        LOG.debug("Update the DHCP address group to %s", address_groups)
+        return address_groups
+
+    def _create_static_binding(self, context, port):
+        """Create the DHCP Edge static binding configuration
+
+        <staticBinding>
+            <macAddress></macAddress>
+            <ipAddress></ipAddress>
+            <hostname></hostname> <!--disallow duplicate-->
+            <defaultGateway></defaultGateway> <!--optional.-->
+            <primaryNameServer></primaryNameServer> <!--optional-->
+            <secondaryNameServer></secondaryNameServer> <!--optional-->
+        </staticBinding>
+        """
+        static_bindings = []
+        static_config = {}
+        static_config['macAddress'] = port['mac_address']
+        static_config['hostname'] = port['id']
+
+        for fixed_ip in port['fixed_ips']:
+            static_config['ipAddress'] = fixed_ip['ip_address']
+            # Query the subnet to get gateway and DNS
+            try:
+                subnet_id = fixed_ip['subnet_id']
+                subnet = self._get_subnet(context, subnet_id)
+            except n_exc.SubnetNotFound:
+                LOG.debug("No related subnet for port %s", port['id'])
+                continue
+            # Set gateway for static binding
+            static_config['defaultGateway'] = subnet['gateway_ip']
+            # set primary and secondary dns
+            name_servers = [dns['address']
+                            for dns in subnet['dns_nameservers']]
+            if len(name_servers) == 1:
+                static_config['primaryNameServer'] = name_servers[0]
+            elif len(name_servers) >= 2:
+                static_config['primaryNameServer'] = name_servers[0]
+                static_config['secondaryNameServer'] = name_servers[1]
+
+            static_bindings.append(static_config)
+        return static_bindings
+
+    def _extract_external_gw(self, context, router, is_extract=True):
+        r = router['router']
+        gw_info = None
+        # First extract the gateway info in case of updating
+        # gateway before edge is deployed.
+        if 'external_gateway_info' in r:
+            gw_info = r['external_gateway_info']
+            if is_extract:
+                del r['external_gateway_info']
+            network_id = (gw_info.get('network_id') if gw_info
+                          else None)
+            if network_id:
+                ext_net = self._get_network(context, network_id)
+                if not ext_net.external:
+                    msg = (_("Network '%s' is not a valid external network") %
+                           network_id)
+                    raise n_exc.BadRequest(resource='router', msg=msg)
+        return gw_info
+
+    def create_router(self, context, router, allow_metadata=True):
+        # First extract the gateway info in case of updating
+        # gateway before edge is deployed.
+        # TODO(berlin): admin_state_up and routes update
+        if router['router'].get('admin_state_up') is False:
+            LOG.warning(_LW("admin_state_up=False router is not supported."))
+        gw_info = self._extract_external_gw(context, router)
+        lrouter = super(NsxVPluginV2, self).create_router(context, router)
+        r = router['router']
+        distributed = r.get('distributed')
+        r['distributed'] = attr.is_attr_set(distributed) and distributed
+        self.edge_manager.create_lrouter(context, lrouter,
+                                         dist=r['distributed'])
+        with context.session.begin(subtransactions=True):
+            router_db = self._get_router(context, lrouter['id'])
+            self._process_nsx_router_create(context, router_db, r)
+        if gw_info is not None:
+            self._update_router_gw_info(context, lrouter['id'], gw_info)
+        if allow_metadata and self.metadata_proxy_handler:
+            self.metadata_proxy_handler.configure_router_edge(lrouter['id'])
+        return self.get_router(context, lrouter['id'])
+
+    def update_router(self, context, router_id, router):
+        # TODO(berlin): admin_state_up update
+        if router['router'].get('admin_state_up') is False:
+            LOG.warning(_LW("admin_state_up=False router is not supported."))
+        gw_info = self._extract_external_gw(context, router, is_extract=False)
+        router_updated = super(NsxVPluginV2, self).update_router(
+            context, router_id, router)
+        # here is used to handle routes which tenant updates.
+        if gw_info is None:
+            router_db = self._get_router(context, router_id)
+            nexthop = self._get_external_attachment_info(context, router_db)[2]
+            self._update_routes(context, router_id, nexthop)
+        return router_updated
+
+    def _check_router_in_use(self, context, router_id):
+        with context.session.begin(subtransactions=True):
+            # Ensure that the router is not used
+            router_filter = {'router_id': [router_id]}
+            fips = self.get_floatingips_count(context.elevated(),
+                                              filters=router_filter)
+            if fips:
+                raise l3.RouterInUse(router_id=router_id)
+
+            device_filter = {'device_id': [router_id],
+                             'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
+            ports = self.get_ports_count(context.elevated(),
+                                         filters=device_filter)
+            if ports:
+                raise l3.RouterInUse(router_id=router_id)
+
+    def delete_router(self, context, id):
+        self._check_router_in_use(context, id)
+        distributed = self.get_router(context, id).get('distributed', False)
+        self.edge_manager.delete_lrouter(context, id, dist=distributed)
+        super(NsxVPluginV2, self).delete_router(context, id)
+
+    def _get_external_attachment_info(self, context, router):
+        gw_port = router.gw_port
+        ipaddress = None
+        netmask = None
+        nexthop = None
+
+        if gw_port:
+            # gw_port may have multiple IPs, only configure the first one
+            if gw_port.get('fixed_ips'):
+                ipaddress = gw_port['fixed_ips'][0]['ip_address']
+
+            network_id = gw_port.get('network_id')
+            if network_id:
+                ext_net = self._get_network(context, network_id)
+                if not ext_net.external:
+                    msg = (_("Network '%s' is not a valid external "
+                             "network") % network_id)
+                    raise n_exc.BadRequest(resource='router', msg=msg)
+                if ext_net.subnets:
+                    ext_subnet = ext_net.subnets[0]
+                    netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask)
+                    nexthop = ext_subnet.gateway_ip
+
+        return (ipaddress, netmask, nexthop)
+
+    def _add_network_info_for_routes(self, context, routes, ports):
+        for route in routes:
+            for port in ports:
+                for ip in port['fixed_ips']:
+                    subnet = self.get_subnet(context, ip['subnet_id'])
+                    if netaddr.all_matching_cidrs(
+                        route['nexthop'], [subnet['cidr']]):
+                        net = self.get_network(context, subnet['network_id'])
+                        route['network_id'] = net['id']
+                        if net.get(ext_net_extn.EXTERNAL):
+                            route['external'] = True
+
+    def _update_routes(self, context, router_id, nexthop):
+        routes = self._get_extra_routes_by_router_id(context, router_id)
+        filters = {'device_id': [router_id]}
+        ports = self.get_ports(context, filters)
+        self._add_network_info_for_routes(context, routes, ports)
+        edge_utils.update_routes(self.nsx_v, context, router_id,
+                                 routes, nexthop)
+
+    def _update_routes_on_plr(self, context, router_id, plr_id, newnexthop):
+        subnets = self._find_router_subnets_cidrs(
+            context.elevated(), router_id)
+        routes = []
+        for subnet in subnets:
+            routes.append({
+                'destination': subnet,
+                'nexthop': (vcns_const.INTEGRATION_LR_IPADDRESS.
+                            split('/')[0])
+            })
+        edge_utils.update_routes_on_plr(self.nsx_v, context,
+                                        plr_id, router_id, routes,
+                                        nexthop=newnexthop)
+
+    def _update_router_gw_info(self, context, router_id, info):
+        router = self._get_router(context, router_id)
+        org_ext_net_id = router.gw_port_id and router.gw_port.network_id
+        org_enable_snat = router.enable_snat
+        orgaddr, orgmask, orgnexthop = self._get_external_attachment_info(
+            context, router)
+
+        super(NsxVPluginV2, self)._update_router_gw_info(
+            context, router_id, info, router=router)
+
+        new_ext_net_id = router.gw_port_id and router.gw_port.network_id
+        new_enable_snat = router.enable_snat
+        newaddr, newmask, newnexthop = self._get_external_attachment_info(
+            context, router)
+
+        router_dict = self._make_router_dict(router)
+        if not router_dict.get('distributed', False):
+            if new_ext_net_id != org_ext_net_id and orgnexthop:
+                # network changed, so need to remove default gateway before
+                # vnic can be configured
+                LOG.debug("Delete default gateway %s", orgnexthop)
+                edge_utils.clear_gateway(self.nsx_v, context, router_id)
+                # Delete SNAT rules
+                if org_enable_snat:
+                    edge_utils.clear_nat_rules(self.nsx_v, context, router_id)
+
+            # Update external vnic if addr or mask is changed
+            if orgaddr != newaddr or orgmask != newmask:
+                edge_utils.update_external_interface(
+                    self.nsx_v, context, router_id,
+                    new_ext_net_id, newaddr, newmask)
+
+            # Update SNAT rules if ext net changed and snat enabled
+            # or ext net not changed but snat is changed.
+            if ((new_ext_net_id != org_ext_net_id and
+                 newnexthop and new_enable_snat) or
+                (new_ext_net_id == org_ext_net_id and
+                 new_enable_snat != org_enable_snat)):
+                self._update_nat_rules(context, router)
+
+            # Update static routes in all.
+            self._update_routes(context, router_id, newnexthop)
+        else:
+            plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id)
+            if not new_ext_net_id:
+                if plr_id:
+                    # delete all plr relative conf
+                    self.edge_manager.delete_plr_by_tlr_id(
+                        context, plr_id, router_id)
+            else:
+                # Connecting one plr to the tlr if new_ext_net_id is not None.
+                if not plr_id:
+                    plr_id = self.edge_manager.create_plr_with_tlr_id(
+                        context, router_id, router_dict.get('name'))
+                if new_ext_net_id != org_ext_net_id and orgnexthop:
+                    # network changed, so need to remove default gateway and
+                    # all static routes before vnic can be configured
+                    edge_utils.clear_gateway(self.nsx_v, context, plr_id)
+                    # Delete SNAT rules
+                    if org_enable_snat:
+                        edge_utils.clear_nat_rules(self.nsx_v, context, plr_id)
+
+                # Update external vnic if addr or mask is changed
+                if orgaddr != newaddr or orgmask != newmask:
+                    edge_utils.update_external_interface(
+                        self.nsx_v, context, plr_id,
+                        new_ext_net_id, newaddr, newmask)
+
+                # Update SNAT rules if ext net changed and snat enabled
+                # or ext net not changed but snat is changed.
+                if ((new_ext_net_id != org_ext_net_id and
+                     newnexthop and new_enable_snat) or
+                    (new_ext_net_id == org_ext_net_id and
+                     new_enable_snat != org_enable_snat)):
+                    self._update_nat_rules(context, router, plr_id)
+                    # Open firewall flows on plr
+                    self._update_subnets_and_dnat_firewall(
+                        context, router, router_id=plr_id)
+                    # Update static routes of plr
+                    self._update_routes_on_plr(
+                        context, router_id, plr_id, newnexthop)
+
+    def _get_router_interface_ports_by_network(
+        self, context, router_id, network_id):
+        port_filters = {'device_id': [router_id],
+                        'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF],
+                        'network_id': [network_id]}
+        return self.get_ports(context, filters=port_filters)
+
+    def _get_address_groups(self, context, router_id, network_id):
+        address_groups = []
+        ports = self._get_router_interface_ports_by_network(
+            context, router_id, network_id)
+        for port in ports:
+            address_group = {}
+            gateway_ip = port['fixed_ips'][0]['ip_address']
+            subnet = self.get_subnet(context,
+                                     port['fixed_ips'][0]['subnet_id'])
+            prefixlen = str(netaddr.IPNetwork(subnet['cidr']).prefixlen)
+            address_group['primaryAddress'] = gateway_ip
+            address_group['subnetPrefixLength'] = prefixlen
+            address_groups.append(address_group)
+        return address_groups
+
+    def _get_port_by_device_id(self, context, device_id, device_owner):
+        """Retrieve ports associated with a specific device id.
+
+        Used for retrieving all neutron ports attached to a given router.
+        """
+        port_qry = context.session.query(models_v2.Port)
+        return port_qry.filter_by(
+            device_id=device_id,
+            device_owner=device_owner,).all()
+
+    def _find_router_subnets_cidrs(self, context, router_id):
+        """Retrieve subnets attached to the specified router."""
+        ports = self._get_port_by_device_id(context, router_id,
+                                            l3_db.DEVICE_OWNER_ROUTER_INTF)
+        # No need to check for overlapping CIDRs
+        cidrs = []
+        for port in ports:
+            for ip in port.get('fixed_ips', []):
+                subnet_qry = context.session.query(models_v2.Subnet)
+                subnet = subnet_qry.filter_by(id=ip.subnet_id).one()
+                cidrs.append(subnet.cidr)
+        return cidrs
+
+    def _get_nat_rules(self, context, router):
+        fip_qry = context.session.query(l3_db.FloatingIP)
+        fip_db = fip_qry.filter_by(router_id=router['id']).all()
+
+        snat = []
+
+        dnat = [{'dst': fip.floating_ip_address,
+                 'translated': fip.fixed_ip_address}
+                for fip in fip_db if fip.fixed_port_id]
+
+        gw_port = router.gw_port
+        if gw_port and router.enable_snat:
+            snat_ip = gw_port['fixed_ips'][0]['ip_address']
+            subnets = self._find_router_subnets_cidrs(context.elevated(),
+                                                      router['id'])
+            for subnet in subnets:
+                snat.append({
+                    'src': subnet,
+                    'translated': snat_ip
+                })
+        return (snat, dnat)
+
+    def _update_nat_rules(self, context, router, router_id=None):
+        snat, dnat = self._get_nat_rules(context, router)
+        if not router_id:
+            router_id = router['id']
+        edge_utils.update_nat_rules(
+            self.nsx_v, context, router_id, snat, dnat)
+
+    def add_router_interface(self, context, router_id, interface_info):
+        info = super(NsxVPluginV2, self).add_router_interface(
+            context, router_id, interface_info)
+
+        router_db = self._get_router(context, router_id)
+        router = self._make_router_dict(router_db)
+        distributed = router.get('distributed', False)
+        subnet = self.get_subnet(context, info['subnet_id'])
+        network_id = subnet['network_id']
+
+        address_groups = self._get_address_groups(
+            context, router_id, network_id)
+        if not distributed:
+            edge_utils.update_internal_interface(
+                self.nsx_v, context, router_id, network_id, address_groups)
+        else:
+            try:
+                edge_utils.add_vdr_internal_interface(
+                    self.nsx_v, context, router_id, network_id, address_groups)
+            except n_exc.BadRequest:
+                with excutils.save_and_reraise_exception():
+                    super(NsxVPluginV2, self).remove_router_interface(
+                        context, router_id, interface_info)
+        # Update edge's firewall rules to accept subnets flows.
+        self._update_subnets_and_dnat_firewall(context, router_db)
+
+        if router_db.gw_port and router_db.enable_snat:
+            if not distributed:
+                # Update Nat rules on external edge vnic
+                self._update_nat_rules(context, router_db)
+            else:
+                plr_id = self.edge_manager.get_plr_by_tlr_id(
+                    context, router_id)
+                self._update_nat_rules(context, router_db, plr_id)
+                # Open firewall flows on plr
+                self._update_subnets_and_dnat_firewall(
+                    context, router_db, router_id=plr_id)
+                # Update static routes of plr
+                nexthop = self._get_external_attachment_info(
+                    context, router_db)[2]
+                self._update_routes_on_plr(
+                    context, router_id, plr_id, nexthop)
+        return info
+
+    def remove_router_interface(self, context, router_id, interface_info):
+        info = super(NsxVPluginV2, self).remove_router_interface(
+            context, router_id, interface_info)
+        router_db = self._get_router(context, router_id)
+        router = self._make_router_dict(router_db)
+        distributed = router.get('distributed', False)
+
+        subnet = self.get_subnet(context, info['subnet_id'])
+        network_id = subnet['network_id']
+        if router_db.gw_port and router_db.enable_snat:
+            if not distributed:
+                # First update nat rules
+                self._update_nat_rules(context, router_db)
+            else:
+                plr_id = self.edge_manager.get_plr_by_tlr_id(
+                    context, router_id)
+                self._update_nat_rules(context, router_db, plr_id)
+                # Open firewall flows on plr
+                self._update_subnets_and_dnat_firewall(
+                    context, router_db, router_id=plr_id)
+                # Update static routes of plr
+                nexthop = self._get_external_attachment_info(
+                    context, router_db)[2]
+                nexthop = self._get_external_attachment_info(
+                    context, router_db)[2]
+                self._update_routes_on_plr(
+                    context, router_id, plr_id, nexthop)
+
+        ports = self._get_router_interface_ports_by_network(
+            context, router_id, network_id)
+        self._update_subnets_and_dnat_firewall(context, router_db)
+        # No subnet on the network connects to the edge vnic
+        if not ports:
+            edge_utils.delete_interface(self.nsx_v, context,
+                                        router_id, network_id,
+                                        dist=distributed)
+        else:
+            address_groups = self._get_address_groups(
+                context, router_id, network_id)
+            if not distributed:
+                edge_utils.update_internal_interface(self.nsx_v, context,
+                                                     router_id, network_id,
+                                                     address_groups)
+            else:
+                edge_utils.update_vdr_internal_interface(
+                    self.nsx_v, context, router_id, network_id, address_groups)
+        return info
+
+    def _get_floatingips_by_router(self, context, router_id):
+        fip_qry = context.session.query(l3_db.FloatingIP)
+        fip_db = fip_qry.filter_by(router_id=router_id).all()
+        return [fip.floating_ip_address
+                for fip in fip_db if fip.fixed_port_id]
+
+    def _update_external_interface(self, context, router, router_id=None):
+        ext_net_id = router.gw_port_id and router.gw_port.network_id
+        addr, mask, nexthop = self._get_external_attachment_info(
+            context, router)
+        secondary = self._get_floatingips_by_router(context, router['id'])
+        if not router_id:
+            router_id = router['id']
+        edge_utils.update_external_interface(
+            self.nsx_v, context, router_id, ext_net_id,
+            addr, mask, secondary)
+
+    def _set_floatingip_status(self, context, floatingip_db, status=None):
+        if not status:
+            status = (constants.FLOATINGIP_STATUS_ACTIVE
+                      if floatingip_db.get('router_id')
+                      else constants.FLOATINGIP_STATUS_DOWN)
+        if floatingip_db['status'] != status:
+            floatingip_db['status'] = status
+            self.update_floatingip_status(context, floatingip_db['id'], status)
+
+    def create_floatingip(self, context, floatingip):
+        fip_db = super(NsxVPluginV2, self).create_floatingip(
+            context, floatingip)
+        router_id = fip_db['router_id']
+        if router_id:
+            try:
+                self._update_edge_router(context, router_id)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception(_LE("Failed to update edge router"))
+                    super(NsxVPluginV2, self).delete_floatingip(context,
+                                                                fip_db['id'])
+        self._set_floatingip_status(context, fip_db)
+        return fip_db
+
+    def update_floatingip(self, context, id, floatingip):
+        old_fip = self._get_floatingip(context, id)
+        old_router_id = old_fip.router_id
+        old_port_id = old_fip.fixed_port_id
+        fip_db = super(NsxVPluginV2, self).update_floatingip(
+            context, id, floatingip)
+        router_id = fip_db.get('router_id')
+        try:
+            # Update old router's nat rules if old_router_id is not None.
+            if old_router_id:
+                self._update_edge_router(context, old_router_id)
+            # Update current router's nat rules if router_id is not None.
+            if router_id:
+                self._update_edge_router(context, router_id)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Failed to update edge router"))
+                super(NsxVPluginV2, self).update_floatingip(
+                    context, id, {'floatingip': {'port_id': old_port_id}})
+        self._set_floatingip_status(context, fip_db)
+        return fip_db
+
+    def _update_edge_router(self, context, router_id):
+        router = self._get_router(context, router_id)
+        distributed = self._make_router_dict(router).get(
+            'distributed', False)
+        if distributed:
+            plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id)
+        else:
+            plr_id = None
+        self._update_external_interface(context, router, router_id=plr_id)
+        self._update_nat_rules(context, router, router_id=plr_id)
+        self._update_subnets_and_dnat_firewall(context, router,
+                                               router_id=plr_id)
+
+    def delete_floatingip(self, context, id):
+        fip_db = self._get_floatingip(context, id)
+        router_id = None
+        if fip_db.fixed_port_id:
+            router_id = fip_db.router_id
+        super(NsxVPluginV2, self).delete_floatingip(context, id)
+        if router_id:
+            router = self._get_router(context, router_id)
+            distributed = self._make_router_dict(router).get(
+                'distributed', False)
+            if not distributed:
+                self._update_subnets_and_dnat_firewall(context, router)
+                self._update_nat_rules(context, router)
+                self._update_external_interface(context, router)
+            else:
+                plr_id = self.edge_manager.get_plr_by_tlr_id(context,
+                                                             router_id)
+                self._update_subnets_and_dnat_firewall(
+                    context, router, router_id=plr_id)
+                self._update_nat_rules(context, router, router_id=plr_id)
+                self._update_external_interface(
+                    context, router, router_id=plr_id)
+
+    def disassociate_floatingips(self, context, port_id):
+        router_id = None
+        try:
+            fip_qry = context.session.query(l3_db.FloatingIP)
+            fip_db = fip_qry.filter_by(fixed_port_id=port_id)
+            for fip in fip_db:
+                if fip.router_id:
+                    router_id = fip.router_id
+                    break
+        except sa_exc.NoResultFound:
+            router_id = None
+        super(NsxVPluginV2, self).disassociate_floatingips(context, port_id)
+        if router_id:
+            router = self._get_router(context, router_id)
+            distributed = self._make_router_dict(router).get(
+                'distributed', False)
+            if not distributed:
+                self._update_subnets_and_dnat_firewall(context, router)
+                self._update_nat_rules(context, router)
+                self._update_external_interface(context, router)
+            else:
+                plr_id = self.edge_manager.get_plr_by_tlr_id(context,
+                                                             router_id)
+                self._update_subnets_and_dnat_firewall(
+                    context, router, router_id=plr_id)
+                self._update_nat_rules(context, router, router_id=plr_id)
+                self._update_external_interface(
+                    context, router, router_id=plr_id)
+
+    def _update_subnets_and_dnat_firewall(self, context, router,
+                                          router_id=None, allow_external=True):
+        fake_fw_rules = []
+        if not router_id:
+            router_id = router['id']
+        subnet_cidrs = self._find_router_subnets_cidrs(context, router['id'])
+        if subnet_cidrs:
+            # Fake fw rule to open subnets firewall flows
+            fake_subnet_fw_rule = {
+                'action': 'allow',
+                'enabled': True,
+                'source_ip_address': subnet_cidrs,
+                'destination_ip_address': subnet_cidrs}
+            fake_fw_rules.append(fake_subnet_fw_rule)
+        _, dnat_rules = self._get_nat_rules(context, router)
+
+        # If metadata service is enabled, block access to inter-edge network
+        if self.metadata_proxy_handler:
+            fake_fw_rules += self.metadata_proxy_handler.get_router_fw_rules()
+
+        dnat_cidrs = [rule['dst'] for rule in dnat_rules]
+        if dnat_cidrs:
+            # Fake fw rule to open dnat firewall flows
+            fake_dnat_fw_rule = {
+                'action': 'allow',
+                'enabled': True,
+                'destination_ip_address': dnat_cidrs}
+            fake_fw_rules.append(fake_dnat_fw_rule)
+        # TODO(berlin): Add fw rules if fw service is supported
+        fake_fw = {'firewall_rule_list': fake_fw_rules}
+        edge_utils.update_firewall(self.nsx_v, context, router_id, fake_fw,
+                                   allow_external=allow_external)
+
+    # Security group handling section #
+    def _delete_security_group(self, nsx_sg_id):
+        """Helper method to delete nsx security group."""
+        if nsx_sg_id is not None:
+            h, c = self.nsx_v.vcns.delete_security_group(nsx_sg_id)
+
+    def _delete_section(self, section_uri):
+        """Helper method to delete nsx rule section."""
+        if section_uri is not None:
+            h, c = self.nsx_v.vcns.delete_section(section_uri)
+
+    def _get_section_uri(self, session, security_group_id, type):
+        mapping = nsxv_db.get_nsx_section(session, security_group_id)
+        if mapping is not None:
+            if type == 'ip':
+                return mapping['ip_section_id']
+            else:
+                return mapping['mac_section_id']
+
+    def create_security_group(self, context, security_group,
+                              default_sg=False):
+        """Create a security group."""
+        sg_data = security_group["security_group"]
+        tenant_id = self._get_tenant_id_for_create(context, sg_data)
+        if not default_sg:
+            self._ensure_default_security_group(context, tenant_id)
+
+        sg_data["id"] = str(uuid.uuid4())
+
+        nsx_sg_name = self._get_name(sg_data['id'],
+                                     sg_data['name'])
+        security_group_config = {"name": nsx_sg_name,
+                                 "description": sg_data["name"]}
+        security_group_dict = {"securitygroup": security_group_config}
+
+        # Create the nsx security group container
+        h, c = self.nsx_v.vcns.create_security_group(security_group_dict)
+        nsx_sg_id = c
+        section_uri = None
+        try:
+            with context.session.begin(subtransactions=True):
+                new_security_group = super(
+                    NsxVPluginV2, self).create_security_group(
+                        context, security_group, default_sg)
+
+                # Save moref in the DB for future access
+                nsx_db.add_neutron_nsx_security_group_mapping(
+                    context.session, new_security_group['id'],
+                    nsx_sg_id)
+
+                # (shadabs): For now only IPv4 rules are processed while group
+                # creation. This is to avoid duplicate rules since NSXv manager
+                # does not distinguish between IPv4 and IPv6 rules. Remove the
+                # TODO(shadabs): comment once NSXv provides API to define ether
+                # type.
+                nsx_rules = []
+                rules = new_security_group['security_group_rules']
+                for rule in rules:
+                    _r, _n = self._create_nsx_rule(context, rule, nsx_sg_id)
+                    nsx_rules.append(_r)
+
+                section_name = ('SG Section: %(name)s (%(id)s)'
+                                % new_security_group)
+                section = self.nsx_sg_utils.get_section_with_rules(
+                    section_name, nsx_rules)
+
+                # Execute REST API for creating the section
+                h, c = self.nsx_v.vcns.create_section(
+                    'ip', self.nsx_sg_utils.to_xml_string(section))
+
+                # Save ip section uri in DB for furture access
+                section_uri = h['location']
+                nsxv_db.add_neutron_nsx_section_mapping(
+                    context.session, new_security_group['id'],
+                    section_uri)
+
+                # Parse the rule id pairs and save in db
+                rule_pairs = self.nsx_sg_utils.get_rule_id_pair_from_section(c)
+                for pair in rule_pairs:
+                    _nsx_id = pair.get('nsx_id')  # nsx_rule_id
+                    _neutron_id = pair.get('neutron_id')  # neutron_rule_id
+                    # Save nsx rule id in the DB for future access
+                    LOG.debug('rules %s-%s', _nsx_id, _neutron_id)
+                    nsxv_db.add_neutron_nsx_rule_mapping(
+                        context.session, _neutron_id, _nsx_id)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                # Delete the nsx rule section
+                self._delete_section(section_uri)
+                # Delete the nsx security group container
+                self._delete_security_group(nsx_sg_id)
+                LOG.exception(_LE('Failed to create security group'))
+
+        return new_security_group
+
+    def delete_security_group(self, context, id):
+        """Delete a security group."""
+        try:
+            with context.session.begin(subtransactions=True):
+                security_group = super(
+                    NsxVPluginV2, self).get_security_group(context, id)
+
+                # Find nsx rule sections
+                section_mapping = nsxv_db.get_nsx_section(
+                    context.session, security_group['id'])
+
+                # Find nsx security group
+                nsx_sg_id = nsx_db.get_nsx_security_group_id(
+                    context.session, security_group['id'])
+
+                # Delete neutron security group
+                super(NsxVPluginV2, self).delete_security_group(
+                    context, id)
+
+                # Delete nsx rule sections
+                self._delete_section(section_mapping['ip_section_id'])
+                self._delete_section(section_mapping['mac_section_id'])
+
+                # Delete nsx security group
+                self._delete_security_group(nsx_sg_id)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE('Failed to delete security group'))
+
+    def _create_nsx_rule(self, context, rule, nsx_sg_id):
+        src = None
+        dest = None
+        port = None
+        protocol = None
+        icmptype = None
+        icmpcode = None
+        flags = {}
+
+        if nsx_sg_id is None:
+            # Find nsx security group for neutron security group
+            nsx_sg_id = nsx_db.get_nsx_security_group_id(
+                context.session, rule['security_group_id'])
+            if nsx_sg_id is None:
+                # TODO(shadabs): raise an exception here
+                LOG.warning(_LW("NSX security group not found for %s"),
+                            rule['security_group_id'])
+
+        # Find the remote nsx security group id, if given in rule
+        remote_nsx_sg_id = nsx_db.get_nsx_security_group_id(
+            context.session, rule['remote_group_id'])
+
+        # Get source and destination containers from rule
+        if rule['direction'] == 'ingress':
+            src = self.nsx_sg_utils.get_remote_container(
+                remote_nsx_sg_id, rule['remote_ip_prefix'])
+            dest = self.nsx_sg_utils.get_container(nsx_sg_id)
+            flags['direction'] = 'in'
+        else:
+            dest = self.nsx_sg_utils.get_remote_container(
+                remote_nsx_sg_id, rule['remote_ip_prefix'])
+            src = self.nsx_sg_utils.get_container(nsx_sg_id)
+            flags['direction'] = 'out'
+
+        protocol = rule.get('protocol')
+        if rule['port_range_min'] is not None:
+            if protocol == '1' or protocol == 'icmp':
+                icmptype = str(rule['port_range_min'])
+                if rule['port_range_max'] is not None:
+                    icmpcode = str(rule['port_range_max'])
+            else:
+                port = str(rule['port_range_min'])
+                if rule['port_range_max'] != rule['port_range_min']:
+                    port = port + '-' + str(rule['port_range_max'])
+
+        # Get the neutron rule id to use as name in nsxv rule
+        name = rule.get('id')
+        services = [(protocol, port, icmptype, icmpcode)] if protocol else []
+
+        flags['ethertype'] = rule.get('ethertype')
+        # Add rule in nsx rule section
+        nsx_rule = self.nsx_sg_utils.get_rule_config(
+            applied_to_id=nsx_sg_id,
+            name=name,
+            source=src,
+            destination=dest,
+            services=services,
+            flags=flags)
+        return nsx_rule, nsx_sg_id
+
+    def create_security_group_rule(self, context, security_group_rule):
+        """Create a single security group rule."""
+        bulk_rule = {'security_group_rules': [security_group_rule]}
+        return self.create_security_group_rule_bulk(context, bulk_rule)[0]
+
+    def create_security_group_rule_bulk(self, context, security_group_rule):
+        """Create security group rules.
+
+        :param security_group_rule: list of rules to create
+        """
+        try:
+            with context.session.begin(subtransactions=True):
+                # Validate and store rule in neutron DB
+                new_rule_list = super(
+                    NsxVPluginV2, self).create_security_group_rule_bulk_native(
+                        context, security_group_rule)
+                ruleids = set()
+                nsx_sg_id = None
+                section_uri = None
+                section = None
+                _h = None
+                for rule in new_rule_list:
+                    # Find nsx rule section for neutron security group
+                    if section_uri is None:
+                        section_uri = self._get_section_uri(
+                            context.session, rule['security_group_id'], 'ip')
+                        if section_uri is None:
+                            # TODO(shadabs): raise an exception here
+                            LOG.warning(_LW("NSX rule section not found for "
+                                            "%s"), rule['security_group_id'])
+
+                    # Parse neutron rule and get nsx rule xml
+                    _r, _n = self._create_nsx_rule(context, rule, nsx_sg_id)
+                    nsx_rule = _r
+                    nsx_sg_id = _n
+                    if section is None:
+                        _h, _c = self.nsx_v.vcns.get_section(section_uri)
+                        section = self.nsx_sg_utils.parse_section(_c)
+
+                    # Insert rule in nsx section
+                    self.nsx_sg_utils.insert_rule_in_section(section, nsx_rule)
+                    ruleids.add(rule['id'])
+
+                # Update the section
+                h, c = self.nsx_v.vcns.update_section(
+                    section_uri, self.nsx_sg_utils.to_xml_string(section), _h)
+
+                # Parse the rule id pairs and save in db
+                rule_pairs = self.nsx_sg_utils.get_rule_id_pair_from_section(c)
+                for pair in rule_pairs:
+                    _nsx_id = pair.get('nsx_id')  # nsx_rule_id
+                    _neutron_id = pair.get('neutron_id')  # neutron_rule_id
+                    # Save nsx rule id in the DB for future access
+                    if _neutron_id in ruleids:
+                        nsxv_db.add_neutron_nsx_rule_mapping(
+                            context.session, _neutron_id, _nsx_id)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE('Failed to update security group rule'))
+
+        return new_rule_list
+
+    def delete_security_group_rule(self, context, sgrid):
+        """Delete a security group rule."""
+        try:
+            with context.session.begin(subtransactions=True):
+                # Get security group rule from DB
+                security_group_rule = super(
+                    NsxVPluginV2, self).get_security_group_rule(
+                        context, sgrid)
+                if not security_group_rule:
+                    raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)
+
+                # Get the nsx rule from neutron DB
+                nsx_rule_id = nsxv_db.get_nsx_rule_id(
+                    context.session, security_group_rule['id'])
+                section_uri = self._get_section_uri(
+                    context.session, security_group_rule['security_group_id'],
+                    'ip')
+
+                # Delete the rule from neutron DB
+                ret = super(NsxVPluginV2, self).delete_security_group_rule(
+                    context, sgrid)
+
+                # Delete the nsx rule
+                if nsx_rule_id is not None and section_uri is not None:
+                    h, c = self.nsx_v.vcns.remove_rule_from_section(
+                        section_uri, nsx_rule_id)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE('Failed to delete security group rule'))
+
+        return ret
+
+    def _is_compute_port(self, port):
+        try:
+            if (port['device_id'] and uuidutils.is_uuid_like(port['device_id'])
+                and port['device_owner'].startswith('compute:')):
+                return True
+        except (KeyError, AttributeError):
+            pass
+        return False
+
+    def _is_valid_ip(self, ip_addr):
+        return netaddr.valid_ipv4(ip_addr) or netaddr.valid_ipv6(ip_addr)
+
+    def _validate_config(self):
+        if not self.nsx_v.vcns.validate_dvs(cfg.CONF.nsxv.dvs_id):
+            error = _("configured dvs_id not found")
+            raise nsx_exc.NsxPluginException(err_msg=error)
+
+        if not self.nsx_v.vcns.validate_datacenter_moid(
+                cfg.CONF.nsxv.datacenter_moid):
+            error = _("configured datacenter_moid not found")
+            raise nsx_exc.NsxPluginException(err_msg=error)
+
+        if not self.nsx_v.vcns.validate_network(
+                cfg.CONF.nsxv.external_network):
+            error = _("configured external_network not found")
+            raise nsx_exc.NsxPluginException(err_msg=error)
+
+        if not self.nsx_v.vcns.validate_vdn_scope(cfg.CONF.nsxv.vdn_scope_id):
+            error = _("configured vdn_scope_id not found")
+            raise nsx_exc.NsxPluginException(err_msg=error)
diff --git a/vmware_nsx/neutron/plugins/vmware/plugins/nsx_v_md_proxy.py b/vmware_nsx/neutron/plugins/vmware/plugins/nsx_v_md_proxy.py
new file mode 100644
index 0000000000..c557e4e013
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/plugins/nsx_v_md_proxy.py
@@ -0,0 +1,367 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+from oslo.config import cfg
+from oslo.db import exception as db_exc
+
+from neutron.api.v2 import attributes as attr
+from neutron.common import constants
+from neutron import context as neutron_context
+from neutron.openstack.common import log as logging
+from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
+from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
+from vmware_nsx.neutron.plugins.vmware.vshield import (
+    nsxv_loadbalancer as nsxv_lb)
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
+    constants as vcns_const)
+from vmware_nsx.neutron.plugins.vmware.vshield import edge_utils
+
+
+METADATA_IP_ADDR = '169.254.169.254'
+METADATA_TCP_PORT = 80
+INTERNAL_SUBNET = '169.254.0.0/16'
+
+LOG = logging.getLogger(__name__)
+
+
+class NsxVMetadataProxyHandler:
+
+    def __init__(self, nsxv_plugin):
+        self.nsxv_plugin = nsxv_plugin
+        self.context = neutron_context.get_admin_context()
+
+        self.internal_net, self.internal_subnet = self._get_internal_network()
+
+        if not self.internal_net or not self.internal_subnet:
+            self.internal_net, self.internal_subnet = (
+                self._create_internal_network())
+
+        self.proxy_edge_ids, self.proxy_edge_ips = self._get_proxy_edges()
+        if not self.proxy_edge_ids or not self.proxy_edge_ips:
+            self.proxy_edge_ids, self.proxy_edge_ips = (
+                self._create_proxy_edges())
+
+    def _create_metadata_internal_network(self, cidr):
+        net_data = {'network': {'name': 'inter-edge-net',
+                                'admin_state_up': True,
+                                'port_security_enabled': False,
+                                'shared': False,
+                                'tenant_id': None}}
+        net = self.nsxv_plugin.create_network(self.context, net_data)
+
+        subnet_data = {'subnet':
+                       {'cidr': cidr,
+                        'name': 'inter-edge-subnet',
+                        'gateway_ip': attr.ATTR_NOT_SPECIFIED,
+                        'allocation_pools': attr.ATTR_NOT_SPECIFIED,
+                        'ip_version': 4,
+                        'dns_nameservers': attr.ATTR_NOT_SPECIFIED,
+                        'host_routes': attr.ATTR_NOT_SPECIFIED,
+                        'enable_dhcp': False,
+                        'network_id': net['id'],
+                        'tenant_id': None}}
+
+        subnet = self.nsxv_plugin.create_subnet(
+            self.context,
+            subnet_data)
+
+        return net['id'], subnet['id']
+
+    def _get_internal_network(self):
+        internal_net = None
+        internal_subnet = None
+
+        net_list = nsxv_db.get_nsxv_internal_network(
+            self.context.session,
+            nsxv_constants.INTER_EDGE_PURPOSE)
+
+        if net_list:
+            internal_net = net_list[0]['network_id']
+            internal_subnet = self.nsxv_plugin.get_subnets(
+                self.context,
+                fields=['id'],
+                filters={'network_id': [internal_net]})[0]['id']
+
+        return internal_net, internal_subnet
+
+    def _create_internal_network(self):
+        internal_net, internal_subnet = (
+            self._create_metadata_internal_network(INTERNAL_SUBNET))
+
+        try:
+            nsxv_db.create_nsxv_internal_network(
+                self.context.session,
+                nsxv_constants.INTER_EDGE_PURPOSE,
+                internal_net)
+        except db_exc.DBDuplicateEntry:
+            # We may have a race condition, where another Neutron instance
+            #  initialized these elements. Delete and use existing elements
+            self.nsxv_plugin.delete_network(self.context, internal_net)
+            internal_net, internal_subnet = self._get_internal_network()
+
+        return internal_net, internal_subnet
+
+    def _get_proxy_edges(self):
+        proxy_edge_ids = []
+        proxy_edge_ips = []
+
+        rtr_list = nsxv_db.get_nsxv_internal_edges_by_purpose(
+            self.context.session,
+            nsxv_constants.INTER_EDGE_PURPOSE)
+
+        for rtr in rtr_list:
+            rtr_id = rtr['router_id']
+            proxy_edge_ids.append(rtr_id)
+            proxy_edge_ips.append(self._get_edge_internal_ip(rtr_id))
+
+        return proxy_edge_ids, proxy_edge_ips
+
+    def _get_edge_internal_ip(self, rtr_id):
+            filters = {
+                'network_id': [self.internal_net],
+                'device_id': [rtr_id]}
+            ports = self.nsxv_plugin.get_ports(self.context, filters=filters)
+            return ports[0]['fixed_ips'][0]['ip_address']
+
+    def _create_proxy_edges(self):
+        proxy_edge_ids = []
+        proxy_edge_ips = []
+
+        for rtr_ip in cfg.CONF.nsxv.mgt_net_proxy_ips:
+            router_data = {
+                'router': {
+                    'name': 'metadata_proxy_router',
+                    'admin_state_up': True,
+                    'tenant_id': None}}
+
+            rtr = self.nsxv_plugin.create_router(
+                self.context,
+                router_data,
+                allow_metadata=False)
+
+            rtr_id = rtr['id']
+            binding = nsxv_db.get_nsxv_router_binding(
+                self.context.session,
+                rtr_id)
+
+            self.nsxv_plugin.nsx_v.update_interface(
+                rtr['id'],
+                binding['edge_id'],
+                vcns_const.EXTERNAL_VNIC_INDEX,
+                cfg.CONF.nsxv.mgt_net_moid,
+                address=rtr_ip,
+                netmask=cfg.CONF.nsxv.mgt_net_proxy_netmask,
+                secondary=[])
+
+            port_data = {
+                'port': {
+                    'network_id': self.internal_net,
+                    'name': None,
+                    'admin_state_up': True,
+                    'device_id': rtr_id,
+                    'device_owner': constants.DEVICE_OWNER_ROUTER_INTF,
+                    'fixed_ips': attr.ATTR_NOT_SPECIFIED,
+                    'mac_address': attr.ATTR_NOT_SPECIFIED,
+                    'port_security_enabled': False,
+                    'tenant_id': None}}
+
+            port = self.nsxv_plugin.create_port(self.context, port_data)
+
+            address_groups = self._get_address_groups(
+                self.context, self.internal_net, rtr_id, is_proxy=True)
+
+            edge_ip = port['fixed_ips'][0]['ip_address']
+            edge_utils.update_internal_interface(
+                self.nsxv_plugin.nsx_v, self.context, rtr_id,
+                self.internal_net, address_groups)
+
+            self._setup_metadata_lb(rtr_id,
+                                    port['fixed_ips'][0]['ip_address'],
+                                    cfg.CONF.nsxv.nova_metadata_port,
+                                    cfg.CONF.nsxv.nova_metadata_port,
+                                    cfg.CONF.nsxv.nova_metadata_ips,
+                                    proxy_lb=True)
+
+            firewall_rule = {
+                'action': 'allow',
+                'enabled': True,
+                'source_ip_address': [INTERNAL_SUBNET]}
+
+            edge_utils.update_firewall(
+                self.nsxv_plugin.nsx_v,
+                self.context,
+                rtr_id,
+                {'firewall_rule_list': [firewall_rule]},
+                allow_external=False)
+
+            # If DB Entry already defined by another Neutron instance, remove
+            #  and resume
+            try:
+                nsxv_db.create_nsxv_internal_edge(
+                    self.context.session,
+                    rtr_ip,
+                    nsxv_constants.INTER_EDGE_PURPOSE,
+                    rtr_id)
+            except db_exc.DBDuplicateEntry:
+                self.nsxv_plugin.delete_router(self.context, rtr_id)
+                rtr_id = nsxv_db.get_nsxv_internal_edge(self.context, rtr_ip)
+                edge_ip = self._get_edge_internal_ip(rtr_id)
+
+            proxy_edge_ids.append(rtr_id)
+            proxy_edge_ips.append(edge_ip)
+        return proxy_edge_ids, proxy_edge_ips
+
+    def _get_address_groups(self, context, network_id, device_id, is_proxy):
+
+        filters = {'network_id': [network_id],
+                   'device_id': [device_id]}
+        ports = self.nsxv_plugin.get_ports(context, filters=filters)
+
+        subnets = self.nsxv_plugin.get_subnets(context, filters=filters)
+
+        address_groups = []
+        for subnet in subnets:
+            address_group = {}
+            net = netaddr.IPNetwork(subnet['cidr'])
+            address_group['subnetMask'] = str(net.netmask)
+            address_group['subnetPrefixLength'] = str(net.prefixlen)
+            for port in ports:
+                fixed_ips = port['fixed_ips']
+                for fip in fixed_ips:
+                    s_id = fip['subnet_id']
+                    ip_addr = fip['ip_address']
+                    if s_id == subnet['id'] and netaddr.valid_ipv4(ip_addr):
+                        address_group['primaryAddress'] = ip_addr
+                        break
+
+            # For Edge appliances which aren't the metadata proxy Edge
+            #  we add the metadata IP address
+            if not is_proxy and network_id == self.internal_net:
+                address_group['secondaryAddresses'] = {
+                    'type': 'secondary_addresses',
+                    'ipAddress': [METADATA_IP_ADDR]}
+
+            address_groups.append(address_group)
+        return address_groups
+
+    def _setup_metadata_lb(
+            self, rtr_id, vip, v_port, s_port, member_ips, proxy_lb=False):
+
+        binding = nsxv_db.get_nsxv_router_binding(self.context.session, rtr_id)
+        edge_id = binding['edge_id']
+        LOG.debug('Setting up Edge device %s', edge_id)
+
+        lb_obj = nsxv_lb.NsxvLoadbalancer()
+
+        # Create virtual server
+        virt_srvr = nsxv_lb.NsxvLBVirtualServer(
+            name='MdSrv',
+            ip_address=vip,
+            port=v_port)
+
+        # For router Edge, we add X-LB-Proxy-ID header
+        if not proxy_lb:
+            app_rule = nsxv_lb.NsxvLBAppRule(
+                'insert-reqadd',
+                'reqadd X-Metadata-Provider:' + edge_id)
+            virt_srvr.add_app_rule(app_rule)
+
+        # Create app profile
+        #  XFF is inserted in router LBs
+        app_profile = nsxv_lb.NsxvLBAppProfile(
+            name='MDSrvProxy',
+            template='HTTP',
+            insert_xff=not proxy_lb)
+
+        virt_srvr.set_app_profile(app_profile)
+
+        # Create pool, members and monitor
+        pool = nsxv_lb.NsxvLBPool(
+            name='MDSrvPool')
+
+        monitor = nsxv_lb.NsxvLBMonitor(
+            name='MDSrvMon')
+        pool.add_monitor(monitor)
+
+        i = 0
+        for member_ip in member_ips:
+            i += 1
+            member = nsxv_lb.NsxvLBPoolMember(
+                name='Member-%d' % i,
+                ip_address=member_ip,
+                port=s_port,
+                monitor_port=s_port)
+            pool.add_member(member)
+
+        virt_srvr.set_default_pool(pool)
+        lb_obj.add_virtual_server(virt_srvr)
+
+        lb_obj.submit_to_backend(
+            self.nsxv_plugin.nsx_v.vcns,
+            edge_id)
+
+    def configure_router_edge(self, rtr_id):
+        # Connect router interface to inter-edge network
+        port_data = {
+            'port': {
+                'network_id': self.internal_net,
+                'name': None,
+                'admin_state_up': True,
+                'device_id': rtr_id,
+                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
+                'fixed_ips': attr.ATTR_NOT_SPECIFIED,
+                'mac_address': attr.ATTR_NOT_SPECIFIED,
+                'port_security_enabled': False,
+                'tenant_id': None}}
+
+        self.nsxv_plugin.create_port(self.context, port_data)
+
+        address_groups = self._get_address_groups(
+            self.context,
+            self.internal_net,
+            rtr_id,
+            is_proxy=False)
+
+        edge_utils.update_internal_interface(
+            self.nsxv_plugin.nsx_v,
+            self.context,
+            rtr_id,
+            self.internal_net,
+            address_groups=address_groups)
+
+        self._setup_metadata_lb(rtr_id,
+                                METADATA_IP_ADDR,
+                                METADATA_TCP_PORT,
+                                cfg.CONF.nsxv.nova_metadata_port,
+                                self.proxy_edge_ips,
+                                proxy_lb=False)
+
+    def get_router_fw_rules(self):
+        fw_rules = [
+            {
+                'name': 'MDServiceIP',
+                'enabled': True,
+                'action': 'allow',
+                'destination_ip_address': [METADATA_IP_ADDR]
+            },
+            {
+                'name': 'MDInterEdgeNet',
+                'enabled': True,
+                'action': 'deny',
+                'destination_ip_address': [INTERNAL_SUBNET]
+            }]
+
+        return fw_rules
diff --git a/vmware_nsx/neutron/plugins/vmware/shell/__init__.py b/vmware_nsx/neutron/plugins/vmware/shell/__init__.py
index e0b15b8d2f..1dbe1a5ae9 100644
--- a/vmware_nsx/neutron/plugins/vmware/shell/__init__.py
+++ b/vmware_nsx/neutron/plugins/vmware/shell/__init__.py
@@ -16,8 +16,8 @@
 
 import sys
 
-from neutron.plugins.vmware.shell import commands as cmd
 from neutronclient import shell
+from vmware_nsx.neutron.plugins.vmware.shell import commands as cmd
 
 
 class NsxManage(shell.NeutronShell):
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py b/vmware_nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py
index 1ad688804b..1e3c3e91d5 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/common/VcnsApiClient.py
@@ -17,24 +17,48 @@ import base64
 import eventlet
 from oslo.serialization import jsonutils
 
-from neutron.plugins.vmware.vshield.common import exceptions
+from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions
 
 httplib2 = eventlet.import_patched('httplib2')
 
 
-def xmldumps(obj):
+def _xmldump(obj):
+    """Sort of imporved xml creation method.
+
+    This converts the dict to xml with following assumptions:
+    keys starting with _(underscore) are to be used as attributes and not
+    elements keys starting with @ are to there so that dict can be made.
+    The keys are not part of any xml schema.
+    """
+
     config = ""
+    attr = ""
     if isinstance(obj, dict):
         for key, value in obj.iteritems():
-            cfg = "<%s>%s</%s>" % (key, xmldumps(value), key)
-            config += cfg
+            if (key.startswith('_')):
+                attr += ' %s="%s"' % (key[1:], value)
+            else:
+                a, x = _xmldump(value)
+                if (key.startswith('@')):
+                    cfg = "%s" % (x)
+                else:
+                    cfg = "<%s%s>%s</%s>" % (key, a, x, key)
+
+                config += cfg
     elif isinstance(obj, list):
         for value in obj:
-            config += xmldumps(value)
+            a, x = _xmldump(value)
+            attr += a
+            config += x
     else:
         config = obj
 
-    return config
+    return attr, config
+
+
+def xmldumps(obj):
+    attr, xml = _xmldump(obj)
+    return xml
 
 
 class VcnsApiHelper(object):
@@ -43,6 +67,7 @@ class VcnsApiHelper(object):
         400: exceptions.RequestBad,
         403: exceptions.Forbidden,
         404: exceptions.ResourceNotFound,
+        409: exceptions.ServiceConflict,
         415: exceptions.MediaTypeUnsupport,
         503: exceptions.ServiceUnavailable
     }
@@ -58,16 +83,22 @@ class VcnsApiHelper(object):
         else:
             self.encode = xmldumps
 
-    def request(self, method, uri, params=None):
+    def request(self, method, uri, params=None, headers=None,
+                encodeparams=True):
         uri = self.address + uri
         http = httplib2.Http()
         http.disable_ssl_certificate_validation = True
-        headers = {
-            'Content-Type': 'application/' + self.format,
-            'Accept': 'application/' + 'json',
-            'Authorization': 'Basic ' + self.authToken
-        }
-        body = self.encode(params) if params else None
+        if headers is None:
+            headers = {}
+
+        headers['Content-Type'] = 'application/' + self.format
+        headers['Accept'] = 'application/' + self.format,
+        headers['Authorization'] = 'Basic ' + self.authToken
+
+        if encodeparams is True:
+            body = self.encode(params) if params else None
+        else:
+            body = params if params else None
         header, response = http.request(uri, method,
                                         body=body, headers=headers)
         status = int(header['status'])
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/common/constants.py b/vmware_nsx/neutron/plugins/vmware/vshield/common/constants.py
index 1c2aa25db1..96bbfbfcd4 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/common/constants.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/common/constants.py
@@ -13,14 +13,29 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from oslo.config import cfg
+
+from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
+
+
 EDGE_ID = 'edge_id'
 ROUTER_ID = 'router_id'
+DHCP_EDGE_PREFIX = 'dhcp-'
+ROUTER_EDGE_PREFIX = 'router-'
+PLR_EDGE_PREFIX = 'plr-'
+BACKUP_ROUTER_PREFIX = 'backup-'
+EDGE_NAME_LEN = 20
 
 # Interface
 EXTERNAL_VNIC_INDEX = 0
 INTERNAL_VNIC_INDEX = 1
 EXTERNAL_VNIC_NAME = "external"
 INTERNAL_VNIC_NAME = "internal"
+MAX_VNIC_NUM = 10
+MAX_TUNNEL_NUM = (cfg.CONF.nsxv.maximum_tunnels_per_vnic if
+                  (cfg.CONF.nsxv.maximum_tunnels_per_vnic < 110 and
+                   cfg.CONF.nsxv.maximum_tunnels_per_vnic > 0)
+                  else 10)
 
 INTEGRATION_LR_IPADDRESS = "169.254.2.1/28"
 INTEGRATION_EDGE_IPADDRESS = "169.254.2.3"
@@ -35,6 +50,20 @@ VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013
 
 SUFFIX_LENGTH = 8
 
+#Edge size
+SERVICE_SIZE_MAPPING = {
+    'router': nsxv_constants.LARGE,
+    'dhcp': nsxv_constants.COMPACT
+}
+ALLOWED_EDGE_SIZES = (nsxv_constants.COMPACT,
+                      nsxv_constants.LARGE,
+                      nsxv_constants.XLARGE,
+                      nsxv_constants.QUADLARGE)
+
+#Edge type
+ALLOWED_EDGE_TYPES = (nsxv_constants.SERVICE_EDGE,
+                      nsxv_constants.VDR_EDGE)
+
 
 # router status by number
 class RouterStatus(object):
@@ -43,3 +72,7 @@ class RouterStatus(object):
     ROUTER_STATUS_PENDING_CREATE = 2
     ROUTER_STATUS_PENDING_DELETE = 3
     ROUTER_STATUS_ERROR = 4
+
+
+class InternalEdgePurposes(object):
+    INTER_EDGE_PURPOSE = 'inter_edge_net'
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/common/exceptions.py b/vmware_nsx/neutron/plugins/vmware/vshield/common/exceptions.py
index e90ca1825c..9c61994b42 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/common/exceptions.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/common/exceptions.py
@@ -66,3 +66,7 @@ class MediaTypeUnsupport(VcnsApiException):
 
 class ServiceUnavailable(VcnsApiException):
     message = _("Service Unavailable: %(uri)s")
+
+
+class ServiceConflict(VcnsApiException):
+    message = _("Concurrent object access error: %(uri)s")
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py
index 26c6262040..c7008eacfe 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/edge_appliance_driver.py
@@ -1,3 +1,5 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 # Copyright 2013 VMware, Inc
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,16 +14,21 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
+from oslo.config import cfg
 from oslo.serialization import jsonutils
 from oslo.utils import excutils
 
-from neutron.i18n import _LE
+from neutron.i18n import _LE, _LI, _LW
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.vshield.common import constants as vcns_const
-from neutron.plugins.vmware.vshield.common import exceptions
-from neutron.plugins.vmware.vshield.tasks import constants
-from neutron.plugins.vmware.vshield.tasks import tasks
+from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware.vshield.common import constants
+from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import (
+    constants as task_constants)
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks
 
 LOG = logging.getLogger(__name__)
 
@@ -36,13 +43,11 @@ class EdgeApplianceDriver(object):
 
     def _assemble_edge(self, name, appliance_size="compact",
                        deployment_container_id=None, datacenter_moid=None,
-                       enable_aesni=True, hypervisor_assist=False,
+                       enable_aesni=True, dist=False,
                        enable_fips=False, remote_access=False):
         edge = {
             'name': name,
             'fqdn': name,
-            'hypervisorAssist': hypervisor_assist,
-            'type': 'gatewayServices',
             'enableAesni': enable_aesni,
             'enableFips': enable_fips,
             'cliSettings': {
@@ -51,10 +56,14 @@ class EdgeApplianceDriver(object):
             'appliances': {
                 'applianceSize': appliance_size
             },
-            'vnics': {
-                'vnics': []
-            }
         }
+        if not dist:
+            edge['type'] = "gatewayServices"
+            edge['vnics'] = {'vnics': []}
+        else:
+            edge['type'] = "distributedRouter"
+            edge['interfaces'] = {'interfaces': []}
+
         if deployment_container_id:
             edge['appliances']['deploymentContainerId'] = (
                 deployment_container_id)
@@ -71,14 +80,15 @@ class EdgeApplianceDriver(object):
             appliance['datastoreId'] = datastore_id
         return appliance
 
-    def _assemble_edge_vnic(self, name, index, portgroup_id,
+    def _assemble_edge_vnic(self, name, index, portgroup_id, tunnel_index=-1,
                             primary_address=None, subnet_mask=None,
                             secondary=None,
                             type="internal",
                             enable_proxy_arp=False,
                             enable_send_redirects=True,
                             is_connected=True,
-                            mtu=1500):
+                            mtu=1500,
+                            address_groups=None):
         vnic = {
             'index': index,
             'name': name,
@@ -89,30 +99,76 @@ class EdgeApplianceDriver(object):
             'enableSendRedirects': enable_send_redirects,
             'isConnected': is_connected
         }
-        if primary_address and subnet_mask:
-            address_group = {
-                'primaryAddress': primary_address,
-                'subnetMask': subnet_mask
-            }
-            if secondary:
-                address_group['secondaryAddresses'] = {
-                    'ipAddress': secondary,
-                    'type': 'IpAddressesDto'
+        if address_groups is None:
+            address_groups = []
+        if not address_groups:
+            if primary_address and subnet_mask:
+                address_group = {
+                    'primaryAddress': primary_address,
+                    'subnetMask': subnet_mask
                 }
+                if secondary:
+                    address_group['secondaryAddresses'] = {
+                        'ipAddress': secondary,
+                        'type': 'secondary_addresses'
+                    }
 
-            vnic['addressGroups'] = {
-                'addressGroups': [address_group]
-            }
+                vnic['addressGroups'] = {
+                    'addressGroups': [address_group]
+                }
+            else:
+                vnic['subInterfaces'] = {'subInterfaces': address_groups}
+        else:
+            if tunnel_index < 0:
+                vnic['addressGroups'] = {'addressGroups': address_groups}
+            else:
+                vnic['subInterfaces'] = {'subInterfaces': address_groups}
 
         return vnic
 
+    def _assemble_vdr_interface(self, portgroup_id,
+                                primary_address=None, subnet_mask=None,
+                                secondary=None,
+                                type="internal",
+                                is_connected=True,
+                                mtu=1500,
+                                address_groups=None):
+        interface = {
+            'type': type,
+            'connectedToId': portgroup_id,
+            'mtu': mtu,
+            'isConnected': is_connected
+        }
+        if address_groups is None:
+            address_groups = []
+        if not address_groups:
+            if primary_address and subnet_mask:
+                address_group = {
+                    'primaryAddress': primary_address,
+                    'subnetMask': subnet_mask
+                }
+                if secondary:
+                    address_group['secondaryAddresses'] = {
+                        'ipAddress': secondary,
+                        'type': 'secondary_addresses'
+                    }
+
+                interface['addressGroups'] = {
+                    'addressGroups': [address_group]
+                }
+        else:
+            interface['addressGroups'] = {'addressGroups': address_groups}
+        interfaces = {'interfaces': [interface]}
+
+        return interfaces
+
     def _edge_status_to_level(self, status):
         if status == 'GREEN':
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
+            status_level = constants.RouterStatus.ROUTER_STATUS_ACTIVE
         elif status in ('GREY', 'YELLOW'):
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_DOWN
+            status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
         else:
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
+            status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
         return status_level
 
     def _enable_loadbalancer(self, edge):
@@ -131,13 +187,12 @@ class EdgeApplianceDriver(object):
         except exceptions.VcnsApiException as e:
             LOG.exception(_LE("VCNS: Failed to get edge status:\n%s"),
                           e.response)
-            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
+            status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
             try:
                 desc = jsonutils.loads(e.response)
                 if desc.get('errorCode') == (
-                    vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
-                    status_level = (
-                        vcns_const.RouterStatus.ROUTER_STATUS_DOWN)
+                    constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
+                    status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
             except ValueError:
                 LOG.exception(e.response)
 
@@ -153,59 +208,131 @@ class EdgeApplianceDriver(object):
 
         return edges_status_level
 
-    def _update_interface(self, task):
-        edge_id = task.userdata['edge_id']
-        config = task.userdata['config']
-        LOG.debug("VCNS: start updating vnic %s", config)
+    def get_interface(self, edge_id, vnic_index):
+        self.check_edge_jobs(edge_id)
+        # get vnic interface address groups
         try:
-            self.vcns.update_interface(edge_id, config)
-        except exceptions.VcnsApiException as e:
+            return self.vcns.query_interface(edge_id, vnic_index)
+        except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to update vnic %(config)s:\n"
-                                  "%(response)s"), {
-                                    'config': config,
-                                    'response': e.response})
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to update vnic %d"),
-                              config['index'])
+                LOG.exception(_LE("NSXv: Failed to query vnic %s"), vnic_index)
 
-        return constants.TaskStatus.COMPLETED
+    def check_edge_jobs(self, edge_id):
+        retries = max(cfg.CONF.nsxv.retries, 1)
+        delay = 0.5
+        for attempt in range(1, retries + 1):
+            if attempt != 1:
+                time.sleep(delay)
+                delay = min(2 * delay, 60)
+            h, jobs = self.vcns.get_edge_jobs(edge_id)
+            if jobs['edgeJob'] == []:
+                return
+            LOG.warning(_LW('NSXv: jobs still running.'))
+        LOG.error(_LE('NSXv: jobs are still runnings!'))
 
     def update_interface(self, router_id, edge_id, index, network,
-                         address=None, netmask=None, secondary=None,
-                         jobdata=None):
+                         tunnel_index=-1, address=None, netmask=None,
+                         secondary=None, jobdata=None,
+                         address_groups=None):
         LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", {
             'index': index, 'addr': address, 'netmask': netmask})
-        if index == vcns_const.EXTERNAL_VNIC_INDEX:
-            name = vcns_const.EXTERNAL_VNIC_NAME
+        if index == constants.EXTERNAL_VNIC_INDEX:
+            name = constants.EXTERNAL_VNIC_NAME
             intf_type = 'uplink'
-        elif index == vcns_const.INTERNAL_VNIC_INDEX:
-            name = vcns_const.INTERNAL_VNIC_NAME
-            intf_type = 'internal'
         else:
-            msg = _("Vnic %d currently not supported") % index
-            raise exceptions.VcnsGeneralException(msg)
+            name = constants.INTERNAL_VNIC_NAME + str(index)
+            if tunnel_index < 0:
+                intf_type = 'internal'
+            else:
+                intf_type = 'trunk'
 
         config = self._assemble_edge_vnic(
-            name, index, network, address, netmask, secondary, type=intf_type)
+            name, index, network, tunnel_index,
+            address, netmask, secondary, type=intf_type,
+            address_groups=address_groups)
 
+        self.vcns.update_interface(edge_id, config)
+
+    def add_vdr_internal_interface(self, edge_id,
+                                   network, address=None, netmask=None,
+                                   secondary=None, address_groups=None,
+                                   type="internal"):
+        LOG.debug("Add VDR interface on edge: %s", edge_id)
+        if address_groups is None:
+            address_groups = []
+        interface_req = self._assemble_vdr_interface(
+            network, address, netmask, secondary,
+            address_groups=address_groups,
+            type=type)
+        self.vcns.add_vdr_internal_interface(edge_id, interface_req)
+        header, response = self.vcns.get_edge_interfaces(edge_id)
+        for interface in response['interfaces']:
+            if interface['connectedToId'] == network:
+                vnic_index = int(interface['index'])
+                return vnic_index
+
+    def update_vdr_internal_interface(self, edge_id, index, network,
+                                      address=None, netmask=None,
+                                      secondary=None, address_groups=None):
+        if not address_groups:
+            address_groups = []
+        interface_req = self._assemble_vdr_interface(
+            network, address, netmask, secondary,
+            address_groups=address_groups)
+        try:
+            header, response = self.vcns.update_vdr_internal_interface(
+                edge_id, index, interface_req)
+        except exceptions.VcnsApiException:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Failed to update vdr interface on edge: "
+                                  "%s"), edge_id)
+
+    def delete_vdr_internal_interface(self, edge_id, interface_index):
+        LOG.debug("Delete VDR interface on edge: %s", edge_id)
+        try:
+            header, response = self.vcns.delete_vdr_internal_interface(
+                edge_id, interface_index)
+        except exceptions.VcnsApiException:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Failed to delete vdr interface on edge: "
+                                  "%s"),
+                              edge_id)
+
+    def _delete_interface(self, task):
+        edge_id = task.userdata['edge_id']
+        vnic_index = task.userdata['vnic_index']
+        LOG.debug("start deleting vnic %s", vnic_index)
+        try:
+            self.vcns.delete_interface(edge_id, vnic_index)
+        except exceptions.VcnsApiException:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Failed to delete vnic %(vnic_index)s: "
+                                  "on edge %(edge_id)s"),
+                              {'vnic_index': vnic_index,
+                               'edge_id': edge_id})
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Failed to delete vnic %d"), vnic_index)
+
+        return task_constants.TaskStatus.COMPLETED
+
+    def delete_interface(self, router_id, edge_id, index, jobdata=None):
+        task_name = "delete-interface-%s-%d" % (edge_id, index)
         userdata = {
+            'router_id': router_id,
             'edge_id': edge_id,
-            'config': config,
+            'vnic_index': index,
             'jobdata': jobdata
         }
-        task_name = "update-interface-%s-%d" % (edge_id, index)
-        task = tasks.Task(task_name, router_id,
-                          self._update_interface, userdata=userdata)
-        task.add_result_monitor(self.callbacks.interface_update_result)
+        task = tasks.Task(task_name, router_id, self._delete_interface,
+                          userdata=userdata)
+        task.add_result_monitor(self.callbacks.interface_delete_result)
         self.task_manager.add(task)
         return task
 
     def _deploy_edge(self, task):
         userdata = task.userdata
-        name = userdata['router_name']
-        LOG.debug("VCNS: start deploying edge %s", name)
+        LOG.debug("NSXv: start deploying edge")
         request = userdata['request']
         try:
             header = self.vcns.deploy_edge(request)[0]
@@ -215,11 +342,10 @@ class EdgeApplianceDriver(object):
             edge_id = response['edgeId']
             LOG.debug("VCNS: deploying edge %s", edge_id)
             userdata['edge_id'] = edge_id
-            status = constants.TaskStatus.PENDING
+            status = task_constants.TaskStatus.PENDING
         except exceptions.VcnsApiException:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: deploy edge failed for router %s."),
-                              name)
+                LOG.exception(_LE("NSXv: deploy edge failed."))
 
         return status
 
@@ -230,16 +356,15 @@ class EdgeApplianceDriver(object):
             task.userdata['retries'] = 0
             system_status = response.get('systemStatus', None)
             if system_status is None:
-                status = constants.TaskStatus.PENDING
+                status = task_constants.TaskStatus.PENDING
             elif system_status == 'good':
-                status = constants.TaskStatus.COMPLETED
+                status = task_constants.TaskStatus.COMPLETED
             else:
-                status = constants.TaskStatus.ERROR
-        except exceptions.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Edge %s status query failed."),
-                              edge_id)
-        except Exception:
+                status = task_constants.TaskStatus.ERROR
+        except exceptions.VcnsApiException as e:
+            LOG.exception(_LE("VCNS: Edge %s status query failed."), edge_id)
+            raise e
+        except Exception as e:
             retries = task.userdata.get('retries', 0) + 1
             if retries < 3:
                 task.userdata['retries'] = retries
@@ -247,34 +372,42 @@ class EdgeApplianceDriver(object):
                                   "status. Retry %(retries)d."),
                               {'edge_id': edge_id,
                                'retries': retries})
-                status = constants.TaskStatus.PENDING
+                status = task_constants.TaskStatus.PENDING
             else:
                 LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
-                                 "Abort."), edge_id)
-                status = constants.TaskStatus.ERROR
+                                  "Abort."), edge_id)
+                status = task_constants.TaskStatus.ERROR
         LOG.debug("VCNS: Edge %s status", edge_id)
         return status
 
     def _result_edge(self, task):
-        router_name = task.userdata['router_name']
         edge_id = task.userdata.get('edge_id')
-        if task.status != constants.TaskStatus.COMPLETED:
-            LOG.error(_LE("VCNS: Failed to deploy edge %(edge_id)s "
-                          "for %(name)s, status %(status)d"), {
-                            'edge_id': edge_id,
-                            'name': router_name,
-                            'status': task.status
-                        })
+        if task.status != task_constants.TaskStatus.COMPLETED:
+            LOG.error(_LE("NSXv: Failed to deploy edge %(edge_id)s "
+                          "status %(status)d"),
+                      {'edge_id': edge_id,
+                       'status': task.status})
         else:
-            LOG.debug("VCNS: Edge %(edge_id)s deployed for "
-                      "router %(name)s", {
-                          'edge_id': edge_id, 'name': router_name
-                      })
+            LOG.debug("NSXv: Edge %s is deployed", edge_id)
+
+    def _update_edge(self, task):
+        edge_id = task.userdata['edge_id']
+        LOG.debug("start update edge %s", edge_id)
+        request = task.userdata['request']
+        try:
+            self.vcns.update_edge(edge_id, request)
+            status = task_constants.TaskStatus.COMPLETED
+        except exceptions.VcnsApiException as e:
+            LOG.error(_LE("Failed to update edge: %s"),
+                      e.response)
+            status = task_constants.TaskStatus.ERROR
+
+        return status
 
     def _delete_edge(self, task):
         edge_id = task.userdata['edge_id']
         LOG.debug("VCNS: start destroying edge %s", edge_id)
-        status = constants.TaskStatus.COMPLETED
+        status = task_constants.TaskStatus.COMPLETED
         if edge_id:
             try:
                 self.vcns.delete_edge(edge_id)
@@ -284,10 +417,10 @@ class EdgeApplianceDriver(object):
                 LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n"
                                   "%(response)s"),
                               {'edge_id': edge_id, 'response': e.response})
-                status = constants.TaskStatus.ERROR
+                status = task_constants.TaskStatus.ERROR
             except Exception:
                 LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
-                status = constants.TaskStatus.ERROR
+                status = task_constants.TaskStatus.ERROR
 
         return status
 
@@ -295,42 +428,49 @@ class EdgeApplianceDriver(object):
         try:
             return self.vcns.get_edges()[1]
         except exceptions.VcnsApiException as e:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
-                              e.response)
+            LOG.exception(_LE("VCNS: Failed to get edges:\n%s"), e.response)
+            raise e
 
-    def deploy_edge(self, router_id, name, internal_network, jobdata=None,
-                    wait_for_exec=False, loadbalancer_enable=True):
+    def deploy_edge(self, resource_id, name, internal_network, jobdata=None,
+                    dist=False, wait_for_exec=False, loadbalancer_enable=True,
+                    appliance_size=nsxv_constants.LARGE):
         task_name = 'deploying-%s' % name
         edge_name = name
         edge = self._assemble_edge(
             edge_name, datacenter_moid=self.datacenter_moid,
             deployment_container_id=self.deployment_container_id,
-            appliance_size='large', remote_access=True)
+            appliance_size=appliance_size, remote_access=True, dist=dist)
         appliance = self._assemble_edge_appliance(self.resource_pool_id,
                                                   self.datastore_id)
         if appliance:
             edge['appliances']['appliances'] = [appliance]
 
-        vnic_external = self._assemble_edge_vnic(
-            vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX,
-            self.external_network, type="uplink")
-        edge['vnics']['vnics'].append(vnic_external)
-        vnic_inside = self._assemble_edge_vnic(
-            vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX,
-            internal_network,
-            vcns_const.INTEGRATION_EDGE_IPADDRESS,
-            vcns_const.INTEGRATION_SUBNET_NETMASK,
-            type="internal")
-        edge['vnics']['vnics'].append(vnic_inside)
-        if loadbalancer_enable:
+        if not dist:
+            vnic_external = self._assemble_edge_vnic(
+                constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
+                self.external_network, type="uplink")
+            edge['vnics']['vnics'].append(vnic_external)
+        else:
+            edge['mgmtInterface'] = {
+                'connectedToId': self.external_network,
+                'name': "mgmtInterface"}
+        if internal_network:
+            vnic_inside = self._assemble_edge_vnic(
+                constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
+                internal_network,
+                constants.INTEGRATION_EDGE_IPADDRESS,
+                constants.INTEGRATION_SUBNET_NETMASK,
+                type="internal")
+            edge['vnics']['vnics'].append(vnic_inside)
+        if not dist and loadbalancer_enable:
             self._enable_loadbalancer(edge)
         userdata = {
+            'dist': dist,
             'request': edge,
             'router_name': name,
             'jobdata': jobdata
         }
-        task = tasks.Task(task_name, router_id,
+        task = tasks.Task(task_name, resource_id,
                           self._deploy_edge,
                           status_callback=self._status_edge,
                           result_callback=self._result_edge,
@@ -340,19 +480,68 @@ class EdgeApplianceDriver(object):
         self.task_manager.add(task)
 
         if wait_for_exec:
-            # wait until the deploy task is executed so edge_id is available
-            task.wait(constants.TaskState.EXECUTED)
+            # waitl until the deploy task is executed so edge_id is available
+            task.wait(task_constants.TaskState.EXECUTED)
 
         return task
 
-    def delete_edge(self, router_id, edge_id, jobdata=None):
+    def update_edge(self, router_id, edge_id, name, internal_network,
+                    jobdata=None, dist=False, loadbalancer_enable=True,
+                    appliance_size=nsxv_constants.LARGE):
+        """Update edge name."""
+        task_name = 'update-%s' % name
+        edge_name = name
+        edge = self._assemble_edge(
+            edge_name, datacenter_moid=self.datacenter_moid,
+            deployment_container_id=self.deployment_container_id,
+            appliance_size=appliance_size, remote_access=True, dist=dist)
+        edge['id'] = edge_id
+        appliance = self._assemble_edge_appliance(self.resource_pool_id,
+                                                  self.datastore_id)
+        if appliance:
+            edge['appliances']['appliances'] = [appliance]
+
+        if not dist:
+            vnic_external = self._assemble_edge_vnic(
+                constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
+                self.external_network, type="uplink")
+            edge['vnics']['vnics'].append(vnic_external)
+        else:
+            edge['mgmtInterface'] = {
+                'connectedToId': self.external_network,
+                'name': "mgmtInterface"}
+
+        if internal_network:
+            internal_vnic = self._assemble_edge_vnic(
+                constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
+                internal_network,
+                constants.INTEGRATION_EDGE_IPADDRESS,
+                constants.INTEGRATION_SUBNET_NETMASK,
+                type="internal")
+            edge['vnics']['vnics'].append(internal_vnic)
+        if not dist and loadbalancer_enable:
+            self._enable_loadbalancer(edge)
+        userdata = {
+            'edge_id': edge_id,
+            'request': edge,
+            'jobdata': jobdata
+        }
+        task = tasks.Task(task_name, router_id,
+                          self._update_edge,
+                          userdata=userdata)
+        task.add_result_monitor(self.callbacks.edge_update_result)
+        self.task_manager.add(task)
+        return task
+
+    def delete_edge(self, resource_id, edge_id, jobdata=None, dist=False):
         task_name = 'delete-%s' % edge_id
         userdata = {
-            'router_id': router_id,
+            'router_id': resource_id,
+            'dist': dist,
             'edge_id': edge_id,
             'jobdata': jobdata
         }
-        task = tasks.Task(task_name, router_id, self._delete_edge,
+        task = tasks.Task(task_name, resource_id, self._delete_edge,
                           userdata=userdata)
         task.add_result_monitor(self.callbacks.edge_delete_result)
         self.task_manager.add(task)
@@ -360,23 +549,30 @@ class EdgeApplianceDriver(object):
 
     def _assemble_nat_rule(self, action, original_address,
                            translated_address,
-                           vnic_index=vcns_const.EXTERNAL_VNIC_INDEX,
-                           enabled=True):
+                           vnic_index=constants.EXTERNAL_VNIC_INDEX,
+                           enabled=True,
+                           protocol='any',
+                           original_port='any',
+                           translated_port='any'):
         nat_rule = {}
         nat_rule['action'] = action
         nat_rule['vnic'] = vnic_index
         nat_rule['originalAddress'] = original_address
         nat_rule['translatedAddress'] = translated_address
         nat_rule['enabled'] = enabled
+        nat_rule['protocol'] = protocol
+        nat_rule['originalPort'] = original_port
+        nat_rule['translatedPort'] = translated_port
+
         return nat_rule
 
     def get_nat_config(self, edge_id):
         try:
             return self.vcns.get_nat_config(edge_id)[1]
         except exceptions.VcnsApiException as e:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
-                              e.response)
+            LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
+                          e.response)
+            raise e
 
     def _create_nat_rule(self, task):
         # TODO(fank): use POST for optimization
@@ -389,18 +585,18 @@ class EdgeApplianceDriver(object):
 
         del nat['version']
 
-        if location is None or location == vcns_const.APPEND:
+        if location is None or location == constants.APPEND:
             nat['rules']['natRulesDtos'].append(rule)
         else:
             nat['rules']['natRulesDtos'].insert(location, rule)
 
         try:
             self.vcns.update_nat_config(edge_id, nat)
-            status = constants.TaskStatus.COMPLETED
+            status = task_constants.TaskStatus.COMPLETED
         except exceptions.VcnsApiException as e:
             LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
                           e.response)
-            status = constants.TaskStatus.ERROR
+            status = task_constants.TaskStatus.ERROR
 
         return status
 
@@ -433,7 +629,7 @@ class EdgeApplianceDriver(object):
             'type': addrtype, 'addr': address})
         nat = self.get_nat_config(edge_id)
         del nat['version']
-        status = constants.TaskStatus.COMPLETED
+        status = task_constants.TaskStatus.COMPLETED
         for nat_rule in nat['rules']['natRulesDtos']:
             if nat_rule[addrtype] == address:
                 rule_id = nat_rule['ruleId']
@@ -442,7 +638,7 @@ class EdgeApplianceDriver(object):
                 except exceptions.VcnsApiException as e:
                     LOG.exception(_LE("VCNS: Failed to delete snat rule:\n"
                                       "%s"), e.response)
-                    status = constants.TaskStatus.ERROR
+                    status = task_constants.TaskStatus.ERROR
 
         return status
 
@@ -507,7 +703,7 @@ class EdgeApplianceDriver(object):
         if task != self.updated_task['nat'][edge_id]:
             # this task does not have the latest config, abort now
             # for speedup
-            return constants.TaskStatus.ABORT
+            return task_constants.TaskStatus.ABORT
 
         rules = task.userdata['rules']
         LOG.debug("VCNS: start updating nat rules: %s", rules)
@@ -521,11 +717,11 @@ class EdgeApplianceDriver(object):
 
         try:
             self.vcns.update_nat_config(edge_id, nat)
-            status = constants.TaskStatus.COMPLETED
+            status = task_constants.TaskStatus.COMPLETED
         except exceptions.VcnsApiException as e:
             LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
                           e.response)
-            status = constants.TaskStatus.ERROR
+            status = task_constants.TaskStatus.ERROR
 
         return status
 
@@ -534,7 +730,7 @@ class EdgeApplianceDriver(object):
         LOG.debug("VCNS: update nat rule\n"
                   "SNAT:%(snat)s\n"
                   "DNAT:%(dnat)s\n", {
-                      'snat': snats, 'dnat': dnats})
+                        'snat': snats, 'dnat': dnats})
         nat_rules = []
 
         for dnat in dnats:
@@ -560,24 +756,56 @@ class EdgeApplianceDriver(object):
         self.task_manager.add(task)
         return task
 
+    def update_dnat_rules(self, edge_id, dnat_rules):
+        edge_nat_rules = []
+        for rule in dnat_rules:
+            edge_nat_rules.append(
+                self._assemble_nat_rule(
+                    'dnat',
+                    rule['dst'],
+                    rule['translated'],
+                    vnic_index=rule['vnic_index'],
+                    enabled=True,
+                    protocol=rule['protocol'],
+                    original_port=rule['original_port'],
+                    translated_port=rule['translated_port']))
+
+        nat = {
+            'featureType': 'nat',
+            'rules': {
+                'natRulesDtos': edge_nat_rules
+            }
+        }
+
+        self.vcns.update_nat_config(edge_id, nat)
+
     def _update_routes(self, task):
         edge_id = task.userdata['edge_id']
         if (task != self.updated_task['route'][edge_id] and
             task.userdata.get('skippable', True)):
             # this task does not have the latest config, abort now
             # for speedup
-            return constants.TaskStatus.ABORT
+            return task_constants.TaskStatus.ABORT
         gateway = task.userdata['gateway']
+        gateway_vnic_index = task.userdata['gateway_vnic_index']
         routes = task.userdata['routes']
         LOG.debug("VCNS: start updating routes for %s", edge_id)
         static_routes = []
         for route in routes:
-            static_routes.append({
-                "description": "",
-                "vnic": vcns_const.INTERNAL_VNIC_INDEX,
-                "network": route['cidr'],
-                "nextHop": route['nexthop']
-            })
+            if route.get('vnic_index') is None:
+                static_routes.append({
+                    "description": "",
+                    "vnic": constants.INTERNAL_VNIC_INDEX,
+                    "network": route['cidr'],
+                    "nextHop": route['nexthop']
+                })
+            else:
+                static_routes.append({
+                    "description": "",
+                    "vnic": route['vnic_index'],
+                    "network": route['cidr'],
+                    "nextHop": route['nexthop']
+                })
         request = {
             "staticRoutes": {
                 "staticRoutes": static_routes
@@ -587,26 +815,28 @@ class EdgeApplianceDriver(object):
             request["defaultRoute"] = {
                 "description": "default-gateway",
                 "gatewayAddress": gateway,
-                "vnic": vcns_const.EXTERNAL_VNIC_INDEX
+                "vnic": gateway_vnic_index
             }
         try:
             self.vcns.update_routes(edge_id, request)
-            status = constants.TaskStatus.COMPLETED
+            status = task_constants.TaskStatus.COMPLETED
         except exceptions.VcnsApiException as e:
             LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
                           e.response)
-            status = constants.TaskStatus.ERROR
+            status = task_constants.TaskStatus.ERROR
 
         return status
 
     def update_routes(self, router_id, edge_id, gateway, routes,
-                      skippable=True, jobdata=None):
+                      skippable=True, jobdata=None,
+                      gateway_vnic_index=constants.EXTERNAL_VNIC_INDEX):
         if gateway:
             gateway = gateway.split('/')[0]
 
         userdata = {
             'edge_id': edge_id,
             'gateway': gateway,
+            'gateway_vnic_index': gateway_vnic_index,
             'routes': routes,
             'skippable': skippable,
             'jobdata': jobdata
@@ -659,3 +889,90 @@ class EdgeApplianceDriver(object):
             with excutils.save_and_reraise_exception():
                 LOG.exception(_LE("Failed to enable loadbalancer "
                                   "service config"))
+
+    def _delete_port_group(self, task):
+        try:
+            header, response = self.vcns.get_edge_id(task.userdata['job_id'])
+        except exceptions.VcnsApiException:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("NSXv: Failed to get job for %s"),
+                          task.userdata)
+        status = response['status']
+        if status != 'COMPLETED':
+            if (status == 'QUEUED' or status == 'RUNNING' or
+                status == 'ROLLBACK'):
+                LOG.debug("NSXv: job is still pending for %s", task.userdata)
+                return task_constants.TaskStatus.PENDING
+        try:
+            self.vcns.delete_port_group(
+                task.userdata['dvs_id'],
+                task.userdata['port_group_id'])
+        except Exception as e:
+            LOG.error(_LE('Unable to delete %(pg)s (job status %(state)s) '
+                          'exception %(ex)s'),
+                      {'pg': task.userdata['port_group_id'],
+                       'state': status,
+                       'ex': e})
+        if status == 'FAILED':
+            return task_constants.TaskStatus.ERROR
+        return task_constants.TaskStatus.COMPLETED
+
+    def delete_portgroup(self, dvs_id, port_group_id, job_id):
+        task_name = "delete-port-group-%s" % port_group_id
+        userdata = {'dvs_id': dvs_id,
+                    'port_group_id': port_group_id,
+                    'job_id': job_id}
+        task = tasks.Task(task_name, port_group_id,
+                          self._delete_port_group,
+                          status_callback=self._delete_port_group,
+                          userdata=userdata)
+        self.task_manager.add(task)
+
+    def _retry_task(self, task):
+        delay = 0.5
+        max_retries = max(cfg.CONF.nsxv.retries, 1)
+        args = task.userdata.get('args', [])
+        kwargs = task.userdata.get('kwargs', {})
+        retry_number = task.userdata['retry_number']
+        retry_command = task.userdata['retry_command']
+        try:
+            retry_command(*args, **kwargs)
+        except Exception as exc:
+            LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s",
+                      {'name': task.name,
+                       'exc': exc,
+                       'retry': retry_number})
+            retry_number += 1
+            if retry_number > max_retries:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception(_LE("Failed to %s"), task.name)
+            else:
+                task.userdata['retry_number'] = retry_number
+                # Sleep twice as long as the previous retry
+                tts = (2 ** (retry_number - 1)) * delay
+                time.sleep(min(tts, 60))
+                return task_constants.TaskStatus.PENDING
+        LOG.info(_LI("Task %(name)s completed."), {'name': task.name})
+        return task_constants.TaskStatus.COMPLETED
+
+    def delete_port_group(self, dvs_id, port_group_id):
+        task_name = 'delete-port-group-%s-%s' % (port_group_id, dvs_id)
+        userdata = {'retry_number': 1,
+                    'retry_command': self.vcns.delete_port_group,
+                    'args': [dvs_id, port_group_id]}
+        task = tasks.Task(task_name, port_group_id,
+                          self._retry_task,
+                          status_callback=self._retry_task,
+                          userdata=userdata)
+        self.task_manager.add(task)
+
+    def delete_virtual_wire(self, vw_id):
+        task_name = 'delete-virtualwire-%s' % vw_id
+        userdata = {'retry_number': 1,
+                    'retry_command': self.vcns.delete_virtual_wire,
+                    'args': [vw_id]}
+        task = tasks.Task(task_name, vw_id,
+                          self._retry_task,
+                          status_callback=self._retry_task,
+                          userdata=userdata)
+        self.task_manager.add(task)
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py
index 6a6843fa98..5d43e271ad 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/edge_firewall_driver.py
@@ -15,12 +15,15 @@
 from oslo.utils import excutils
 
 from neutron.db import db_base_plugin_v2
-from neutron.i18n import _LE
+from neutron.i18n import _, _LE
 from neutron.openstack.common import log as logging
 from neutron.plugins.common import constants
-from neutron.plugins.vmware.dbexts import vcns_db
-from neutron.plugins.vmware.vshield.common import (
+from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
     exceptions as vcns_exc)
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import (
+    constants as task_const)
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks
 
 LOG = logging.getLogger(__name__)
 
@@ -69,17 +72,19 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
 
     def _convert_firewall_rule(self, context, rule, index=None):
         vcns_rule = {
-            "name": rule['name'],
-            "description": rule['description'],
             "action": self._convert_firewall_action(rule['action']),
-            "enabled": rule['enabled']}
+            "enabled": rule.get('enabled', True)}
+        if rule.get('name'):
+            vcns_rule['name'] = rule['name']
+        if rule.get('description'):
+            vcns_rule['description'] = rule['description']
         if rule.get('source_ip_address'):
             vcns_rule['source'] = {
-                "ipAddress": [rule['source_ip_address']]
+                "ipAddress": rule['source_ip_address']
             }
         if rule.get('destination_ip_address'):
             vcns_rule['destination'] = {
-                "ipAddress": [rule['destination_ip_address']]
+                "ipAddress": rule['destination_ip_address']
             }
         service = {}
         if rule.get('source_port'):
@@ -102,27 +107,30 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
 
     def _restore_firewall_rule(self, context, edge_id, response):
         rule = response
-        rule_binding = vcns_db.get_vcns_edge_firewallrule_binding_by_vseid(
+        rule_binding = nsxv_db.get_nsxv_edge_firewallrule_binding_by_vseid(
             context.session, edge_id, rule['ruleId'])
         service = rule['application']['service'][0]
         src_port_range = self._get_port_range_from_min_max_ports(
             service['sourcePort'][0], service['sourcePort'][-1])
         dst_port_range = self._get_port_range_from_min_max_ports(
             service['port'][0], service['port'][-1])
-        return {
+        fw_rule = {
             'firewall_rule': {
-                'name': rule['name'],
                 'id': rule_binding['rule_id'],
-                'description': rule['description'],
-                'source_ip_address': rule['source']['ipAddress'][0],
-                'destination_ip_address': rule['destination']['ipAddress'][0],
+                'source_ip_address': rule['source']['ipAddress'],
+                'destination_ip_address': rule['destination']['ipAddress'],
                 'protocol': service['protocol'],
                 'destination_port': dst_port_range,
                 'source_port': src_port_range,
                 'action': self._restore_firewall_action(rule['action']),
                 'enabled': rule['enabled']}}
+        if rule.get('name'):
+            fw_rule['firewall_rule']['name'] = rule['name']
+        if rule.get('description'):
+            fw_rule['firewall_rule']['description'] = rule['description']
+        return fw_rule
 
-    def _convert_firewall(self, context, firewall):
+    def _convert_firewall(self, context, firewall, allow_external=False):
         #bulk configuration on firewall and rescheduling the rule binding
         ruleTag = 1
         vcns_rules = []
@@ -130,6 +138,11 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             vcns_rule = self._convert_firewall_rule(context, rule, ruleTag)
             vcns_rules.append(vcns_rule)
             ruleTag += 1
+        if allow_external:
+            vcns_rules.append(
+                {'action': "accept",
+                 'enabled': True,
+                 'destination': {'vnicGroupId': ["external"]}})
         return {
             'featureType': "firewall_4.0",
             'firewallRules': {
@@ -140,7 +153,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
         res['firewall_rule_list'] = []
         for rule in response['firewallRules']['firewallRules']:
             rule_binding = (
-                vcns_db.get_vcns_edge_firewallrule_binding_by_vseid(
+                nsxv_db.get_nsxv_edge_firewallrule_binding_by_vseid(
                     context.session, edge_id, rule['ruleId']))
             if rule_binding is None:
                 continue
@@ -151,17 +164,19 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
                 service['port'][0], service['port'][-1])
             item = {
                 'firewall_rule': {
-                    'name': rule['name'],
                     'id': rule_binding['rule_id'],
-                    'description': rule['description'],
-                    'source_ip_address': rule['source']['ipAddress'][0],
+                    'source_ip_address': rule['source']['ipAddress'],
                     'destination_ip_address': rule[
-                        'destination']['ipAddress'][0],
+                        'destination']['ipAddress'],
                     'protocol': service['protocol'],
                     'destination_port': dst_port_range,
                     'source_port': src_port_range,
                     'action': self._restore_firewall_action(rule['action']),
                     'enabled': rule['enabled']}}
+            if rule.get('name'):
+                item['firewall_rule']['name'] = rule['name']
+            if rule.get('description'):
+                item['firewall_rule']['description'] = rule['description']
             res['firewall_rule_list'].append(item)
         return res
 
@@ -179,16 +194,16 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
                     'rule_vseid': rule_vseid,
                     'edge_id': edge_id
                 }
-                vcns_db.add_vcns_edge_firewallrule_binding(
+                nsxv_db.add_nsxv_edge_firewallrule_binding(
                     context.session, map_info)
 
     def _get_firewall(self, context, edge_id):
         try:
             return self.vcns.get_firewall(edge_id)[1]
-        except vcns_exc.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to get firewall with edge "
-                                  "id: %s"), edge_id)
+        except vcns_exc.VcnsApiException as e:
+            LOG.exception(_LE("Failed to get firewall with edge "
+                              "id: %s"), edge_id)
+            raise e
 
     def _get_firewall_rule_next(self, context, edge_id, rule_vseid):
         # Return the firewall rule below 'rule_vseid'
@@ -202,7 +217,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
                     return fw_cfg['firewallRules']['firewallRules'][i + 1]
 
     def get_firewall_rule(self, context, id, edge_id):
-        rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
+        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
             context.session, id, edge_id)
         if rule_map is None:
             msg = _("No rule id:%s found in the edge_firewall_binding") % id
@@ -213,12 +228,12 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
         try:
             response = self.vcns.get_firewall_rule(
                 edge_id, vcns_rule_id)[1]
-        except vcns_exc.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to get firewall rule: %(rule_id)s "
-                                  "with edge_id: %(edge_id)s"), {
-                                    'rule_id': id,
-                                    'edge_id': edge_id})
+        except vcns_exc.VcnsApiException as e:
+            LOG.exception(_LE("Failed to get firewall rule: %(rule_id)s "
+                              "with edge_id: %(edge_id)s"), {
+                                'rule_id': id,
+                                'edge_id': edge_id})
+            raise e
         return self._restore_firewall_rule(context, edge_id, response)
 
     def get_firewall(self, context, edge_id):
@@ -229,27 +244,27 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
         fw_req = self._convert_firewall(context, firewall)
         try:
             self.vcns.update_firewall(edge_id, fw_req)
-        except vcns_exc.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to update firewall "
-                                  "with edge_id: %s"), edge_id)
+        except vcns_exc.VcnsApiException as e:
+            LOG.exception(_LE("Failed to update firewall "
+                              "with edge_id: %s"), edge_id)
+            raise e
         fw_res = self._get_firewall(context, edge_id)
-        vcns_db.cleanup_vcns_edge_firewallrule_binding(
+        nsxv_db.cleanup_nsxv_edge_firewallrule_binding(
             context.session, edge_id)
         self._create_rule_id_mapping(context, edge_id, firewall, fw_res)
 
     def delete_firewall(self, context, edge_id):
         try:
             self.vcns.delete_firewall(edge_id)
-        except vcns_exc.VcnsApiException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Failed to delete firewall "
-                                  "with edge_id:%s"), edge_id)
-        vcns_db.cleanup_vcns_edge_firewallrule_binding(
+        except vcns_exc.VcnsApiException as e:
+            LOG.exception(_LE("Failed to delete firewall "
+                              "with edge_id:%s"), edge_id)
+            raise e
+        nsxv_db.cleanup_nsxv_edge_firewallrule_binding(
             context.session, edge_id)
 
     def update_firewall_rule(self, context, id, edge_id, firewall_rule):
-        rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
+        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
             context.session, id, edge_id)
         vcns_rule_id = rule_map.rule_vseid
         fwr_req = self._convert_firewall_rule(context, firewall_rule)
@@ -258,12 +273,13 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 LOG.exception(_LE("Failed to update firewall rule: "
-                                  "%(rule_id)s with edge_id: %(edge_id)s"),
+                                  "%(rule_id)s "
+                                  "with edge_id: %(edge_id)s"),
                               {'rule_id': id,
                                'edge_id': edge_id})
 
     def delete_firewall_rule(self, context, id, edge_id):
-        rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
+        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
             context.session, id, edge_id)
         vcns_rule_id = rule_map.rule_vseid
         try:
@@ -271,14 +287,15 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 LOG.exception(_LE("Failed to delete firewall rule: "
-                                  "%(rule_id)s with edge_id: %(edge_id)s"),
+                                  "%(rule_id)s "
+                                  "with edge_id: %(edge_id)s"),
                               {'rule_id': id,
                                'edge_id': edge_id})
-        vcns_db.delete_vcns_edge_firewallrule_binding(
-            context.session, id, edge_id)
+        nsxv_db.delete_nsxv_edge_firewallrule_binding(
+            context.session, id)
 
     def _add_rule_above(self, context, ref_rule_id, edge_id, firewall_rule):
-        rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
+        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
             context.session, ref_rule_id, edge_id)
         ref_vcns_rule_id = rule_map.rule_vseid
         fwr_req = self._convert_firewall_rule(context, firewall_rule)
@@ -298,11 +315,11 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             'rule_id': firewall_rule['id'],
             'rule_vseid': fwr_vseid,
             'edge_id': edge_id}
-        vcns_db.add_vcns_edge_firewallrule_binding(
+        nsxv_db.add_nsxv_edge_firewallrule_binding(
             context.session, map_info)
 
     def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule):
-        rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
+        rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
             context.session, ref_rule_id, edge_id)
         ref_vcns_rule_id = rule_map.rule_vseid
         fwr_vse_next = self._get_firewall_rule_next(
@@ -336,7 +353,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             'rule_vseid': fwr_vseid,
             'edge_id': edge_id
         }
-        vcns_db.add_vcns_edge_firewallrule_binding(
+        nsxv_db.add_nsxv_edge_firewallrule_binding(
             context.session, map_info)
 
     def insert_rule(self, context, rule_info, edge_id, fwr):
@@ -350,3 +367,34 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
             msg = _("Can't execute insert rule operation "
                     "without reference rule_id")
             raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg)
+
+    def _asyn_update_firewall(self, task):
+        edge_id = task.userdata['edge_id']
+        config = task.userdata['config']
+        context = task.userdata['jobdata']['context']
+        try:
+            self.vcns.update_firewall(edge_id, config)
+        except vcns_exc.VcnsApiException:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("Failed to update firewall "
+                                  "with edge_id: %s"), edge_id)
+        vcns_fw_config = self._get_firewall(context, edge_id)
+        task.userdata['vcns_fw_config'] = vcns_fw_config
+        return task_const.TaskStatus.COMPLETED
+
+    def asyn_update_firewall(self, router_id, edge_id, firewall,
+                             jobdata=None, allow_external=True):
+        # TODO(berlin): Remove uncessary context input parameter.
+        config = self._convert_firewall(None, firewall,
+                                        allow_external=allow_external)
+        userdata = {
+            'edge_id': edge_id,
+            'config': config,
+            'fw_config': firewall,
+            'jobdata': jobdata}
+        task_name = "update-firewall-%s" % edge_id
+        task = tasks.Task(task_name, router_id,
+                          self._asyn_update_firewall, userdata=userdata)
+        task.add_result_monitor(self.callbacks.firewall_update_result)
+        self.task_manager.add(task)
+        return task
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py
index 4b0993fdb4..59ba5c6ae2 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py
@@ -16,7 +16,7 @@ from oslo.utils import excutils
 
 from neutron.i18n import _LE, _LW
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.vshield.common import (
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
     exceptions as vcns_exc)
 
 LOG = logging.getLogger(__name__)
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py
index 86ab578b03..3d2cc03eca 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py
@@ -16,10 +16,10 @@ from oslo.utils import excutils
 
 from neutron.i18n import _LE
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.dbexts import vcns_db
-from neutron.plugins.vmware.vshield.common import (
+from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
     constants as vcns_const)
-from neutron.plugins.vmware.vshield.common import (
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
     exceptions as vcns_exc)
 try:
     from neutron_lbaas.services.loadbalancer import constants as lb_constants
@@ -54,7 +54,7 @@ class EdgeLbDriver():
 
     def _convert_lb_vip(self, context, edge_id, vip, app_profileid):
         pool_id = vip.get('pool_id')
-        poolid_map = vcns_db.get_vcns_edge_pool_binding(
+        poolid_map = nsxv_db.get_vcns_edge_pool_binding(
             context.session, pool_id, edge_id)
         pool_vseid = poolid_map['pool_vseid']
         return {
@@ -70,7 +70,7 @@ class EdgeLbDriver():
         }
 
     def _restore_lb_vip(self, context, edge_id, vip_vse):
-        pool_binding = vcns_db.get_vcns_edge_pool_binding_by_vseid(
+        pool_binding = nsxv_db.get_vcns_edge_pool_binding_by_vseid(
             context.session,
             edge_id,
             vip_vse['defaultPoolId'])
@@ -105,7 +105,7 @@ class EdgeLbDriver():
         monitors = pool.get('health_monitors')
         if not monitors:
             return vsepool
-        monitorid_map = vcns_db.get_vcns_edge_monitor_binding(
+        monitorid_map = nsxv_db.get_vcns_edge_monitor_binding(
             context.session,
             monitors[0],
             edge_id)
@@ -204,10 +204,10 @@ class EdgeLbDriver():
             "edge_id": edge_id,
             "app_profileid": app_profileid
         }
-        vcns_db.add_vcns_edge_vip_binding(context.session, map_info)
+        nsxv_db.add_nsxv_edge_vip_binding(context.session, map_info)
 
     def _get_vip_binding(self, session, id):
-        vip_binding = vcns_db.get_vcns_edge_vip_binding(session, id)
+        vip_binding = nsxv_db.get_nsxv_edge_vip_binding(session, id)
         if not vip_binding:
             msg = (_("vip_binding not found with id: %(id)s "
                      "edge_id: %(edge_id)s") % {
@@ -219,7 +219,7 @@ class EdgeLbDriver():
         return vip_binding
 
     def get_vip(self, context, id):
-        vip_binding = vcns_db.get_vcns_edge_vip_binding(context.session, id)
+        vip_binding = nsxv_db.get_nsxv_edge_vip_binding(context.session, id)
         edge_id = vip_binding[vcns_const.EDGE_ID]
         vip_vseid = vip_binding['vip_vseid']
         try:
@@ -276,7 +276,7 @@ class EdgeLbDriver():
                 LOG.exception(_LE("Failed to delete app profile on edge: %s"),
                               edge_id)
 
-        vcns_db.delete_vcns_edge_vip_binding(context.session, id)
+        nsxv_db.delete_nsxv_edge_vip_binding(context.session, id)
 
     def create_pool(self, context, edge_id, pool, members):
         pool_new = self._convert_lb_pool(context, edge_id, pool, members)
@@ -295,10 +295,10 @@ class EdgeLbDriver():
             "pool_vseid": pool_vseid,
             "edge_id": edge_id
         }
-        vcns_db.add_vcns_edge_pool_binding(context.session, map_info)
+        nsxv_db.add_vcns_edge_pool_binding(context.session, map_info)
 
     def get_pool(self, context, id, edge_id):
-        pool_binding = vcns_db.get_vcns_edge_pool_binding(
+        pool_binding = nsxv_db.get_vcns_edge_pool_binding(
             context.session, id, edge_id)
         if not pool_binding:
             msg = (_("pool_binding not found with id: %(id)s "
@@ -315,7 +315,7 @@ class EdgeLbDriver():
         return self._restore_lb_pool(context, edge_id, response)
 
     def update_pool(self, context, edge_id, pool, members):
-        pool_binding = vcns_db.get_vcns_edge_pool_binding(
+        pool_binding = nsxv_db.get_vcns_edge_pool_binding(
             context.session, pool['id'], edge_id)
         pool_vseid = pool_binding['pool_vseid']
         pool_new = self._convert_lb_pool(context, edge_id, pool, members)
@@ -326,7 +326,7 @@ class EdgeLbDriver():
                 LOG.exception(_LE("Failed to update pool"))
 
     def delete_pool(self, context, id, edge_id):
-        pool_binding = vcns_db.get_vcns_edge_pool_binding(
+        pool_binding = nsxv_db.get_vcns_edge_pool_binding(
             context.session, id, edge_id)
         pool_vseid = pool_binding['pool_vseid']
         try:
@@ -334,7 +334,7 @@ class EdgeLbDriver():
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 LOG.exception(_LE("Failed to delete pool"))
-        vcns_db.delete_vcns_edge_pool_binding(
+        nsxv_db.delete_vcns_edge_pool_binding(
             context.session, id, edge_id)
 
     def create_health_monitor(self, context, edge_id, health_monitor):
@@ -355,10 +355,10 @@ class EdgeLbDriver():
             "monitor_vseid": monitor_vseid,
             "edge_id": edge_id
         }
-        vcns_db.add_vcns_edge_monitor_binding(context.session, map_info)
+        nsxv_db.add_vcns_edge_monitor_binding(context.session, map_info)
 
     def get_health_monitor(self, context, id, edge_id):
-        monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
+        monitor_binding = nsxv_db.get_vcns_edge_monitor_binding(
             context.session, id, edge_id)
         if not monitor_binding:
             msg = (_("monitor_binding not found with id: %(id)s "
@@ -377,7 +377,7 @@ class EdgeLbDriver():
 
     def update_health_monitor(self, context, edge_id,
                               old_health_monitor, health_monitor):
-        monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
+        monitor_binding = nsxv_db.get_vcns_edge_monitor_binding(
             context.session,
             old_health_monitor['id'], edge_id)
         monitor_vseid = monitor_binding['monitor_vseid']
@@ -392,7 +392,7 @@ class EdgeLbDriver():
                               edge_id)
 
     def delete_health_monitor(self, context, id, edge_id):
-        monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
+        monitor_binding = nsxv_db.get_vcns_edge_monitor_binding(
             context.session, id, edge_id)
         monitor_vseid = monitor_binding['monitor_vseid']
         try:
@@ -400,5 +400,5 @@ class EdgeLbDriver():
         except vcns_exc.VcnsApiException:
             with excutils.save_and_reraise_exception():
                 LOG.exception(_LE("Failed to delete monitor"))
-        vcns_db.delete_vcns_edge_monitor_binding(
+        nsxv_db.delete_vcns_edge_monitor_binding(
             context.session, id, edge_id)
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/edge_utils.py b/vmware_nsx/neutron/plugins/vmware/vshield/edge_utils.py
new file mode 100644
index 0000000000..923dbc4ced
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/edge_utils.py
@@ -0,0 +1,1349 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo.config import cfg
+from oslo.utils import excutils
+from sqlalchemy.orm import exc as sa_exc
+
+from neutron.common import exceptions as n_exc
+from neutron import context as q_context
+from neutron.extensions import l3
+from neutron.i18n import _LE, _LW
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import uuidutils
+from neutron.plugins.common import constants as plugin_const
+from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
+from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
+from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
+    constants as vcns_const)
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import (
+    constants as task_const)
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks
+
+
+LOG = logging.getLogger(__name__)
+
+_uuid = uuidutils.generate_uuid
+
+
+def parse_backup_edge_pool_opt():
+    """Parse edge pool opts and returns result."""
+    edge_pool_opts = cfg.CONF.nsxv.backup_edge_pool
+    res = []
+    for edge_pool_def in edge_pool_opts:
+        split = edge_pool_def.split(':')
+        try:
+            (edge_type, edge_size, minimum_pooled_edges,
+             maximum_pooled_edges) = split[:4]
+        except ValueError:
+            raise n_exc.Invalid(_("Invalid edge pool format"))
+        if edge_type not in vcns_const.ALLOWED_EDGE_TYPES:
+            msg = (_("edge type '%(edge_type)s' is not allowed, "
+                     "allowed types: %(allowed)s") %
+                   {'edge_type': edge_type,
+                    'allowed': vcns_const.ALLOWED_EDGE_TYPES})
+            LOG.error(msg)
+            raise n_exc.Invalid(msg)
+        edge_size = edge_size or nsxv_constants.LARGE
+        if edge_size not in vcns_const.ALLOWED_EDGE_SIZES:
+            msg = (_("edge size '%(edge_size)s' is not allowed, "
+                     "allowed types: %(allowed)s") %
+                   {'edge_type': edge_size,
+                    'allowed': vcns_const.ALLOWED_EDGE_SIZES})
+            LOG.error(msg)
+            raise n_exc.Invalid(msg)
+        res.append({'edge_type': edge_type,
+                    'edge_size': edge_size,
+                    'minimum_pooled_edges': int(minimum_pooled_edges),
+                    'maximum_pooled_edges': int(maximum_pooled_edges)})
+
+    edge_pool_dicts = {}
+    for edge_type in vcns_const.ALLOWED_EDGE_TYPES:
+        edge_pool_dicts[edge_type] = {}
+    for r in res:
+        edge_pool_dict = edge_pool_dicts[r['edge_type']]
+        if r['edge_size'] in edge_pool_dict.keys():
+            raise n_exc.Invalid(_("Duplicate edge pool configuration"))
+        else:
+            edge_pool_dict[r['edge_size']] = {
+                'minimum_pooled_edges': r['minimum_pooled_edges'],
+                'maximum_pooled_edges': r['maximum_pooled_edges']}
+    return edge_pool_dicts
+
+
+class EdgeManager(object):
+    """Edge Appliance Management.
+    EdgeManager provides a pool of edge appliances which we can use
+    to support DHCP&metadata, L3&FIP and LB&FW&VPN services.
+    """
+
+    def __init__(self, nsxv_manager):
+        LOG.debug("Start Edge Manager initialization")
+        self.nsxv_manager = nsxv_manager
+        self.dvs_id = cfg.CONF.nsxv.dvs_id
+        self.edge_pool_dicts = parse_backup_edge_pool_opt()
+        self.nsxv_plugin = nsxv_manager.callbacks.plugin
+        self._check_backup_edge_pools()
+
+    def _deploy_edge(self, context, lrouter,
+                     lswitch=None, appliance_size=nsxv_constants.LARGE,
+                     edge_type=nsxv_constants.SERVICE_EDGE):
+        """Create an edge for logical router support."""
+        router_id = lrouter['id']
+        # deploy edge
+        jobdata = {
+            'router_id': router_id,
+            'lrouter': lrouter,
+            'lswitch': lswitch,
+            'context': context
+        }
+
+        task = self.nsxv_manager.deploy_edge(
+            lrouter['id'], lrouter['name'], internal_network=None,
+            jobdata=jobdata, wait_for_exec=True,
+            appliance_size=appliance_size,
+            dist=(edge_type == nsxv_constants.VDR_EDGE))
+        return task
+
+    def _deploy_backup_edges(self, context, num,
+                             appliance_size=nsxv_constants.LARGE,
+                             edge_type=nsxv_constants.SERVICE_EDGE):
+        """Asynchronously deploy edges to populate edge pool."""
+        router_ids = [(vcns_const.BACKUP_ROUTER_PREFIX +
+                       _uuid())[:vcns_const.EDGE_NAME_LEN]
+                      for i in xrange(num)]
+        with context.session.begin(subtransactions=True):
+            for router_id in router_ids:
+                nsxv_db.add_nsxv_router_binding(
+                    context.session, router_id, None, None,
+                    plugin_const.PENDING_CREATE,
+                    appliance_size=appliance_size, edge_type=edge_type)
+        for router_id in router_ids:
+            fake_router = {
+                'id': router_id,
+                'name': router_id}
+            self._deploy_edge(context, fake_router,
+                              appliance_size=appliance_size,
+                              edge_type=edge_type)
+
+    def _delete_edge(self, context, router_binding):
+        if router_binding['status'] == plugin_const.ERROR:
+            LOG.warning(_LW("Start deleting %(router_id)s  corresponding"
+                            "edge: %(edge_id)s due to status error"),
+                        {'router_id': router_binding['router_id'],
+                         'edge_id': router_binding['edge_id']})
+        jobdata = {'context': context}
+        nsxv_db.update_nsxv_router_binding(
+            context.session, router_binding['router_id'],
+            status=plugin_const.PENDING_DELETE)
+        self.nsxv_manager.delete_edge(
+            router_binding['router_id'], router_binding['edge_id'],
+            jobdata=jobdata,
+            dist=(router_binding['edge_type'] == nsxv_constants.VDR_EDGE))
+
+    def _delete_backup_edges(self, context, backup_router_bindings, num):
+        with context.session.begin(subtransactions=True):
+            for binding in backup_router_bindings[:num]:
+                nsxv_db.update_nsxv_router_binding(
+                    context.session, binding['router_id'],
+                    status=plugin_const.PENDING_DELETE)
+        for binding in backup_router_bindings:
+            # delete edge
+            LOG.debug("Start deleting extra edge: %s in pool",
+                      binding['edge_id'])
+            jobdata = {
+                'context': context
+            }
+            self.nsxv_manager.delete_edge(
+                binding['router_id'], binding['edge_id'], jobdata=jobdata,
+                dist=(binding['edge_type'] == nsxv_constants.VDR_EDGE))
+
+    def _clean_all_error_edge_bindings(self, context):
+        router_bindings = nsxv_db.get_nsxv_router_bindings(context.session)
+        for binding in router_bindings:
+            if (binding['router_id'].startswith(
+                    vcns_const.BACKUP_ROUTER_PREFIX) and
+                binding['status'] == plugin_const.ERROR):
+                self._delete_edge(context, binding)
+
+    def _get_backup_edge_bindings(self, context,
+                                  appliance_size=nsxv_constants.LARGE,
+                                  edge_type=nsxv_constants.SERVICE_EDGE):
+        router_bindings = nsxv_db.get_nsxv_router_bindings(context.session)
+        return [router_binding for router_binding in router_bindings
+                if router_binding[
+                    'router_id'].startswith(vcns_const.BACKUP_ROUTER_PREFIX)
+                and router_binding['appliance_size'] == appliance_size
+                and router_binding['edge_type'] == edge_type
+                and router_binding['status'] != plugin_const.PENDING_DELETE
+                and router_binding['status'] != plugin_const.ERROR]
+
+    def _check_backup_edge_pools(self):
+        admin_ctx = q_context.get_admin_context()
+        self._clean_all_error_edge_bindings(admin_ctx)
+        for edge_type, v in self.edge_pool_dicts.items():
+            for edge_size in vcns_const.ALLOWED_EDGE_SIZES:
+                if edge_size in v.keys():
+                    edge_pool_range = v[edge_size]
+                    self._check_backup_edge_pool(
+                        edge_pool_range['minimum_pooled_edges'],
+                        edge_pool_range['maximum_pooled_edges'],
+                        appliance_size=edge_size, edge_type=edge_type)
+                else:
+                    self._check_backup_edge_pool(
+                        0, 0,
+                        appliance_size=edge_size, edge_type=edge_type)
+
+    def _check_backup_edge_pool(self,
+                                minimum_pooled_edges,
+                                maximum_pooled_edges,
+                                appliance_size=nsxv_constants.LARGE,
+                                edge_type=nsxv_constants.SERVICE_EDGE):
+        """Check edge pool's status and return one available edge for use."""
+        admin_ctx = q_context.get_admin_context()
+        backup_router_bindings = self._get_backup_edge_bindings(
+            admin_ctx, appliance_size=appliance_size, edge_type=edge_type)
+        backup_num = len(backup_router_bindings)
+        if backup_num > maximum_pooled_edges:
+            self._delete_backup_edges(admin_ctx, backup_router_bindings,
+                                      backup_num - maximum_pooled_edges)
+        elif backup_num < minimum_pooled_edges:
+            self._deploy_backup_edges(admin_ctx,
+                                      minimum_pooled_edges - backup_num,
+                                      appliance_size=appliance_size,
+                                      edge_type=edge_type)
+
+    def check_edge_exist_at_backend(self, edge_id):
+        try:
+            status = self.nsxv_manager.get_edge_status(edge_id)
+            return (status == vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE)
+        except Exception:
+            return False
+
+    def _get_available_router_binding(self, context,
+                                      appliance_size=nsxv_constants.LARGE,
+                                      edge_type=nsxv_constants.SERVICE_EDGE):
+        backup_router_bindings = self._get_backup_edge_bindings(
+            context, appliance_size=appliance_size, edge_type=edge_type)
+        for router_binding in backup_router_bindings:
+            if (router_binding['status'] == plugin_const.ACTIVE):
+                if not self.check_edge_exist_at_backend(
+                    router_binding['edge_id']):
+                    self._delete_edge(context, router_binding)
+                else:
+                    return router_binding
+
+    def _get_physical_provider_network(self, context, network_id):
+        phy_net = nsxv_db.get_network_bindings(context.session, network_id)
+        return (phy_net[0]['phy_uuid'] if (
+            phy_net and phy_net[0]['phy_uuid'] != '') else self.dvs_id)
+
+    def _create_sub_interface(self, context, network_id, network_name,
+                              tunnel_index, address_groups,
+                              port_group_id=None):
+        # Get the physical port group /wire id of the network id
+        mappings = nsx_db.get_nsx_switch_ids(context.session, network_id)
+        if mappings:
+            vcns_network_id = mappings[0]
+        if port_group_id is None:
+            portgroup = {'vlanId': 0,
+                         'networkName': network_name,
+                         'networkBindingType': 'Static',
+                         'networkType': 'Isolation'}
+            config_spec = {'networkSpec': portgroup}
+            dvs_id = self._get_physical_provider_network(context, network_id)
+            _, port_group_id = self.nsxv_manager.vcns.create_port_group(
+                dvs_id, config_spec)
+
+        interface = {
+            'name': _uuid(),
+            'tunnelId': tunnel_index,
+            'logicalSwitchId': vcns_network_id,
+            'isConnected': True
+        }
+        interface['addressGroups'] = {'addressGroups': address_groups}
+        return port_group_id, interface
+
+    def _getvnic_config(self, edge_id, vnic_index):
+        _, vnic_config = self.nsxv_manager.get_interface(edge_id,
+                                                         vnic_index)
+        return vnic_config
+
+    def _delete_dhcp_internal_interface(self, context, edge_id, vnic_index,
+                                        tunnel_index, network_id):
+        """Delete the dhcp internal interface."""
+
+        LOG.debug("Query the vnic %s for DHCP Edge %s", vnic_index, edge_id)
+        resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]
+        vnic_config = self._getvnic_config(edge_id, vnic_index)
+        sub_interfaces = vnic_config['subInterfaces']['subInterfaces']
+        port_group_id = vnic_config['portgroupId']
+        for sub_interface in sub_interfaces:
+            if tunnel_index == sub_interface['tunnelId']:
+                LOG.debug("Delete the tunnel %d on vnic %d",
+                          tunnel_index, vnic_index)
+                (vnic_config['subInterfaces']['subInterfaces'].
+                 remove(sub_interface))
+                break
+
+        # Clean the vnic if there is no sub-interface attached
+        if len(sub_interfaces) == 0:
+            header, _ = self.nsxv_manager.vcns.delete_interface(edge_id,
+                                                                vnic_index)
+            objuri = header['location']
+            job_id = objuri[objuri.rfind("/") + 1:]
+            dvs_id = self._get_physical_provider_network(context, network_id)
+            self.nsxv_manager.delete_portgroup(
+                dvs_id, port_group_id, job_id)
+        else:
+            self.nsxv_manager.vcns.update_interface(edge_id, vnic_config)
+
+        # Delete the router binding or clean the edge appliance
+        bindings = nsxv_db.get_nsxv_router_bindings(context.session)
+        all_dhcp_edges = {binding['router_id']: binding['edge_id'] for
+                          binding in bindings if binding['router_id'].
+                          startswith(vcns_const.DHCP_EDGE_PREFIX)}
+        for router_id in all_dhcp_edges:
+            if (router_id != resource_id and
+                all_dhcp_edges[router_id] == edge_id):
+                nsxv_db.delete_nsxv_router_binding(context.session,
+                                                   resource_id)
+                return
+        self._free_dhcp_edge_appliance(context, network_id)
+
+    def _update_dhcp_internal_interface(self, context, edge_id, vnic_index,
+                                        tunnel_index, network_id,
+                                        address_groups):
+        """Update the dhcp internal interface:
+           1. Add a new vnic tunnel with the address groups
+           2. Update the address groups to an existing tunnel
+        """
+        LOG.debug("Query the vnic %s for DHCP Edge %s", vnic_index, edge_id)
+        _, vnic_config = self.nsxv_manager.get_interface(edge_id, vnic_index)
+        sub_iface_dict = vnic_config.get('subInterfaces')
+        port_group_id = vnic_config.get('portgroupId')
+        new_tunnel_creation = True
+        iface_list = []
+
+        # Update the sub interface address groups for specific tunnel
+        if sub_iface_dict:
+            sub_interfaces = sub_iface_dict.get('subInterfaces')
+            for sb in sub_interfaces:
+                if tunnel_index == sb['tunnelId']:
+                    new_tunnel_creation = False
+                    sb['addressGroups']['addressGroups'] = address_groups
+                    break
+            iface_list = sub_interfaces
+
+        # The first DHCP service creation, not update
+        if new_tunnel_creation:
+            network_name_item = [edge_id, str(vnic_index), str(tunnel_index)]
+            network_name = ('-'.join(network_name_item) + _uuid())[:36]
+            port_group_id, iface = self._create_sub_interface(
+                context, network_id, network_name, tunnel_index,
+                address_groups, port_group_id)
+
+            iface_list.append(iface)
+
+        LOG.debug("Update the vnic %d for DHCP Edge %s", vnic_index, edge_id)
+        self.nsxv_manager.update_interface('fake_router_id', edge_id,
+                                           vnic_index, port_group_id,
+                                           tunnel_index,
+                                           address_groups=iface_list)
+
+    def _allocate_edge_appliance(self, context, resource_id, name,
+                                 appliance_size=nsxv_constants.LARGE,
+                                 dist=False):
+        """Try to allocate one avaliable edge from pool."""
+
+        edge_type = (nsxv_constants.VDR_EDGE if dist else
+                     nsxv_constants.SERVICE_EDGE)
+        lrouter = {'id': resource_id,
+                   'name': name}
+        edge_pool_range = self.edge_pool_dicts[edge_type].get(appliance_size)
+        if edge_pool_range is None:
+            nsxv_db.add_nsxv_router_binding(
+                context.session, resource_id, None, None,
+                plugin_const.PENDING_CREATE,
+                appliance_size=appliance_size,
+                edge_type=edge_type)
+            task = self._deploy_edge(context, lrouter,
+                                     appliance_size=appliance_size,
+                                     edge_type=edge_type)
+            task.wait(task_const.TaskState.RESULT)
+            return
+
+        self._clean_all_error_edge_bindings(context)
+        available_router_binding = self._get_available_router_binding(
+            context, appliance_size=appliance_size, edge_type=edge_type)
+        # Synchronously deploy an edge if no avaliable edge in pool.
+        if not available_router_binding:
+            # store router-edge mapping binding
+            nsxv_db.add_nsxv_router_binding(
+                context.session, resource_id, None, None,
+                plugin_const.PENDING_CREATE,
+                appliance_size=appliance_size,
+                edge_type=edge_type)
+            task = self._deploy_edge(context, lrouter,
+                                     appliance_size=appliance_size,
+                                     edge_type=edge_type)
+            task.wait(task_const.TaskState.RESULT)
+        else:
+            LOG.debug("Select edge: %(edge_id)s from pool for %(name)s",
+                      {'edge_id': available_router_binding['edge_id'],
+                       'name': name})
+            # select the first avaliable edge in pool.
+            nsxv_db.delete_nsxv_router_binding(
+                context.session, available_router_binding['router_id'])
+            nsxv_db.add_nsxv_router_binding(
+                context.session,
+                lrouter['id'],
+                available_router_binding['edge_id'],
+                None,
+                plugin_const.PENDING_CREATE,
+                appliance_size=appliance_size,
+                edge_type=edge_type)
+            fake_jobdata = {
+                'context': context,
+                'router_id': lrouter['id']}
+            fake_userdata = {'jobdata': fake_jobdata,
+                             'router_name': lrouter['name'],
+                             'edge_id': available_router_binding['edge_id'],
+                             'dist': dist}
+            fake_task = tasks.Task(name='fake-deploy-edge-task',
+                                   resource_id='fake-resource_id',
+                                   execute_callback=None,
+                                   userdata=fake_userdata)
+            fake_task.status = task_const.TaskStatus.COMPLETED
+            self.nsxv_manager.callbacks.edge_deploy_result(fake_task)
+            # change edge's name at backend
+            task = self.nsxv_manager.update_edge(
+                resource_id, available_router_binding['edge_id'],
+                name, None, appliance_size=appliance_size, dist=dist)
+            task.wait(task_const.TaskState.RESULT)
+        backup_num = len(self._get_backup_edge_bindings(
+            context, appliance_size=appliance_size, edge_type=edge_type))
+        self._deploy_backup_edges(
+            context, edge_pool_range['minimum_pooled_edges'] - backup_num,
+            appliance_size=appliance_size, edge_type=edge_type)
+
+    def _free_edge_appliance(self, context, router_id):
+        """Try to collect one edge to pool."""
+        binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+        if not binding:
+            LOG.warning(_LW("router binding for router: %s "
+                            "not found"), router_id)
+            return
+        dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE)
+        edge_pool_range = self.edge_pool_dicts[binding['edge_type']].get(
+            binding['appliance_size'])
+        if edge_pool_range is None:
+            nsxv_db.update_nsxv_router_binding(
+                context.session, router_id,
+                status=plugin_const.PENDING_DELETE)
+            # delete edge
+            jobdata = {
+                'context': context,
+                'router_id': router_id
+            }
+            self.nsxv_manager.delete_edge(
+                router_id, binding['edge_id'], jobdata=jobdata, dist=dist)
+            return
+
+        self._clean_all_error_edge_bindings(context)
+        backup_router_bindings = self._get_backup_edge_bindings(
+            context, appliance_size=binding['appliance_size'],
+            edge_type=binding['edge_type'])
+        backup_num = len(backup_router_bindings)
+        # collect the edge to pool if pool not full
+        if backup_num < edge_pool_range['maximum_pooled_edges']:
+            LOG.debug("Collect edge: %s to pool", binding['edge_id'])
+            nsxv_db.delete_nsxv_router_binding(
+                context.session, router_id)
+            backup_router_id = (vcns_const.BACKUP_ROUTER_PREFIX +
+                                _uuid())[:vcns_const.EDGE_NAME_LEN]
+            nsxv_db.add_nsxv_router_binding(
+                context.session,
+                backup_router_id,
+                binding['edge_id'],
+                None,
+                plugin_const.ACTIVE,
+                appliance_size=binding['appliance_size'],
+                edge_type=binding['edge_type'])
+            # change edge's name at backend
+            task = self.nsxv_manager.update_edge(
+                router_id, binding['edge_id'], backup_router_id, None,
+                appliance_size=binding['appliance_size'], dist=dist)
+            task.wait(task_const.TaskState.RESULT)
+        else:
+            nsxv_db.update_nsxv_router_binding(
+                context.session, router_id,
+                status=plugin_const.PENDING_DELETE)
+            # delete edge
+            jobdata = {
+                'context': context,
+                'router_id': router_id
+            }
+            self.nsxv_manager.delete_edge(
+                router_id, binding['edge_id'], jobdata=jobdata, dist=dist)
+
+    def _allocate_dhcp_edge_appliance(self, context, resource_id):
+        resource_name = resource_id
+        self._allocate_edge_appliance(
+            context, resource_id, resource_name,
+            appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'])
+
+    def _free_dhcp_edge_appliance(self, context, network_id):
+        router_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]
+        self._free_edge_appliance(context, router_id)
+
+    def create_lrouter(self, context, lrouter, lswitch=None, dist=False):
+        """Create an edge for logical router support."""
+        router_name = lrouter['name'] + '-' + lrouter['id']
+        self._allocate_edge_appliance(
+            context, lrouter['id'], router_name,
+            appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router'],
+            dist=dist)
+
+    def delete_lrouter(self, context, router_id, dist=False):
+        self._free_edge_appliance(context, router_id)
+
+    def update_dhcp_service_config(self, context, edge_id):
+        """Reconfigure the DHCP to the edge."""
+        # Get all networks attached to the edge
+        edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge(
+            context.session, edge_id)
+        dhcp_networks = [edge_vnic_binding.network_id
+                         for edge_vnic_binding in edge_vnic_bindings]
+        ports = self.nsxv_plugin.get_ports(
+            context, filters={'network_id': dhcp_networks})
+        inst_ports = [port
+                      for port in ports
+                      if port['device_owner'].startswith("compute")]
+        static_bindings = []
+        for port in inst_ports:
+            static_bindings.extend(
+                self.nsxv_plugin._create_static_binding(context, port))
+        dhcp_request = {
+            'featureType': "dhcp_4.0",
+            'enabled': True,
+            'staticBindings': {'staticBindings': static_bindings}}
+        self.nsxv_manager.vcns.reconfigure_dhcp_service(edge_id, dhcp_request)
+        bindings_get = get_dhcp_binding_mappings(self.nsxv_manager, edge_id)
+        # Refresh edge_dhcp_static_bindings attached to edge
+        nsxv_db.clean_edge_dhcp_static_bindings_by_edge(
+            context.session, edge_id)
+        for mac_address, binding_id in bindings_get.items():
+            nsxv_db.create_edge_dhcp_static_binding(context.session, edge_id,
+                                                    mac_address, binding_id)
+
+    def create_dhcp_edge_service(self, context, network_id,
+                                 conflict_networks=[]):
+        """
+        Create an edge if there is no available edge for dhcp service,
+        Update an edge if there is available edge for dhcp service
+
+        If new edge was allocated, return resource_id, else return None
+        """
+        # Query all conflict edges and available edges first
+        conflict_edge_ids = []
+        available_edge_ids = []
+        router_bindings = nsxv_db.get_nsxv_router_bindings(context.session)
+        all_dhcp_edges = {binding['router_id']: binding['edge_id'] for
+                          binding in router_bindings if (binding['router_id'].
+                          startswith(vcns_const.DHCP_EDGE_PREFIX) and
+                          binding['status'] == plugin_const.ACTIVE)}
+        if all_dhcp_edges:
+            for net_id in conflict_networks:
+                router_id = (vcns_const.DHCP_EDGE_PREFIX + net_id)[:36]
+                edge_id = all_dhcp_edges.get(router_id)
+                if (edge_id and edge_id not in conflict_edge_ids):
+                    conflict_edge_ids.append(edge_id)
+
+            for x in all_dhcp_edges.values():
+                if (x not in conflict_edge_ids and
+                    x not in available_edge_ids):
+                    available_edge_ids.append(x)
+
+        LOG.debug('The available edges %s, the conflict edges %s',
+                  available_edge_ids, conflict_edge_ids)
+        # Check if the network has one related dhcp edge
+        resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]
+        dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(context.session,
+                                                            resource_id)
+
+        # case 1: update a subnet to an existing dhcp edge
+        if dhcp_edge_binding:
+            edge_id = dhcp_edge_binding['edge_id']
+            # Delete the existing vnic interface if there is overlap subnet
+            if edge_id in conflict_edge_ids:
+                old_binding = nsxv_db.get_edge_vnic_binding(
+                    context.session, edge_id, network_id)
+                old_vnic_index = old_binding['vnic_index']
+                old_tunnel_index = old_binding['tunnel_index']
+                # Cut off the port group/virtual wire connection
+                nsxv_db.free_edge_vnic_by_network(context.session, edge_id,
+                                                  network_id)
+                # update dhcp service config on edge_id
+                self.update_dhcp_service_config(context, edge_id)
+
+                try:
+                    self._delete_dhcp_internal_interface(context, edge_id,
+                                                         old_vnic_index,
+                                                         old_tunnel_index,
+                                                         network_id)
+                except Exception:
+                    with excutils.save_and_reraise_exception():
+                        LOG.exception(_LE('Failed to delete vnic '
+                                          '%(vnic_index)d tunnel '
+                                          '%(tunnel_index)d on edge '
+                                          '%(edge_id)s'),
+                                      {'vnic_index': old_vnic_index,
+                                       'tunnel_index': old_tunnel_index,
+                                       'edge_id': edge_id})
+                #Move the network to anther Edge and update vnic:
+                #1. Find an available existing edge or create a new one
+                #2. For the existing one, cut off the old port group connection
+                #3. Create the new port group connection to the existing one
+                #4. Update the address groups to the vnic
+                if available_edge_ids:
+                    new_id = available_edge_ids.pop()
+                    nsxv_db.add_nsxv_router_binding(
+                        context.session, resource_id,
+                        new_id, None, plugin_const.ACTIVE,
+                        appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'])
+                    nsxv_db.allocate_edge_vnic_with_tunnel_index(
+                        context.session, new_id, network_id)
+                else:
+                    self._allocate_dhcp_edge_appliance(context, resource_id)
+                    new_edge = nsxv_db.get_nsxv_router_binding(
+                        context.session, resource_id)
+                    nsxv_db.allocate_edge_vnic_with_tunnel_index(
+                        context.session, new_edge['edge_id'], network_id)
+
+                    # If a new Edge was allocated, return resource_id
+                    return resource_id
+
+        # case 2: attach the subnet to a new edge and update vnic
+        else:
+            # There is available one
+            if available_edge_ids:
+                new_id = available_edge_ids.pop()
+                nsxv_db.add_nsxv_router_binding(
+                    context.session, resource_id,
+                    new_id, None, plugin_const.ACTIVE,
+                    appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'])
+                nsxv_db.allocate_edge_vnic_with_tunnel_index(
+                    context.session, new_id, network_id)
+            else:
+                self._allocate_dhcp_edge_appliance(context, resource_id)
+                new_edge = nsxv_db.get_nsxv_router_binding(context.session,
+                                                           resource_id)
+                nsxv_db.allocate_edge_vnic_with_tunnel_index(
+                    context.session, new_edge['edge_id'], network_id)
+
+                # If a new Edge was allocated, return resource_id
+                return resource_id
+
+    def update_dhcp_edge_service(self, context, network_id,
+                                 address_groups=None):
+        """Update the subnet to the dhcp edge vnic."""
+        if address_groups is None:
+            address_groups = []
+
+        resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]
+        edge_binding = nsxv_db.get_nsxv_router_binding(context.session,
+                                                       resource_id)
+        dhcp_binding = nsxv_db.get_edge_vnic_binding(context.session,
+                                                     edge_binding['edge_id'],
+                                                     network_id)
+        if dhcp_binding:
+            edge_id = dhcp_binding['edge_id']
+            vnic_index = dhcp_binding['vnic_index']
+            tunnel_index = dhcp_binding['tunnel_index']
+            LOG.debug('Update the dhcp service for %s on vnic %d tunnel %d',
+                      edge_id, vnic_index, tunnel_index)
+            try:
+                self._update_dhcp_internal_interface(
+                    context, edge_id, vnic_index, tunnel_index, network_id,
+                    address_groups)
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception(_LE('Failed to update the dhcp service for '
+                                      '%(edge_id)s  on vnic %(vnic_index)d '
+                                      'tunnel %(tunnel_index)d'),
+                                  {'edge_id': edge_id,
+                                   'vnic_index': vnic_index,
+                                   'tunnel_index': tunnel_index})
+            ports = self.nsxv_plugin.get_ports(
+                context, filters={'network_id': [network_id]})
+            inst_ports = [port
+                          for port in ports
+                          if port['device_owner'].startswith("compute")]
+            if inst_ports:
+                # update dhcp service config for the new added network
+                self.update_dhcp_service_config(context, edge_id)
+
+    def delete_dhcp_edge_service(self, context, network_id):
+        """Delete an edge for dhcp service."""
+        resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]
+        edge_binding = nsxv_db.get_nsxv_router_binding(context.session,
+                                                       resource_id)
+        if edge_binding:
+            dhcp_binding = nsxv_db.get_edge_vnic_binding(
+                context.session, edge_binding['edge_id'], network_id)
+            if dhcp_binding:
+                edge_id = dhcp_binding['edge_id']
+                vnic_index = dhcp_binding['vnic_index']
+                tunnel_index = dhcp_binding['tunnel_index']
+
+                LOG.debug("Delete the tunnel %d on vnic %d from DHCP Edge %s",
+                          tunnel_index, vnic_index, edge_id)
+                nsxv_db.free_edge_vnic_by_network(context.session,
+                                                  edge_id,
+                                                  network_id)
+                try:
+                    self._delete_dhcp_internal_interface(context, edge_id,
+                                                         vnic_index,
+                                                         tunnel_index,
+                                                         network_id)
+                except Exception:
+                    with excutils.save_and_reraise_exception():
+                        LOG.exception(_LE('Failed to delete the tunnel '
+                                          '%(tunnel_index)d on vnic '
+                                          '%(vnic_index)d'
+                                          'from DHCP Edge %(edge_id)s'),
+                                      {'tunnel_index': tunnel_index,
+                                       'vnic_index': vnic_index,
+                                       'edge_id': edge_id})
+
+    def get_plr_by_tlr_id(self, context, router_id):
+        lswitch_id = nsxv_db.get_nsxv_router_binding(
+            context.session, router_id).lswitch_id
+        if lswitch_id:
+            edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_int_lswitch(
+                context.session, lswitch_id)
+            if edge_vnic_bindings:
+                for edge_vnic_binding in edge_vnic_bindings:
+                    plr_router_id = nsxv_db.get_nsxv_router_binding_by_edge(
+                        context.session, edge_vnic_binding.edge_id).router_id
+                    if plr_router_id != router_id:
+                        return plr_router_id
+
+    def create_plr_with_tlr_id(self, context, router_id, router_name):
+        # Add an internal network preparing for connecting the VDR
+        # to a PLR
+        tlr_edge_id = nsxv_db.get_nsxv_router_binding(
+            context.session, router_id).edge_id
+        # First create an internal lswitch
+        lswitch_name = ('int-' + router_name + router_id)[:36]
+        virtual_wire = {"name": lswitch_name,
+                        "tenantId": "virtual wire tenant"}
+        config_spec = {"virtualWireCreateSpec": virtual_wire}
+        vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id
+        h, lswitch_id = self.nsxv_manager.vcns.create_virtual_wire(
+            vdn_scope_id, config_spec)
+
+        # add vdr's external interface to the lswitch
+        tlr_vnic_index = self.nsxv_manager.add_vdr_internal_interface(
+            tlr_edge_id, lswitch_id,
+            address=vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0],
+            netmask=vcns_const.INTEGRATION_SUBNET_NETMASK,
+            type="uplink")
+        nsxv_db.create_edge_vnic_binding(
+            context.session, tlr_edge_id, tlr_vnic_index, lswitch_id)
+        # store the lswitch_id into vcns_router_binding
+        nsxv_db.update_nsxv_router_binding(
+            context.session, router_id,
+            lswitch_id=lswitch_id)
+        # TODO(berlin): update vdr's default gateway flexibly.
+        task = self.nsxv_manager.update_routes(
+            router_id, tlr_edge_id,
+            vcns_const.INTEGRATION_EDGE_IPADDRESS, [],
+            gateway_vnic_index=tlr_vnic_index)
+        task.wait(task_const.TaskState.RESULT)
+
+        # Handle plr relative op
+        plr_router = {'name': router_name,
+                      'id': (vcns_const.PLR_EDGE_PREFIX + _uuid())[:36]}
+        self.create_lrouter(context, plr_router)
+        binding = nsxv_db.get_nsxv_router_binding(
+            context.session, plr_router['id'])
+        plr_edge_id = binding['edge_id']
+        plr_vnic_index = nsxv_db.allocate_edge_vnic(
+            context.session, plr_edge_id, lswitch_id).vnic_index
+        #TODO(berlin): the internal ip should change based on vnic_index
+        self.nsxv_manager.update_interface(
+            plr_router['id'], plr_edge_id, plr_vnic_index, lswitch_id,
+            address=vcns_const.INTEGRATION_EDGE_IPADDRESS,
+            netmask=vcns_const.INTEGRATION_SUBNET_NETMASK)
+        return plr_router['id']
+
+    def delete_plr_by_tlr_id(self, context, plr_id, router_id):
+        # Delete plr's internal interface which connects to internal switch
+        tlr_binding = nsxv_db.get_nsxv_router_binding(
+            context.session, router_id)
+        lswitch_id = tlr_binding.lswitch_id
+        tlr_edge_id = tlr_binding.edge_id
+        plr_edge_id = nsxv_db.get_nsxv_router_binding(
+            context.session, plr_id).edge_id
+        plr_vnic_index = nsxv_db.get_edge_vnic_binding(
+            context.session, plr_edge_id, lswitch_id).vnic_index
+        # Clear static routes before delete internal vnic
+        task = self.nsxv_manager.update_routes(
+            plr_id, plr_edge_id, None, [])
+        task.wait(task_const.TaskState.RESULT)
+        # Delete internal vnic
+        task = self.nsxv_manager.delete_interface(
+            plr_id, plr_edge_id, plr_vnic_index)
+        task.wait(task_const.TaskState.RESULT)
+        nsxv_db.free_edge_vnic_by_network(
+            context.session, plr_edge_id, lswitch_id)
+        # Delete the PLR
+        self.delete_lrouter(context, plr_id)
+
+        # Clear static routes of vdr
+        task = self.nsxv_manager.update_routes(
+            router_id, tlr_edge_id, None, [])
+        task.wait(task_const.TaskState.RESULT)
+        #First delete the vdr's external interface
+        tlr_vnic_index = nsxv_db.get_edge_vnic_binding(
+            context.session, tlr_edge_id, lswitch_id).vnic_index
+        self.nsxv_manager.delete_vdr_internal_interface(
+            tlr_edge_id, tlr_vnic_index)
+        nsxv_db.delete_edge_vnic_binding_by_network(
+            context.session, tlr_edge_id, lswitch_id)
+        try:
+            # Then delete the internal lswitch
+            self.nsxv_manager.vcns.delete_virtual_wire(lswitch_id)
+        except Exception:
+            LOG.warning(_LW("Failed to delete virtual wire: %s"), lswitch_id)
+
+
+def create_lrouter(nsxv_manager, context, lrouter, lswitch=None, dist=False):
+    """Create an edge for logical router support."""
+    router_id = lrouter['id']
+    router_name = lrouter['name'] + '-' + router_id
+    appliance_size = vcns_const.SERVICE_SIZE_MAPPING['router']
+    # store router-edge mapping binding
+    nsxv_db.add_nsxv_router_binding(
+        context.session, router_id, None, None,
+        plugin_const.PENDING_CREATE,
+        appliance_size=appliance_size)
+
+    # deploy edge
+    jobdata = {
+        'router_id': router_id,
+        'lrouter': lrouter,
+        'lswitch': lswitch,
+        'context': context
+    }
+
+    # deploy and wait until the deploy request has been requested
+    # so we will have edge_id ready. The wait here should be fine
+    # as we're not in a database transaction now
+    task = nsxv_manager.deploy_edge(
+        router_id, router_name, internal_network=None,
+        dist=dist, jobdata=jobdata, appliance_size=appliance_size)
+    task.wait(task_const.TaskState.RESULT)
+
+
+def delete_lrouter(nsxv_manager, context, router_id, dist=False):
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    if binding:
+        nsxv_db.update_nsxv_router_binding(
+            context.session, router_id,
+            status=plugin_const.PENDING_DELETE)
+        edge_id = binding['edge_id']
+        # delete edge
+        jobdata = {
+            'context': context
+        }
+        task = nsxv_manager.delete_edge(router_id, edge_id,
+                                        jobdata=jobdata, dist=dist)
+        task.wait(task_const.TaskState.RESULT)
+    else:
+        LOG.warning(_LW("router binding for router: %s not found"), router_id)
+
+
+def create_dhcp_service(context, nsxv_manager, network):
+    """Create an Edge for dhcp service."""
+    edge_name = "%s-%s" % (network['name'], network['id'])
+    jobdata = {'network_id': network['id'], 'context': context}
+    # port group id for vlan or virtual wire id for vxlan
+    nsx_network_id = nsx_db.get_nsx_switch_ids(context.session,
+                                               network['id'])[0]
+    # Deploy an Edge for dhcp service
+    return nsxv_manager.deploy_edge(
+        network['id'], edge_name, nsx_network_id, jobdata=jobdata,
+        appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'])
+
+
+def delete_dhcp_service(context, nsxv_manager, network_id):
+    """Delete the Edge of dhcp service."""
+    task = None
+    binding = nsxv_db.get_dhcp_edge_network_binding(context.session,
+                                                    network_id)
+    if binding:
+        dhcp_edge_id = binding['edge_id']
+        vnic_index = binding['vnic_index']
+        jobdata = {'context': context, 'network_id': network_id}
+
+        edge_id = dhcp_edge_id
+
+        LOG.debug("Delete the vnic %d from DHCP Edge %s",
+                  vnic_index, edge_id)
+        nsxv_manager.vcns.delete_interface(edge_id, vnic_index)
+        nsxv_db.free_edge_vnic_by_network(
+            context.session, edge_id, network_id)
+        LOG.debug("Delete the DHCP Edge service %s", edge_id)
+        task = nsxv_manager.delete_edge(network_id, edge_id, jobdata)
+
+    return task
+
+
+def get_dhcp_edge_id(context, network_id):
+    # Query edge id
+    resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]
+    binding = nsxv_db.get_nsxv_router_binding(context.session,
+                                              resource_id)
+    if binding:
+        edge_id = binding['edge_id']
+        return edge_id
+
+
+def create_dhcp_bindings(context, nsxv_manager, network_id, bindings):
+    edge_id = get_dhcp_edge_id(context, network_id)
+    if edge_id:
+        for binding in bindings:
+            nsxv_manager.vcns.create_dhcp_binding(edge_id, binding)
+        bindings_get = get_dhcp_binding_mappings(nsxv_manager, edge_id)
+        mac_address_list = [binding['macAddress'] for binding in bindings]
+        for mac_address, binding_id in bindings_get.items():
+            if mac_address in mac_address_list:
+                nsxv_db.create_edge_dhcp_static_binding(
+                    context.session, edge_id,
+                    mac_address, binding_id)
+
+
+def get_dhcp_binding_mappings(nsxv_manager, edge_id):
+    dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id)
+    bindings_get = {}
+    if dhcp_config:
+        for binding in dhcp_config['staticBindings']['staticBindings']:
+            bindings_get[binding['macAddress'].lower()] = binding['bindingId']
+    return bindings_get
+
+
+def delete_dhcp_binding(context, nsxv_manager, network_id, mac_address):
+    edge_id = get_dhcp_edge_id(context, network_id)
+    dhcp_binding = nsxv_db.get_edge_dhcp_static_binding(
+        context.session, edge_id, mac_address)
+    if edge_id and dhcp_binding:
+        nsxv_manager.vcns.delete_dhcp_binding(edge_id, dhcp_binding.binding_id)
+        nsxv_db.delete_edge_dhcp_static_binding(
+            context.session, edge_id, mac_address)
+
+
+def query_dhcp_service_config(nsxv_manager, edge_id):
+    """Retrieve the current DHCP configuration from the edge."""
+    _, dhcp_config = nsxv_manager.vcns.query_dhcp_configuration(edge_id)
+    return dhcp_config
+
+
+def update_dhcp_internal_interface(context, nsxv_manager,
+                                   network_id, address_groups, add=True):
+    # Get the physical port group /wire id of the network id
+    mappings = nsx_db.get_nsx_switch_ids(context.session, network_id)
+    if mappings:
+        vcns_network_id = mappings[0]
+
+    # Get the DHCP Edge to update the internal interface
+    binding = nsxv_db.get_dhcp_edge_network_binding(context.session,
+                                                    network_id)
+    if binding:
+        dhcp_edge_id = binding['edge_id']
+        vnic_index = binding['vnic_index']
+        edge_id = dhcp_edge_id
+        LOG.debug("Query the vnic %s for DHCP Edge %s",
+                  vnic_index, edge_id)
+        _, vnic_config = nsxv_manager.get_interface(edge_id, vnic_index)
+        for addr_group in address_groups:
+            vnic_addr_grp = vnic_config['addressGroups']['addressGroups']
+            if add:
+                vnic_addr_grp.append(addr_group)
+            else:
+                if addr_group in vnic_addr_grp:
+                    vnic_addr_grp.remove(addr_group)
+
+        LOG.debug("Update the vnic %d for DHCP Edge %s", vnic_index, edge_id)
+        nsxv_manager.update_interface(
+            'fake_router_id', edge_id, vnic_index, vcns_network_id,
+            address_groups=vnic_config['addressGroups']['addressGroups'])
+
+
+def update_gateway(nsxv_manager, context, router_id, nexthop, routes=None):
+    binding = nsxv_db.get_nsxv_router_binding(context.session,
+                                              router_id)
+    edge_id = binding['edge_id']
+    if routes is None:
+        routes = []
+    task = nsxv_manager.update_routes(router_id, edge_id, nexthop, routes)
+    task.wait(task_const.TaskState.RESULT)
+
+
+def update_routes(edge_manager, context, router_id, routes, nexthop=None):
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    edge_id = binding['edge_id']
+    edge_routes = []
+    for route in routes:
+        if not route.get('network_id'):
+            LOG.warning(_LW("There is no network info for the route %s, so "
+                            "the route entry would not be executed!"), route)
+            continue
+        if route.get('external'):
+            edge_routes.append({
+                'vnic_index': vcns_const.EXTERNAL_VNIC_INDEX,
+                'cidr': route['destination'],
+                'nexthop': route['nexthop']})
+        else:
+            edge_routes.append({
+                'vnic_index': nsxv_db.get_edge_vnic_binding(
+                    context.session, edge_id,
+                    route['network_id'])['vnic_index'],
+                'cidr': route['destination'],
+                'nexthop': route['nexthop']})
+    task = edge_manager.update_routes(router_id, edge_id, nexthop, edge_routes)
+    task.wait(task_const.TaskState.RESULT)
+
+
+def update_routes_on_plr(edge_manager, context, plr_id, router_id,
+                         routes, nexthop=None):
+
+    binding = nsxv_db.get_nsxv_router_binding(context.session, plr_id)
+    edge_id = binding['edge_id']
+    lswitch_id = nsxv_db.get_nsxv_router_binding(
+        context.session, router_id).lswitch_id
+    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
+        context.session, edge_id, lswitch_id)
+    edge_routes = []
+    for route in routes:
+        edge_routes.append({
+            'vnic_index': edge_vnic_binding.vnic_index,
+            'cidr': route['destination'],
+            'nexthop': route['nexthop']})
+    task = edge_manager.update_routes(plr_id, edge_id, nexthop, edge_routes)
+    task.wait(task_const.TaskState.RESULT)
+
+
+def clear_gateway(nsxv_manager, context, router_id):
+    return update_gateway(nsxv_manager, context, router_id, None)
+
+
+def update_external_interface(
+    nsxv_manager, context, router_id, ext_net_id,
+    ipaddr, netmask, secondary=[]):
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    net_bindings = nsxv_db.get_network_bindings(context.session, ext_net_id)
+    if not net_bindings:
+        vcns_network_id = nsxv_manager.external_network
+    else:
+        vcns_network_id = net_bindings[0].phy_uuid
+
+    nsxv_manager.update_interface(router_id, binding['edge_id'],
+                                  vcns_const.EXTERNAL_VNIC_INDEX,
+                                  vcns_network_id,
+                                  address=ipaddr,
+                                  netmask=netmask,
+                                  secondary=secondary)
+
+
+def update_internal_interface(
+    nsxv_manager, context, router_id, int_net_id, address_groups):
+    # Get the pg/wire id of the network id
+    mappings = nsx_db.get_nsx_switch_ids(context.session, int_net_id)
+    if mappings:
+        vcns_network_id = mappings[0]
+    LOG.debug("Network id %(network_id)s corresponding ref is : "
+              "%(net_moref)s", {'network_id': int_net_id,
+                                'net_moref': vcns_network_id})
+
+    # Get edge id
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    edge_id = binding['edge_id']
+    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
+        context.session, edge_id, int_net_id)
+    # if edge_vnic_binding is None, then first select one avaliable
+    # internal vnic for connection.
+    if not edge_vnic_binding:
+        edge_vnic_binding = nsxv_db.allocate_edge_vnic(
+            context.session, edge_id, int_net_id)
+
+    nsxv_manager.update_interface(router_id, edge_id,
+                                  edge_vnic_binding.vnic_index,
+                                  vcns_network_id,
+                                  address_groups=address_groups)
+
+
+def add_vdr_internal_interface(
+    nsxv_manager, context, router_id, int_net_id, address_groups):
+    # Get the pg/wire id of the network id
+    mappings = nsx_db.get_nsx_switch_ids(context.session, int_net_id)
+    if mappings:
+        vcns_network_id = mappings[0]
+    LOG.debug("Network id %(network_id)s corresponding ref is : "
+              "%(net_moref)s", {'network_id': int_net_id,
+                                'net_moref': vcns_network_id})
+    # Get edge id
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    edge_id = binding['edge_id']
+    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
+        context.session, edge_id, int_net_id)
+    if not edge_vnic_binding:
+        vnic_index = nsxv_manager.add_vdr_internal_interface(
+            edge_id, vcns_network_id,
+            address_groups=address_groups)
+        nsxv_db.create_edge_vnic_binding(
+            context.session, edge_id, vnic_index, int_net_id)
+    else:
+        msg = (_("Distributed Router doesn't support multiple subnets "
+                 "with same network attached to it."))
+        raise n_exc.BadRequest(resource='vdr', msg=msg)
+
+
+def update_vdr_internal_interface(
+    nsxv_manager, context, router_id, int_net_id, address_groups):
+    # Get the pg/wire id of the network id
+    mappings = nsx_db.get_nsx_switch_ids(context.session, int_net_id)
+    if mappings:
+        vcns_network_id = mappings[0]
+    LOG.debug("Network id %(network_id)s corresponding ref is : "
+              "%(net_moref)s", {'network_id': int_net_id,
+                                'net_moref': vcns_network_id})
+
+    # Get edge id
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    edge_id = binding['edge_id']
+    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
+        context.session, edge_id, binding.network_id)
+    nsxv_manager.update_vdr_internal_interface(
+        edge_id, edge_vnic_binding.vnic_index,
+        vcns_network_id, address_groups=address_groups)
+
+
+def delete_interface(nsxv_manager, context, router_id, network_id, dist=False):
+    # Get the pg/wire id of the network id
+    mappings = nsx_db.get_nsx_switch_ids(context.session, network_id)
+    if mappings:
+        vcns_network_id = mappings[0]
+    LOG.debug("Network id %(network_id)s corresponding ref is : "
+              "%(net_moref)s", {'network_id': network_id,
+                                'net_moref': vcns_network_id})
+
+    # Get edge id
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    edge_id = binding['edge_id']
+    edge_vnic_binding = nsxv_db.get_edge_vnic_binding(
+        context.session, edge_id, network_id)
+    if not dist:
+        task = nsxv_manager.delete_interface(
+            router_id, edge_id, edge_vnic_binding.vnic_index)
+        task.wait(task_const.TaskState.RESULT)
+        nsxv_db.free_edge_vnic_by_network(
+            context.session, edge_id, network_id)
+    else:
+        nsxv_manager.delete_vdr_internal_interface(
+            edge_id, edge_vnic_binding.vnic_index)
+        nsxv_db.delete_edge_vnic_binding_by_network(
+            context.session, edge_id, network_id)
+
+
+def update_nat_rules(nsxv_manager, context, router_id, snat, dnat):
+    binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    task = nsxv_manager.update_nat_rules(
+        router_id, binding['edge_id'], snat, dnat)
+    task.wait(task_const.TaskState.RESULT)
+
+
+def update_dnat_rules(nsxv_manager, context, router_id, dnat_rules):
+    rtr_binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
+    edge_id = rtr_binding['edge_id']
+
+    for dnat_rule in dnat_rules:
+        vnic_binding = nsxv_db.get_edge_vnic_binding(
+            context.session,
+            edge_id,
+            dnat_rule['network_id'])
+
+        vnic_index = vnic_binding['vnic_index']
+        dnat_rule['vnic_index'] = vnic_index
+
+    nsxv_manager.update_dnat_rules(edge_id, dnat_rules)
+
+
+def clear_nat_rules(nsxv_manager, context, router_id):
+    update_nat_rules(nsxv_manager, context, router_id, [], [])
+
+
+def update_firewall(nsxv_manager, context, router_id, firewall,
+                    allow_external=True):
+    jobdata = {'context': context}
+    edge_id = nsxv_db.get_nsxv_router_binding(
+        context.session, router_id)['edge_id']
+    task = nsxv_manager.asyn_update_firewall(router_id, edge_id,
+                                             firewall, jobdata=jobdata,
+                                             allow_external=allow_external)
+    task.wait(task_const.TaskState.RESULT)
+
+
+class NsxVCallbacks(object):
+    """Edge callback implementation Callback functions for
+    asynchronous tasks.
+    """
+    def __init__(self, plugin):
+        self.plugin = plugin
+
+    def edge_deploy_started(self, task):
+        """callback when deployment task started."""
+        jobdata = task.userdata['jobdata']
+        context = jobdata['context']
+        router_id = jobdata.get('router_id')
+        edge_id = task.userdata.get('edge_id')
+        name = task.userdata.get('router_name')
+        dist = task.userdata.get('dist')
+        if edge_id:
+            LOG.debug("Start deploying %(edge_id)s for router %(name)s",
+                      {'edge_id': edge_id,
+                       'name': name})
+            nsxv_db.update_nsxv_router_binding(
+                context.session, router_id, edge_id=edge_id)
+            if not dist:
+                # Init Edge vnic binding
+                nsxv_db.init_edge_vnic_binding(
+                    context.session, edge_id)
+        else:
+            LOG.debug("Failed to deploy Edge")
+            if router_id:
+                nsxv_db.update_nsxv_router_binding(
+                    context.session, router_id,
+                    status=plugin_const.ERROR)
+
+    def edge_deploy_result(self, task):
+        """callback when deployment task finished."""
+        jobdata = task.userdata['jobdata']
+        context = jobdata['context']
+        name = task.userdata.get('router_name')
+        dist = task.userdata.get('dist')
+        router_id = jobdata['router_id']
+        router_db = None
+        if uuidutils.is_uuid_like(router_id):
+            try:
+                router_db = self.plugin._get_router(
+                    context, router_id)
+            except l3.RouterNotFound:
+                # Router might have been deleted before deploy finished
+                LOG.warning(_LW("Router %s not found"), name)
+
+        if task.status == task_const.TaskStatus.COMPLETED:
+            LOG.debug("Successfully deployed %(edge_id)s for router %(name)s",
+                      {'edge_id': task.userdata['edge_id'],
+                       'name': name})
+            if (router_db and
+                router_db['status'] == plugin_const.PENDING_CREATE):
+                router_db['status'] = plugin_const.ACTIVE
+            nsxv_db.update_nsxv_router_binding(
+                context.session, router_id,
+                status=plugin_const.ACTIVE)
+        else:
+            LOG.debug("Failed to deploy Edge for router %s", name)
+            if router_db:
+                router_db['status'] = plugin_const.ERROR
+            nsxv_db.update_nsxv_router_binding(
+                context.session, router_id,
+                status=plugin_const.ERROR)
+            if not dist:
+                nsxv_db.clean_edge_vnic_binding(
+                    context.session, task.userdata['edge_id'])
+
+    def edge_update_result(self, task):
+        LOG.debug("edge_update_result %d", task.status)
+
+    def edge_delete_result(self, task):
+        jobdata = task.userdata['jobdata']
+        router_id = task.userdata['router_id']
+        dist = task.userdata.get('dist')
+        edge_id = task.userdata['edge_id']
+        context = jobdata['context']
+        try:
+            nsxv_db.delete_nsxv_router_binding(context.session, router_id)
+            if not dist:
+                nsxv_db.clean_edge_vnic_binding(context.session, edge_id)
+        except sa_exc.NoResultFound:
+            LOG.warning(_LW("Router Binding for %s not found"), router_id)
+
+    def interface_update_result(self, task):
+        LOG.debug("interface_update_result %d", task.status)
+
+    def interface_delete_result(self, task):
+        LOG.debug("interface_delete_result %d", task.status)
+
+    def snat_create_result(self, task):
+        LOG.debug("snat_create_result %d", task.status)
+
+    def snat_delete_result(self, task):
+        LOG.debug("snat_delete_result %d", task.status)
+
+    def dnat_create_result(self, task):
+        LOG.debug("dnat_create_result %d", task.status)
+
+    def dnat_delete_result(self, task):
+        LOG.debug("dnat_delete_result %d", task.status)
+
+    def routes_update_result(self, task):
+        LOG.debug("routes_update_result %d", task.status)
+
+    def nat_update_result(self, task):
+        LOG.debug("nat_update_result %d", task.status)
+
+    def _create_rule_id_mapping(
+        self, context, edge_id, firewall, vcns_fw):
+        for rule in vcns_fw['firewallRules']['firewallRules']:
+            if rule.get('ruleTag'):
+                index = rule['ruleTag'] - 1
+                #TODO(linb):a simple filter of the retrieved rules which may be
+                #created by other operations unintentionally
+                if index < len(firewall['firewall_rule_list']):
+                    rule_vseid = rule['ruleId']
+                    rule_id = firewall['firewall_rule_list'][index].get('id')
+                    if rule_id:
+                        map_info = {
+                            'rule_id': rule_id,
+                            'rule_vseid': rule_vseid,
+                            'edge_id': edge_id
+                        }
+                        nsxv_db.add_nsxv_edge_firewallrule_binding(
+                            context.session, map_info)
+
+    def firewall_update_result(self, task):
+        LOG.debug("firewall_update_result %d", task.status)
+        context = task.userdata['jobdata']['context']
+        edge_id = task.userdata['edge_id']
+        fw_config = task.userdata['fw_config']
+        vcns_fw_config = task.userdata['vcns_fw_config']
+        nsxv_db.cleanup_nsxv_edge_firewallrule_binding(
+            context.session, edge_id)
+        self._create_rule_id_mapping(
+            context, edge_id, fw_config, vcns_fw_config)
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/nsxv_edge_cfg_obj.py b/vmware_nsx/neutron/plugins/vmware/vshield/nsxv_edge_cfg_obj.py
new file mode 100644
index 0000000000..1fad22f989
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/nsxv_edge_cfg_obj.py
@@ -0,0 +1,67 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+
+from oslo.serialization import jsonutils
+import six
+
+from vmware_nsx.neutron.plugins.vmware.vshield import vcns
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NsxvEdgeCfgObj(object):
+
+    def __init__(self):
+        return
+
+    @abc.abstractmethod
+    def get_service_name(self):
+        return
+
+    @abc.abstractmethod
+    def serializable_payload(self):
+        return
+
+    @staticmethod
+    def get_object(vcns_obj, edge_id, service_name):
+        uri = "%s/%s/%s" % (vcns.URI_PREFIX,
+                            edge_id,
+                            service_name)
+
+        h, v = vcns_obj.do_request(
+            vcns.HTTP_GET,
+            uri,
+            decode=True)
+
+        return v
+
+    def submit_to_backend(self, vcns_obj, edge_id, async=True):
+        uri = "%s/%s/%s/config" % (vcns.URI_PREFIX,
+                                   edge_id,
+                                   self.get_service_name())
+
+        if async:
+            uri += '?async=true'
+
+        payload = jsonutils.dumps(self.serializable_payload(), sort_keys=True)
+
+        if payload:
+            return vcns_obj.do_request(
+                vcns.HTTP_PUT,
+                uri,
+                payload,
+                format='json',
+                encode=False)
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/nsxv_loadbalancer.py b/vmware_nsx/neutron/plugins/vmware/vshield/nsxv_loadbalancer.py
new file mode 100644
index 0000000000..7e7a03afb7
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/nsxv_loadbalancer.py
@@ -0,0 +1,391 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+from neutron.openstack.common import log as logging
+from vmware_nsx.neutron.plugins.vmware.vshield import nsxv_edge_cfg_obj
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NsxvLoadbalancer(nsxv_edge_cfg_obj.NsxvEdgeCfgObj):
+
+    SERVICE_NAME = 'loadbalancer'
+
+    def __init__(
+            self,
+            enabled=True,
+            enable_service_insertion=False,
+            acceleration_enabled=False):
+        super(NsxvLoadbalancer, self).__init__()
+        self.payload = {
+            'enabled': enabled,
+            'enableServiceInsertion': enable_service_insertion,
+            'accelerationEnabled': acceleration_enabled}
+        self.virtual_servers = {}
+
+    def get_service_name(self):
+        return self.SERVICE_NAME
+
+    def add_virtual_server(self, virtual_server):
+        self.virtual_servers[virtual_server.payload['name']] = virtual_server
+
+    def del_virtual_server(self, name):
+        self.virtual_servers.pop(name, None)
+
+    def serializable_payload(self):
+        virt_servers = []
+        app_profiles = []
+        app_rules = []
+        pools = []
+        monitors = []
+
+        virt_id = 1
+        app_prof_id = 1
+        app_rule_id = 1
+        pool_id = 1
+        monitor_id = 1
+        member_id = 1
+
+        for virtual_server in self.virtual_servers.values():
+            s_virt = virtual_server.payload.copy()
+            s_virt['virtualServerId'] = 'virtualServer-%d' % virt_id
+            virt_id += 1
+
+            # Setup app profile
+            s_app_prof = virtual_server.app_profile.payload.copy()
+            s_app_prof['applicationProfileId'] = ('applicationProfile-%d' %
+                                                  app_prof_id)
+            app_profiles.append(s_app_prof)
+            app_prof_id += 1
+
+            # Bind virtual server to app profile
+            s_virt['applicationProfileId'] = s_app_prof['applicationProfileId']
+
+            # Setup app rules
+            if virtual_server.app_rules.values():
+                s_virt['applicationRuleId'] = []
+                for app_rule in virtual_server.app_rules.values():
+                    s_app_rule = app_rule.payload.copy()
+                    s_app_rule['applicationRuleId'] = ('applicationRule-%d' %
+                                                       app_rule_id)
+                    app_rule_id += 1
+
+                    # Add to LB object, bind to virtual server
+                    app_rules.append(s_app_rule)
+                    s_virt['applicationRuleId'].append(
+                        s_app_rule['applicationRuleId'])
+
+            # Setup pools
+            s_pool = virtual_server.default_pool.payload.copy()
+            s_pool['poolId'] = 'pool-%d' % pool_id
+            pool_id += 1
+            pools.append(s_pool)
+
+            # Add pool members
+            s_pool['member'] = []
+            for member in virtual_server.default_pool.members.values():
+                s_m = member.payload.copy()
+                s_m['memberId'] = 'member-%d' % member_id
+                member_id += 1
+                s_pool['member'].append(s_m)
+
+            # Bind pool to virtual server
+            s_virt['defaultPoolId'] = s_pool['poolId']
+
+            s_pool['monitorId'] = []
+            # Add monitors
+            for monitor in virtual_server.default_pool.monitors.values():
+                s_mon = monitor.payload.copy()
+                s_mon['monitorId'] = 'monitor-%d' % monitor_id
+                monitor_id += 1
+
+                s_pool['monitorId'].append(s_mon['monitorId'])
+
+                monitors.append(s_mon)
+
+            virt_servers.append(s_virt)
+
+        payload = self.payload.copy()
+        payload['applicationProfile'] = app_profiles
+        if app_rules:
+            payload['applicationRule'] = app_rules
+        payload['monitor'] = monitors
+        payload['pool'] = pools
+        payload['virtualServer'] = virt_servers
+        payload['featureType'] = 'loadbalancer_4.0'
+
+        return payload
+
+    @staticmethod
+    def get_loadbalancer(vcns_obj, edge_id):
+        edge_lb = nsxv_edge_cfg_obj.NsxvEdgeCfgObj.get_object(
+            vcns_obj,
+            edge_id,
+            NsxvLoadbalancer.SERVICE_NAME)
+
+        lb_obj = NsxvLoadbalancer(
+            edge_lb['enabled'],
+            edge_lb['enableServiceInsertion'],
+            edge_lb['accelerationEnabled'])
+
+        # Construct loadbalancer objects
+        for virt_srvr in edge_lb['virtualServer']:
+            v_s = NsxvLBVirtualServer(
+                virt_srvr['name'],
+                virt_srvr['ipAddress'],
+                virt_srvr['port'],
+                virt_srvr['protocol'],
+                virt_srvr['enabled'],
+                virt_srvr['accelerationEnabled'],
+                virt_srvr['connectionLimit'])
+
+            # Find application profile objects, attach to virtual server
+            for app_prof in edge_lb['applicationProfile']:
+                if (virt_srvr['applicationProfileId']
+                        == app_prof['applicationProfileId']):
+                    a_p = NsxvLBAppProfile(
+                        app_prof['name'],
+                        app_prof['serverSslEnabled'],
+                        app_prof['sslPassthrough'],
+                        app_prof['template'],
+                        app_prof['insertXForwardedFor'])
+
+                    if app_prof['persistence']:
+                        a_p.set_persistence(
+                            True,
+                            app_prof['persistence']['method'],
+                            app_prof['persistence'].get('cookieName'),
+                            app_prof['persistence'].get('cookieMode'),
+                            app_prof['persistence'].get('expire'))
+
+                    v_s.set_app_profile(a_p)
+
+            # Find default pool, attach to virtual server
+            for pool in edge_lb['pool']:
+                if virt_srvr['defaultPoolId'] == pool['poolId']:
+                    p = NsxvLBPool(
+                        pool['name'],
+                        pool['algorithm'],
+                        pool['transparent'])
+
+                    # Add pool members to pool
+                    for member in pool['member']:
+                        m = NsxvLBPoolMember(
+                            member['name'],
+                            member['ipAddress'],
+                            member['port'],
+                            member['monitorPort'],
+                            member['condition'],
+                            member['weight'],
+                            member['minConn'],
+                            member['maxConn'])
+
+                        p.add_member(m)
+
+                    # Add monitors to pool
+                    for mon in edge_lb['monitor']:
+                        if mon['monitorId'] in pool['monitorId']:
+                            m = NsxvLBMonitor(
+                                mon['name'],
+                                mon['interval'],
+                                mon['maxRetries'],
+                                mon['method'],
+                                mon['timeout'],
+                                mon['type'],
+                                mon['url'])
+
+                            p.add_monitor(m)
+
+                    v_s.set_default_pool(p)
+
+            # Add application rules to virtual server
+            for rule in edge_lb['applicationRule']:
+                if rule['applicationRuleId'] in virt_srvr['applicationRuleId']:
+                    r = NsxvLBAppRule(
+                        rule['name'],
+                        rule['script'])
+
+                    v_s.add_app_rule(r)
+
+            lb_obj.add_virtual_server(v_s)
+
+        return lb_obj
+
+
+class NsxvLBAppProfile():
+    def __init__(
+            self,
+            name,
+            server_ssl_enabled=False,
+            ssl_pass_through=False,
+            template='TCP',
+            insert_xff=False,
+            persist=False,
+            persist_method='cookie',
+            persist_cookie_name='JSESSIONID',
+            persist_cookie_mode='insert',
+            persist_expire=30):
+        self.payload = {
+            'name': name,
+            'serverSslEnabled': server_ssl_enabled,
+            'sslPassthrough': ssl_pass_through,
+            'template': template,
+            'insertXForwardedFor': insert_xff}
+
+        if persist:
+            self.payload['persistence'] = {
+                'method': persist_method,
+                'expire': persist_expire
+            }
+            if persist_cookie_mode == 'cookie':
+                self.payload['persistence']['cookieMode'] = persist_cookie_mode
+                self.payload['persistence']['cookieName'] = persist_cookie_name
+
+    def set_persistence(
+            self,
+            persist=False,
+            persist_method='cookie',
+            persist_cookie_name='JSESSIONID',
+            persist_cookie_mode='insert',
+            persist_expire=30):
+
+        if persist:
+            self.payload['persistence'] = {
+                'method': persist_method,
+                'expire': persist_expire
+            }
+            if persist_cookie_mode == 'cookie':
+                self.payload['persistence']['cookieMode'] = persist_cookie_mode
+                self.payload['persistence']['cookieName'] = persist_cookie_name
+
+        else:
+            self.payload.pop('persistence', None)
+
+
+class NsxvLBAppRule(object):
+    def __init__(self, name, script):
+        self.payload = {
+            'name': name,
+            'script': script}
+
+
+class NsxvLBVirtualServer(object):
+    def __init__(
+            self,
+            name,
+            ip_address,
+            port=80,
+            protocol='HTTP',
+            enabled=True,
+            acceleration_enabled=False,
+            connection_limit=0,
+            enable_service_insertion=False):
+        self.payload = {
+            'name': name,
+            'ipAddress': ip_address,
+            'port': port,
+            'protocol': protocol,
+            'enabled': enabled,
+            'accelerationEnabled': acceleration_enabled,
+            'connectionLimit': connection_limit,
+            'enableServiceInsertion': enable_service_insertion}
+
+        self.app_rules = {}
+        self.app_profile = None
+        self.default_pool = None
+
+    def add_app_rule(self, app_rule):
+        self.app_rules[app_rule.payload['name']] = app_rule
+
+    def del_app_rule(self, name):
+        self.app_rules.pop(name, None)
+
+    def set_default_pool(self, pool):
+        self.default_pool = pool
+
+    def set_app_profile(self, app_profile):
+        self.app_profile = app_profile
+
+
+class NsxvLBMonitor(object):
+    def __init__(
+            self,
+            name,
+            interval=10,
+            max_retries=3,
+            method='GET',
+            timeout=15,
+            mon_type='http',
+            url='/'):
+        self.payload = {
+            'name': name,
+            'interval': interval,
+            'maxRetries': max_retries,
+            'method': method,
+            'timeout': timeout,
+            'type': mon_type,
+            'url': url}
+
+
+class NsxvLBPoolMember(object):
+    def __init__(
+            self,
+            name,
+            ip_address,
+            port,
+            monitor_port=None,
+            condition='enabled',
+            weight=1,
+            min_conn=0,
+            max_conn=0):
+
+        self.payload = {
+            'name': name,
+            'ipAddress': ip_address,
+            'port': port,
+            'monitorPort': monitor_port,
+            'condition': condition,
+            'weight': weight,
+            'minConn': min_conn,
+            'maxConn': max_conn}
+
+
+class NsxvLBPool(object):
+    def __init__(
+            self,
+            name,
+            algorithm='round-robin',
+            transparent=False):
+        self.payload = {
+            'name': name,
+            'algorithm': algorithm,
+            'transparent': transparent}
+
+        self.members = {}
+        self.monitors = {}
+
+    def add_member(self, member):
+        self.members[member.payload['name']] = member
+
+    def del_member(self, name):
+        self.members.pop(name, None)
+
+    def add_monitor(self, monitor):
+        self.monitors[monitor.payload['name']] = monitor
+
+    def del_monitor(self, name):
+        self.monitors.pop(name, None)
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/securitygroup_utils.py b/vmware_nsx/neutron/plugins/vmware/vshield/securitygroup_utils.py
new file mode 100644
index 0000000000..b691900fa6
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/securitygroup_utils.py
@@ -0,0 +1,183 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import xml.etree.ElementTree as et
+
+from neutron.i18n import _LE, _LI
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import loopingcall
+
+WAIT_INTERVAL = 2000
+MAX_ATTEMPTS = 5
+
+LOG = logging.getLogger(__name__)
+
+
+class NsxSecurityGroupUtils(object):
+
+    def __init__(self, nsxv_manager):
+        LOG.debug("Start Security Group Utils initialization")
+        self.nsxv_manager = nsxv_manager
+
+    def to_xml_string(self, element):
+        return et.tostring(element)
+
+    def get_section_with_rules(self, name, rules):
+        """Helper method to create section dict with rules."""
+
+        section = et.Element('section')
+        section.attrib['name'] = name
+        for rule in rules:
+            section.append(rule)
+        return section
+
+    def get_container(self, nsx_sg_id):
+        container = {'type': 'SecurityGroup', 'value': nsx_sg_id}
+        return container
+
+    def get_remote_container(self, remote_group_id, remote_ip_mac):
+        container = None
+        if remote_group_id is not None:
+            return self.get_container(remote_group_id)
+        if remote_ip_mac is not None:
+            container = {'type': 'Ipv4Address', 'value': remote_ip_mac}
+        return container
+
+    def get_rule_config(self, applied_to_id, name, action='allow',
+                        applied_to='SecurityGroup',
+                        source=None, destination=None, services=None,
+                        flags=None):
+        """Helper method to create a nsx rule dict."""
+        ruleTag = et.Element('rule')
+        nameTag = et.SubElement(ruleTag, 'name')
+        nameTag.text = name
+        actionTag = et.SubElement(ruleTag, 'action')
+        actionTag.text = action
+
+        apList = et.SubElement(ruleTag, 'appliedToList')
+        apTag = et.SubElement(apList, 'appliedTo')
+        apTypeTag = et.SubElement(apTag, 'type')
+        apTypeTag.text = applied_to
+        apValueTag = et.SubElement(apTag, 'value')
+        apValueTag.text = applied_to_id
+
+        if source is not None:
+            sources = et.SubElement(ruleTag, 'sources')
+            sources.attrib['excluded'] = 'false'
+            srcTag = et.SubElement(sources, 'source')
+            srcTypeTag = et.SubElement(srcTag, 'type')
+            srcTypeTag.text = source['type']
+            srcValueTag = et.SubElement(srcTag, 'value')
+            srcValueTag.text = source['value']
+
+        if destination is not None:
+            dests = et.SubElement(ruleTag, 'destinations')
+            dests.attrib['excluded'] = 'false'
+            destTag = et.SubElement(dests, 'destination')
+            destTypeTag = et.SubElement(destTag, 'type')
+            destTypeTag.text = destination['type']
+            destValueTag = et.SubElement(destTag, 'value')
+            destValueTag.text = destination['value']
+
+        if services:
+            s = et.SubElement(ruleTag, 'services')
+            for protocol, port, icmptype, icmpcode in services:
+                svcTag = et.SubElement(s, 'service')
+                try:
+                    int(protocol)
+                    svcProtocolTag = et.SubElement(svcTag, 'protocol')
+                    svcProtocolTag.text = str(protocol)
+                except ValueError:
+                    svcProtocolTag = et.SubElement(svcTag, 'protocolName')
+                    svcProtocolTag.text = protocol
+                if port is not None:
+                    svcPortTag = et.SubElement(svcTag, 'destinationPort')
+                    svcPortTag.text = str(port)
+                if icmptype is not None:
+                    svcPortTag = et.SubElement(svcTag, 'subProtocol')
+                    svcPortTag.text = str(icmptype)
+                if icmpcode is not None:
+                    svcPortTag = et.SubElement(svcTag, 'icmpCode')
+                    svcPortTag.text = str(icmpcode)
+
+        if flags:
+            if flags.get('ethertype') is not None:
+                pktTag = et.SubElement(ruleTag, 'packetType')
+                pktTag.text = flags.get('ethertype')
+            if flags.get('direction') is not None:
+                dirTag = et.SubElement(ruleTag, 'direction')
+                dirTag.text = flags.get('direction')
+        return ruleTag
+
+    def get_rule_id_pair_from_section(self, resp):
+        root = et.fromstring(resp)
+        pairs = []
+        for rule in root.findall('rule'):
+            pair = {'nsx_id': rule.attrib.get('id'),
+                    'neutron_id': rule.find('name').text}
+            pairs.append(pair)
+        return pairs
+
+    def insert_rule_in_section(self, section, nsx_rule):
+        section.insert(0, nsx_rule)
+
+    def parse_section(self, xml_string):
+        return et.fromstring(xml_string)
+
+    def add_port_to_security_group(self, nsx_sg_id, nsx_vnic_id):
+        userdata = {
+            'nsx_sg_id': nsx_sg_id,
+            'nsx_vnic_id': nsx_vnic_id,
+            'attempt': 1
+        }
+        LOG.info(_LI("Add task to add %(nsx_sg_id)s member to NSX security "
+                     "group %(nsx_vnic_id)s"), userdata)
+        task = loopingcall.FixedIntervalLoopingCall(
+            self._add_security_groups_port_mapping,
+            userdata=userdata)
+        task.start(WAIT_INTERVAL / 1000)
+
+    def _add_security_groups_port_mapping(self, userdata):
+        nsx_vnic_id = userdata.get('nsx_vnic_id')
+        nsx_sg_id = userdata.get('nsx_sg_id')
+        attempt = userdata.get('attempt')
+        LOG.debug("Trying to execute task to add %s to %s attempt %d",
+                  nsx_vnic_id, nsx_sg_id, attempt)
+        if attempt >= MAX_ATTEMPTS:
+            LOG.error(_LE("Stop task to add %(nsx_vnic_id)s to security group "
+                          "%(nsx_sg_id)s"), userdata)
+            LOG.error(_LE("Exception %s"), userdata.get('exception'))
+            raise loopingcall.LoopingCallDone()
+        else:
+            attempt = attempt + 1
+            userdata['attempt'] = attempt
+
+        try:
+            h, c = self.nsxv_manager.vcns.add_member_to_security_group(
+                nsx_sg_id, nsx_vnic_id)
+            LOG.info(_LI("Added %s(nsx_sg_id)s member to NSX security "
+                         "group %(nsx_vnic_id)s"), userdata)
+
+        except Exception as e:
+            LOG.debug("NSX security group %(nsx_sg_id)s member add "
+                      "failed %(nsx_vnic_id)s - attempt %(attempt)d",
+                      {'nsx_sg_id': nsx_sg_id,
+                       'nsx_vnic_id': nsx_vnic_id,
+                       'attempt': attempt})
+            userdata['exception'] = e
+            LOG.debug("Exception %s", e)
+            return
+
+        raise loopingcall.LoopingCallDone()
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/tasks/tasks.py b/vmware_nsx/neutron/plugins/vmware/vshield/tasks/tasks.py
index 3f9ac23680..0ea9fcca16 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/tasks/tasks.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/tasks/tasks.py
@@ -23,7 +23,7 @@ from neutron.common import exceptions
 from neutron.i18n import _LE, _LI
 from neutron.openstack.common import log as logging
 from neutron.openstack.common import loopingcall
-from neutron.plugins.vmware.vshield.tasks import constants
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import constants
 
 DEFAULT_INTERVAL = 1000
 
@@ -184,15 +184,15 @@ class TaskManager():
         try:
             status = task._execute_callback(task)
         except Exception:
-            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
+            LOG.exception(_LE("Task %(task)s encountered exception in "
+                              "%(cb)s"),
                           {'task': str(task),
                            'cb': str(task._execute_callback)})
             status = constants.TaskStatus.ERROR
 
-        LOG.debug("Task %(task)s return %(status)s", {
-            'task': str(task),
-            'status': status})
-
+        LOG.debug("Task %(task)s return %(status)s",
+                  {'task': str(task),
+                   'status': status})
         task._update_status(status)
         task._executed()
 
@@ -203,10 +203,10 @@ class TaskManager():
         try:
             task._result_callback(task)
         except Exception:
-            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
+            LOG.exception(_LE("Task %(task)s encountered exception in "
+                              "%(cb)s"),
                           {'task': str(task),
                            'cb': str(task._result_callback)})
-
         LOG.debug("Task %(task)s return %(status)s",
                   {'task': str(task), 'status': task.status})
 
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/vcns.py b/vmware_nsx/neutron/plugins/vmware/vshield/vcns.py
index 68ad9c7b7b..3f98bcd84d 100644
--- a/vmware_nsx/neutron/plugins/vmware/vshield/vcns.py
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/vcns.py
@@ -1,3 +1,5 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
 # Copyright 2013 VMware, Inc
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,10 +14,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo.serialization import jsonutils
+import time
 
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import xml.etree.ElementTree as et
+
+from neutron.i18n import _LI
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.vshield.common import VcnsApiClient
+from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions
+from vmware_nsx.neutron.plugins.vmware.vshield.common import VcnsApiClient
 
 LOG = logging.getLogger(__name__)
 
@@ -29,16 +37,27 @@ URI_PREFIX = "/api/4.0/edges"
 FIREWALL_SERVICE = "firewall/config"
 FIREWALL_RULE_RESOURCE = "rules"
 
+#NSXv Constants
+FIREWALL_PREFIX = '/api/4.0/firewall/globalroot-0/config'
+SECURITYGROUP_PREFIX = '/api/2.0/services/securitygroup'
+VDN_PREFIX = '/api/2.0/vdn'
+SERVICES_PREFIX = '/api/2.0/services'
+
 #LbaaS Constants
 LOADBALANCER_SERVICE = "loadbalancer/config"
 VIP_RESOURCE = "virtualservers"
 POOL_RESOURCE = "pools"
 MONITOR_RESOURCE = "monitors"
 APP_PROFILE_RESOURCE = "applicationprofiles"
+APP_RULE_RESOURCE = "applicationrules"
 
 # IPsec VPNaaS Constants
 IPSEC_VPN_SERVICE = 'ipsec/config'
 
+# Dhcp constants
+DHCP_SERVICE = "dhcp/config"
+DHCP_BINDING_RESOURCE = "bindings"
+
 
 class Vcns(object):
 
@@ -48,16 +67,37 @@ class Vcns(object):
         self.password = password
         self.jsonapi_client = VcnsApiClient.VcnsApiHelper(address, user,
                                                           password, 'json')
+        self.xmlapi_client = VcnsApiClient.VcnsApiHelper(address, user,
+                                                         password, 'xml')
+
+    def _client_request(self, client, method, uri, params, headers,
+                        encodeParams):
+        retries = max(cfg.CONF.nsxv.retries, 1)
+        delay = 0.5
+        for attempt in range(1, retries + 1):
+            if attempt != 1:
+                time.sleep(delay)
+                delay = min(2 * delay, 60)
+            try:
+                return client(method, uri, params, headers, encodeParams)
+            except exceptions.ServiceConflict as e:
+                if attempt == retries:
+                    raise e
+            LOG.info(_LI('NSXv: conflict on request. Trying again.'))
 
     def do_request(self, method, uri, params=None, format='json', **kwargs):
         LOG.debug("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')", {
                   'method': method,
                   'uri': uri,
                   'body': jsonutils.dumps(params)})
+        headers = kwargs.get('headers')
+        encodeParams = kwargs.get('encode', True)
         if format == 'json':
-            header, content = self.jsonapi_client.request(method, uri, params)
+            _client = self.jsonapi_client.request
         else:
-            header, content = self.xmlapi_client.request(method, uri, params)
+            _client = self.xmlapi_client.request
+        header, content = self._client_request(_client, method, uri, params,
+                                               headers, encodeParams)
         LOG.debug("Header: '%s'", header)
         LOG.debug("Content: '%s'", content)
         if content == '':
@@ -70,10 +110,18 @@ class Vcns(object):
         uri = URI_PREFIX + "?async=true"
         return self.do_request(HTTP_POST, uri, request, decode=False)
 
+    def update_edge(self, edge_id, request):
+        uri = "%s/%s?async=true" % (URI_PREFIX, edge_id)
+        return self.do_request(HTTP_PUT, uri, request, decode=False)
+
     def get_edge_id(self, job_id):
         uri = URI_PREFIX + "/jobs/%s" % job_id
         return self.do_request(HTTP_GET, uri, decode=True)
 
+    def get_edge_jobs(self, edge_id):
+        uri = URI_PREFIX + "/%s/jobs" % edge_id
+        return self.do_request(HTTP_GET, uri, decode=True)
+
     def get_edge_deploy_status(self, edge_id):
         uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id
         return self.do_request(HTTP_GET, uri, decode="True")
@@ -82,16 +130,37 @@ class Vcns(object):
         uri = "%s/%s" % (URI_PREFIX, edge_id)
         return self.do_request(HTTP_DELETE, uri)
 
+    def add_vdr_internal_interface(self, edge_id, interface):
+        uri = "%s/%s/interfaces?action=patch&async=true" % (URI_PREFIX,
+                                                            edge_id)
+        return self.do_request(HTTP_POST, uri, interface, decode=True)
+
+    def update_vdr_internal_interface(
+        self, edge_id, interface_index, interface):
+        uri = "%s/%s/interfaces/%s?async=true" % (URI_PREFIX, edge_id,
+                                                  interface_index)
+        return self.do_request(HTTP_PUT, uri, interface, decode=True)
+
+    def delete_vdr_internal_interface(self, edge_id, interface_index):
+        uri = "%s/%s/interfaces/%d?async=true" % (URI_PREFIX, edge_id,
+                                                  interface_index)
+        return self.do_request(HTTP_DELETE, uri, decode=True)
+
     def update_interface(self, edge_id, vnic):
-        uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index'])
+        uri = "%s/%s/vnics/%d?async=true" % (URI_PREFIX, edge_id,
+                                             vnic['index'])
         return self.do_request(HTTP_PUT, uri, vnic, decode=True)
 
+    def delete_interface(self, edge_id, vnic_index):
+        uri = "%s/%s/vnics/%d?async=true" % (URI_PREFIX, edge_id, vnic_index)
+        return self.do_request(HTTP_DELETE, uri, decode=True)
+
     def get_nat_config(self, edge_id):
         uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
         return self.do_request(HTTP_GET, uri, decode=True)
 
     def update_nat_config(self, edge_id, nat):
-        uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
+        uri = "%s/%s/nat/config?async=true" % (URI_PREFIX, edge_id)
         return self.do_request(HTTP_PUT, uri, nat, decode=True)
 
     def delete_nat_rule(self, edge_id, rule_id):
@@ -106,8 +175,12 @@ class Vcns(object):
         uri = URI_PREFIX
         return self.do_request(HTTP_GET, uri, decode=True)
 
+    def get_edge_interfaces(self, edge_id):
+        uri = "%s/%s/interfaces" % (URI_PREFIX, edge_id)
+        return self.do_request(HTTP_GET, uri, decode=True)
+
     def update_routes(self, edge_id, routes):
-        uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id)
+        uri = "%s/%s/routing/config/static?async=true" % (URI_PREFIX, edge_id)
         return self.do_request(HTTP_PUT, uri, routes)
 
     def create_lswitch(self, lsconfig):
@@ -129,11 +202,13 @@ class Vcns(object):
     def update_firewall(self, edge_id, fw_req):
         uri = self._build_uri_path(
             edge_id, FIREWALL_SERVICE)
+        uri += '?async=true'
         return self.do_request(HTTP_PUT, uri, fw_req)
 
     def delete_firewall(self, edge_id):
         uri = self._build_uri_path(
             edge_id, FIREWALL_SERVICE, None)
+        uri += '?async=true'
         return self.do_request(HTTP_DELETE, uri)
 
     def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req):
@@ -270,6 +345,25 @@ class Vcns(object):
             app_profileid)
         return self.do_request(HTTP_DELETE, uri)
 
+    def create_app_rule(self, edge_id, app_rule):
+        uri = self._build_uri_path(
+            edge_id, LOADBALANCER_SERVICE,
+            APP_RULE_RESOURCE)
+        return self.do_request(HTTP_POST, uri, app_rule)
+
+    def update_app_rule(self, edge_id, app_ruleid, app_rule):
+        uri = self._build_uri_path(
+            edge_id, LOADBALANCER_SERVICE,
+            APP_RULE_RESOURCE, app_ruleid)
+        return self.do_request(HTTP_PUT, uri, app_rule)
+
+    def delete_app_rule(self, edge_id, app_ruleid):
+        uri = self._build_uri_path(
+            edge_id, LOADBALANCER_SERVICE,
+            APP_RULE_RESOURCE,
+            app_ruleid)
+        return self.do_request(HTTP_DELETE, uri)
+
     def update_ipsec_config(self, edge_id, ipsec_config):
         uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
         return self.do_request(HTTP_PUT, uri, ipsec_config)
@@ -282,6 +376,156 @@ class Vcns(object):
         uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
         return self.do_request(HTTP_GET, uri)
 
+    def create_virtual_wire(self, vdn_scope_id, request):
+        """Creates a VXLAN virtual wire
+
+        The method will return the virtual wire ID.
+        """
+        uri = '/api/2.0/vdn/scopes/%s/virtualwires' % vdn_scope_id
+        return self.do_request(HTTP_POST, uri, request, format='xml',
+                               decode=False)
+
+    def delete_virtual_wire(self, virtualwire_id):
+        """Deletes a virtual wire."""
+        uri = '/api/2.0/vdn/virtualwires/%s' % virtualwire_id
+        return self.do_request(HTTP_DELETE, uri, format='xml')
+
+    def create_port_group(self, dvs_id, request):
+        """Creates a port group on a DVS
+
+        The method will return the port group ID.
+        """
+        uri = '/api/2.0/xvs/switches/%s/networks' % dvs_id
+        return self.do_request(HTTP_POST, uri, request, format='xml',
+                               decode=False)
+
+    def delete_port_group(self, dvs_id, portgroup_id):
+        """Deletes a portgroup."""
+        uri = '/api/2.0/xvs/switches/%s/networks/%s' % (dvs_id,
+                                                        portgroup_id)
+        return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
+
+    def query_interface(self, edge_id, vnic_index):
+        uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic_index)
+        return self.do_request(HTTP_GET, uri, decode=True)
+
+    def reconfigure_dhcp_service(self, edge_id, request_config):
+        """Reconfigure dhcp static bindings in the created Edge."""
+        uri = "/api/4.0/edges/%s/dhcp/config?async=true" % edge_id
+
+        return self.do_request(HTTP_PUT, uri, request_config)
+
+    def query_dhcp_configuration(self, edge_id):
+        """Query DHCP configuration from the specific edge."""
+        uri = "/api/4.0/edges/%s/dhcp/config" % edge_id
+        return self.do_request(HTTP_GET, uri)
+
+    def create_dhcp_binding(self, edge_id, request_config):
+        """Append one dhcp static binding on the edge."""
+        uri = self._build_uri_path(edge_id,
+                                   DHCP_SERVICE, DHCP_BINDING_RESOURCE,
+                                   is_async=True)
+        return self.do_request(HTTP_POST, uri, request_config, decode=False)
+
+    def delete_dhcp_binding(self, edge_id, binding_id):
+        """Delete one dhcp static binding on the edge."""
+        uri = self._build_uri_path(edge_id,
+                                   DHCP_SERVICE, DHCP_BINDING_RESOURCE,
+                                   binding_id, is_async=True)
+        return self.do_request(HTTP_DELETE, uri, decode=False)
+
+    def create_security_group(self, request):
+        """Creates a security group container in nsx.
+
+        The method will return the security group ID.
+        """
+        uri = '%s/globalroot-0' % (SECURITYGROUP_PREFIX)
+        return self.do_request(HTTP_POST, uri, request, format='xml',
+                               decode=False)
+
+    def delete_security_group(self, securitygroup_id):
+        """Deletes a security group container."""
+        uri = '%s/%s?force=true' % (SECURITYGROUP_PREFIX, securitygroup_id)
+        return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
+
+    def create_section(self, type, request):
+        """Creates a layer 3 or layer 2 section in nsx rule table.
+
+        The method will return the uri to newly created section.
+        """
+        if type == 'ip':
+            sec_type = 'layer3sections'
+        else:
+            sec_type = 'layer2sections'
+        uri = '%s/%s?autoSaveDraft=false' % (FIREWALL_PREFIX, sec_type)
+        return self.do_request(HTTP_POST, uri, request, format='xml',
+                               decode=False, encode=False)
+
+    def update_section(self, section_uri, request, h):
+        """Replaces a section in nsx rule table."""
+        uri = '%s?autoSaveDraft=false' % section_uri
+        headers = self._get_section_header(section_uri, h)
+        return self.do_request(HTTP_PUT, uri, request, format='xml',
+                               decode=False, encode=False, headers=headers)
+
+    def delete_section(self, section_uri):
+        """Deletes a section in nsx rule table."""
+        uri = '%s?autoSaveDraft=false' % section_uri
+        return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
+
+    def get_section(self, section_uri):
+        return self.do_request(HTTP_GET, section_uri, format='xml',
+                               decode=False)
+
+    def get_section_id(self, section_name):
+        """Retrieve the id of a section from nsx."""
+        uri = FIREWALL_PREFIX
+        h, section_list = self.do_request(HTTP_GET, uri, decode=False,
+                                          format='xml')
+
+        root = et.fromstring(section_list)
+
+        for sec in root.iter('section'):
+            if sec.attrib['name'] == section_name:
+                return sec.attrib['id']
+
+    def update_section_by_id(self, id, type, request):
+        """Update a section while building its uri from the id."""
+        if type == 'ip':
+            sec_type = 'layer3sections'
+        else:
+            sec_type = 'layer2sections'
+        section_uri = '%s/%s/%s' % (FIREWALL_PREFIX, sec_type, id)
+        self.update_section(section_uri, request, h=None)
+
+    def _get_section_header(self, section_uri, h=None):
+        if h is None:
+            h, c = self.do_request(HTTP_GET, section_uri, format='xml',
+                                   decode=False)
+        etag = h['etag']
+        headers = {'If-Match': etag}
+        return headers
+
+    def remove_rule_from_section(self, section_uri, rule_id):
+        """Deletes a rule from nsx section table."""
+        uri = '%s/rules/%s?autoSaveDraft=false' % (section_uri, rule_id)
+        headers = self._get_section_header(section_uri)
+        return self.do_request(HTTP_DELETE, uri, format='xml',
+                               headers=headers)
+
+    def add_member_to_security_group(self, security_group_id, member_id):
+        """Adds a vnic member to nsx security group."""
+        uri = '%s/%s/members/%s' % (SECURITYGROUP_PREFIX,
+                                    security_group_id, member_id)
+        return self.do_request(HTTP_PUT, uri, format='xml', decode=False)
+
+    def remove_member_from_security_group(self, security_group_id,
+                                          member_id):
+        """Removes a vnic member from nsx security group."""
+        uri = '%s/%s/members/%s' % (SECURITYGROUP_PREFIX,
+                                    security_group_id, member_id)
+        return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
+
     def _build_uri_path(self, edge_id,
                         service,
                         resource=None,
@@ -291,13 +535,61 @@ class Vcns(object):
                         relations=None,
                         filters=None,
                         types=None,
-                        is_attachment=False):
+                        is_attachment=False,
+                        is_async=False):
         uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service)
         if resource:
-            res_path = resource
-            if resource_id:
-                res_path += "/%s" % resource_id
+            res_path = resource + (resource_id and "/%s" % resource_id or '')
             uri_path = "%s/%s" % (uri_prefix, res_path)
         else:
             uri_path = uri_prefix
-        return uri_path
+        if is_async:
+            return (uri_path + "?async=true")
+        else:
+            return uri_path
+
+    def _scopingobjects_lookup(self, type_name, object_id):
+        uri = '%s/usermgmt/scopingobjects' % SERVICES_PREFIX
+        h, so_list = self.do_request(HTTP_GET, uri, decode=False,
+                                     format='xml')
+
+        root = et.fromstring(so_list)
+        for obj in root.iter('object'):
+            if (obj.find('objectTypeName').text == type_name and
+                    obj.find('objectId').text == object_id):
+                return True
+
+        return False
+
+    def validate_datacenter_moid(self, object_id):
+        return self._scopingobjects_lookup('Datacenter', object_id)
+
+    def validate_network(self, object_id):
+        return (self._scopingobjects_lookup('Network', object_id) or
+                self._scopingobjects_lookup('DistributedVirtualPortgroup',
+                                            object_id) or
+                self._scopingobjects_lookup('VirtualWire', object_id))
+
+    def validate_vdn_scope(self, object_id):
+        uri = '%s/scopes' % VDN_PREFIX
+        h, scope_list = self.do_request(HTTP_GET, uri, decode=False,
+                                        format='xml')
+
+        root = et.fromstring(scope_list)
+        for obj_id in root.iter('objectId'):
+            if obj_id.text == object_id:
+                return True
+
+        return False
+
+    def validate_dvs(self, object_id):
+        uri = '%s/switches' % VDN_PREFIX
+        h, dvs_list = self.do_request(HTTP_GET, uri, decode=False,
+                                      format='xml')
+
+        root = et.fromstring(dvs_list)
+        for obj_id in root.iter('objectId'):
+            if obj_id.text == object_id:
+                return True
+
+        return False
diff --git a/vmware_nsx/neutron/plugins/vmware/vshield/vcns_driver.py b/vmware_nsx/neutron/plugins/vmware/vshield/vcns_driver.py
new file mode 100644
index 0000000000..e3550a3cc2
--- /dev/null
+++ b/vmware_nsx/neutron/plugins/vmware/vshield/vcns_driver.py
@@ -0,0 +1,47 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 VMware, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo.config import cfg
+
+from neutron.openstack.common import log as logging
+from vmware_nsx.neutron.plugins.vmware.common import config  # noqa
+from vmware_nsx.neutron.plugins.vmware.vshield import edge_appliance_driver
+from vmware_nsx.neutron.plugins.vmware.vshield import edge_firewall_driver
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks
+from vmware_nsx.neutron.plugins.vmware.vshield import vcns
+
+LOG = logging.getLogger(__name__)
+
+
+class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver,
+                 edge_firewall_driver.EdgeFirewallDriver):
+
+    def __init__(self, callbacks):
+        super(VcnsDriver, self).__init__()
+
+        self.callbacks = callbacks
+        self.vcns_uri = cfg.CONF.nsxv.manager_uri
+        self.vcns_user = cfg.CONF.nsxv.user
+        self.vcns_passwd = cfg.CONF.nsxv.password
+        self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid
+        self.deployment_container_id = cfg.CONF.nsxv.deployment_container_id
+        self.resource_pool_id = cfg.CONF.nsxv.resource_pool_id
+        self.datastore_id = cfg.CONF.nsxv.datastore_id
+        self.external_network = cfg.CONF.nsxv.external_network
+        interval = cfg.CONF.nsxv.task_status_check_interval
+        self.task_manager = tasks.TaskManager(interval)
+        self.task_manager.start()
+        self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd)
diff --git a/vmware_nsx/neutron/tests/unit/vmware/__init__.py b/vmware_nsx/neutron/tests/unit/vmware/__init__.py
index e0c4b0f9e7..bdd17ae422 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/__init__.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/__init__.py
@@ -16,13 +16,15 @@
 
 import os
 
-from neutron.plugins.vmware.api_client import client as nsx_client
-from neutron.plugins.vmware.api_client import eventlet_client
-from neutron.plugins.vmware import extensions
-import neutron.plugins.vmware.plugin as neutron_plugin
-from neutron.plugins.vmware.vshield.common import VcnsApiClient as vcnsapi
-from neutron.plugins.vmware.vshield import vcns
-import neutron.plugins.vmware.vshield.vcns_driver as vcnsdriver
+from vmware_nsx.neutron.plugins.vmware.api_client import client as nsx_client
+from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_client
+from vmware_nsx.neutron.plugins.vmware import extensions
+import vmware_nsx.neutron.plugins.vmware.plugin as neutron_plugin
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
+    VcnsApiClient as vcnsapi)
+from vmware_nsx.neutron.plugins.vmware.vshield import edge_utils
+from vmware_nsx.neutron.plugins.vmware.vshield import vcns
+import vmware_nsx.neutron.plugins.vmware.vshield.vcns_driver as vcnsdriver
 
 
 plugin = neutron_plugin.NsxPlugin
@@ -31,6 +33,7 @@ evt_client = eventlet_client.EventletApiClient
 vcns_class = vcns.Vcns
 vcns_driver = vcnsdriver.VcnsDriver
 vcns_api_helper = vcnsapi.VcnsApiHelper
+edge_manage_class = edge_utils.EdgeManager
 
 STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc')
 NSXEXT_PATH = os.path.dirname(extensions.__file__)
@@ -40,6 +43,8 @@ CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__)
 VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__)
 VCNS_DRIVER_NAME = '%s.%s' % (vcns_driver.__module__, vcns_driver.__name__)
 VCNSAPI_NAME = '%s.%s' % (vcns_api_helper.__module__, vcns_api_helper.__name__)
+EDGE_MANAGE_NAME = '%s.%s' % (edge_manage_class.__module__,
+                              edge_manage_class.__name__)
 
 
 def get_fake_conf(filename):
@@ -47,4 +52,5 @@ def get_fake_conf(filename):
 
 
 def nsx_method(method_name, module_name='nsxlib'):
-    return '%s.%s.%s' % ('neutron.plugins.vmware', module_name, method_name)
+    return '%s.%s.%s' % ('vmware_nsx.neutron.plugins.vmware', module_name,
+                         method_name)
diff --git a/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py b/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py
index 5ea40d0d8e..3b6e57ad4e 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_common.py
@@ -16,8 +16,8 @@
 
 import httplib
 
-from neutron.plugins.vmware import api_client
 from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware import api_client
 
 
 class ApiCommonTest(base.BaseTestCase):
diff --git a/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py b/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py
index bce6666681..63cc4024a8 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py
@@ -21,9 +21,11 @@ import mock
 
 from neutron.i18n import _LI
 from neutron.openstack.common import log as logging
-from neutron.plugins.vmware.api_client import eventlet_client as client
-from neutron.plugins.vmware.api_client import eventlet_request as request
 from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware.api_client import (
+    eventlet_client as client)
+from vmware_nsx.neutron.plugins.vmware.api_client import (
+    eventlet_request as request)
 from vmware_nsx.neutron.tests.unit import vmware
 
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/db/test_nsx_db.py b/vmware_nsx/neutron/tests/unit/vmware/db/test_nsx_db.py
index f4078a4694..7b23a2bf15 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/db/test_nsx_db.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/db/test_nsx_db.py
@@ -17,9 +17,9 @@ from oslo.db import exception as d_exc
 
 from neutron import context
 from neutron.db import models_v2
-from neutron.plugins.vmware.dbexts import db as nsx_db
 from neutron.plugins.vmware.dbexts import models
 from neutron.tests.unit import testlib_api
+from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
 
 
 class NsxDBTestCase(testlib_api.SqlTestCase):
diff --git a/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.test b/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.test
index 1bb959be33..5d41b909b7 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.test
+++ b/vmware_nsx/neutron/tests/unit/vmware/etc/nsx.ini.test
@@ -5,3 +5,10 @@ nsx_user=foo
 nsx_password=bar
 default_l3_gw_service_uuid = whatever
 default_l2_gw_service_uuid = whatever
+
+[nsxv]
+manager_uri = https://fake_manager
+user = fake_user
+password = fake_password
+vdn_scope_id = fake_vdn_scope_id
+dvs_id = fake_dvs_id
diff --git a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py
index 89b0ae45fc..50352e2204 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_maclearning.py
@@ -22,9 +22,9 @@ from neutron.api.v2 import attributes
 from neutron.common import test_lib
 from neutron import context
 from neutron.extensions import agent
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import sync
 from neutron.tests.unit import test_db_plugin
+from vmware_nsx.neutron.plugins.vmware.api_client import version
+from vmware_nsx.neutron.plugins.vmware.common import sync
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py
index 6f8bd09486..ae18ff0281 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_networkgw.py
@@ -28,15 +28,15 @@ from neutron import manager
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
 from neutron.plugins.vmware.dbexts import networkgw_db
-from neutron.plugins.vmware.extensions import networkgw
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
 from neutron import quota
 from neutron.tests import base
 from neutron.tests.unit import test_api_v2
 from neutron.tests.unit import test_db_plugin
 from neutron.tests.unit import test_extensions
 from neutron.tests.unit import testlib_plugin
+from vmware_nsx.neutron.plugins.vmware.extensions import networkgw
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py
index 6be1248f6e..b488d8cd87 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_portsecurity.py
@@ -16,8 +16,8 @@
 import mock
 
 from neutron.common import test_lib
-from neutron.plugins.vmware.common import sync
 from neutron.tests.unit import test_extension_portsecurity as psec
+from vmware_nsx.neutron.plugins.vmware.common import sync
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py
index 12225a25f6..a1f30f6509 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_qosqueues.py
@@ -21,9 +21,9 @@ import webob.exc
 
 from neutron import context
 from neutron.plugins.vmware.dbexts import qos_db
-from neutron.plugins.vmware.extensions import qos as ext_qos
-from neutron.plugins.vmware import nsxlib
 from neutron.tests.unit import test_extensions
+from vmware_nsx.neutron.plugins.vmware.extensions import qos as ext_qos
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/extensions/test_vnic_index.py b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_vnic_index.py
new file mode 100644
index 0000000000..b26b633c5e
--- /dev/null
+++ b/vmware_nsx/neutron/tests/unit/vmware/extensions/test_vnic_index.py
@@ -0,0 +1,108 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo.config import cfg
+from oslo.db import exception as d_exc
+
+from neutron.api.v2 import attributes as attr
+from neutron import context as neutron_context
+from neutron.db import db_base_plugin_v2
+from neutron import manager
+from neutron.openstack.common import uuidutils
+from neutron.tests.unit import test_db_plugin
+from vmware_nsx.neutron.plugins.vmware.dbexts import vnic_index_db
+from vmware_nsx.neutron.plugins.vmware.extensions import vnic_index as vnicidx
+from vmware_nsx.neutron.tests.unit import vmware
+
+
+DB_PLUGIN_KLASS = ('vmware_nsx.neutron.tests.unit.vmware.extensions.'
+                   'test_vnic_index.VnicIndexTestPlugin')
+
+_uuid = uuidutils.generate_uuid
+
+
+class VnicIndexTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
+                          vnic_index_db.VnicIndexDbMixin):
+
+    supported_extension_aliases = ["vnic-index"]
+
+    def update_port(self, context, id, port):
+        p = port['port']
+        current_port = super(VnicIndexTestPlugin, self).get_port(context, id)
+        vnic_idx = p.get(vnicidx.VNIC_INDEX)
+        device_id = current_port['device_id']
+        if attr.is_attr_set(vnic_idx) and device_id != '':
+            self._set_port_vnic_index_mapping(
+                context, id, device_id, vnic_idx)
+
+        with context.session.begin(subtransactions=True):
+            p = port['port']
+            ret_port = super(VnicIndexTestPlugin, self).update_port(
+                context, id, port)
+            vnic_idx = current_port.get(vnicidx.VNIC_INDEX)
+            if (attr.is_attr_set(vnic_idx) and
+                device_id != ret_port['device_id']):
+                self._delete_port_vnic_index_mapping(
+                    context, id)
+        return ret_port
+
+    def delete_port(self, context, id):
+        port_db = self.get_port(context, id)
+        vnic_idx = port_db.get(vnicidx.VNIC_INDEX)
+        if attr.is_attr_set(vnic_idx):
+            self._delete_port_vnic_index_mapping(context, id)
+        with context.session.begin(subtransactions=True):
+            super(VnicIndexTestPlugin, self).delete_port(context, id)
+
+
+class VnicIndexDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
+    def setUp(self, plugin=None, ext_mgr=None):
+        plugin = plugin or DB_PLUGIN_KLASS
+        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
+        super(VnicIndexDbTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
+
+    def _port_index_update(self, port_id, index):
+        data = {'port': {'vnic_index': index}}
+        req = self.new_update_request('ports', data, port_id)
+        res = self.deserialize('json', req.get_response(self.api))
+        return res
+
+    def test_vnic_index_db(self):
+        plugin = manager.NeutronManager.get_plugin()
+        vnic_index = 2
+        device_id = _uuid()
+        context = neutron_context.get_admin_context()
+        with self.port(device_id=device_id,
+                       device_owner='compute:None') as port:
+            port_id = port['port']['id']
+            res = self._port_index_update(port_id, vnic_index)
+            self.assertEqual(res['port'][vnicidx.VNIC_INDEX], vnic_index)
+            # Port should be associated with at most one vnic index
+            self.assertRaises(d_exc.DBDuplicateEntry,
+                              plugin._set_port_vnic_index_mapping,
+                              context, port_id, device_id, 1)
+            # Only one Port can be associated with a specific index on a device
+            self.assertRaises(d_exc.DBDuplicateEntry,
+                              plugin._set_port_vnic_index_mapping,
+                              context, _uuid(), device_id, vnic_index)
+        # Check that the call for _delete_port_vnic_index remove the row from
+        # the table
+
+        # TODO(kobis): deletion was removed from port - fix this assert
+        # self.assertIsNone(plugin._get_port_vnic_index(context, port_id))
+
+
+class TestVnicIndex(VnicIndexDbTestCase):
+    pass
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsx_v/__init__.py b/vmware_nsx/neutron/tests/unit/vmware/nsx_v/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsx_v/test_nsxv_loadbalancer.py b/vmware_nsx/neutron/tests/unit/vmware/nsx_v/test_nsxv_loadbalancer.py
new file mode 100644
index 0000000000..8e7fb8fb92
--- /dev/null
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsx_v/test_nsxv_loadbalancer.py
@@ -0,0 +1,95 @@
+# Copyright 2014 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import json
+
+import mock
+
+from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware.vshield import nsxv_loadbalancer
+from vmware_nsx.neutron.plugins.vmware.vshield import vcns
+
+
+class NsxvLoadbalancerTestCase(base.BaseTestCase):
+
+    EDGE_OBJ_JSON = (
+        '{"accelerationEnabled":false,"applicationProfile":[{'
+        '"applicationProfileId":"applicationProfile-1","insertXForwardedFor":'
+        'false,"name":"MDSrvProxy","persistence":{"cookieMode":"insert",'
+        '"cookieName":"JSESSIONID","expire":"30","method":"cookie"},'
+        '"serverSslEnabled":false,"sslPassthrough":false,"template":"HTTP"}],'
+        '"applicationRule":[],"enableServiceInsertion":false,"enabled":true,'
+        '"featureType":"loadbalancer_4.0","logging":{"enable":false,'
+        '"logLevel":"info"},"monitor":[{"interval":10,"maxRetries":3,"method":'
+        '"GET","monitorId":"monitor-1","name":"MDSrvMon","timeout":15,"type":'
+        '"http","url":"/"}],"pool":[{"algorithm":"round-robin",'
+        '"applicationRuleId":[],"member":[{"condition":"enabled","ipAddress":'
+        '"192.168.0.39","maxConn":0,"memberId":"member-1","minConn":0,'
+        '"monitorPort":8775,"name":"Member-1","port":8775,"weight":1}],'
+        '"monitorId":["monitor-1"],"name":"MDSrvPool","poolId":"pool-1",'
+        '"transparent":false}],"version":6,"virtualServer":[{'
+        '"accelerationEnabled":false,"applicationProfileId":'
+        '"applicationProfile-1","applicationRuleId":[],"connectionLimit":0,'
+        '"defaultPoolId":"pool-1","enableServiceInsertion":false,'
+        '"enabled":true,"ipAddress":"169.254.0.3","name":"MdSrv",'
+        '"port":"8775","protocol":"http","virtualServerId":'
+        '"virtualServer-1"}]}')
+
+    OUT_OBJ_JSON = (
+        '{"accelerationEnabled": false, "applicationProfile": [{'
+        '"applicationProfileId": "applicationProfile-1", '
+        '"insertXForwardedFor": false, "name": "MDSrvProxy", "persistence": '
+        '{"expire": "30", "method": "cookie"}, "serverSslEnabled": false, '
+        '"sslPassthrough": false, "template": "HTTP"}],'
+        ' "enableServiceInsertion": false, "enabled": true, "featureType": '
+        '"loadbalancer_4.0", "monitor": [{"interval": 10, "maxRetries": 3, '
+        '"method": "GET", "monitorId": "monitor-1", "name": "MDSrvMon", '
+        '"timeout": 15, "type": "http", "url": "/"}], "pool": [{"algorithm":'
+        ' "round-robin", "member": [{"condition": "enabled", "ipAddress": '
+        '"192.168.0.39", "maxConn": 0, "memberId": "member-1", "minConn": 0, '
+        '"monitorPort": 8775, "name": "Member-1", "port": 8775, "weight": 1}],'
+        ' "monitorId": ["monitor-1"], "name": "MDSrvPool", "poolId": "pool-1",'
+        ' "transparent": false}], "virtualServer": [{"accelerationEnabled": '
+        'false, "applicationProfileId": "applicationProfile-1", '
+        '"connectionLimit": 0, "defaultPoolId": "pool-1", '
+        '"enableServiceInsertion": false, "enabled": true, "ipAddress": '
+        '"169.254.0.3", "name": "MdSrv", "port": "8775", "protocol": '
+        '"http", "virtualServerId": "virtualServer-1"}]}')
+
+    LB_URI = '/api/4.0/edges/%s/loadbalancer/config?async=true'
+    EDGE_1 = 'edge-x'
+    EDGE_2 = 'edge-y'
+
+    def setUp(self):
+        super(NsxvLoadbalancerTestCase, self).setUp()
+        self._lb = nsxv_loadbalancer.NsxvLoadbalancer()
+        self._vcns = vcns.Vcns(None, None, None)
+
+    def test_get_edge_loadbalancer(self):
+        h = None
+        v = json.loads(self.EDGE_OBJ_JSON)
+
+        with mock.patch.object(self._vcns, 'do_request',
+                               return_value=(h, v)) as mock_do_request:
+            lb = nsxv_loadbalancer.NsxvLoadbalancer.get_loadbalancer(
+                self._vcns, self.EDGE_1)
+            lb.submit_to_backend(self._vcns, self.EDGE_2)
+
+            mock_do_request.assert_called_with(
+                vcns.HTTP_PUT,
+                self.LB_URI % self.EDGE_2,
+                self.OUT_OBJ_JSON,
+                format='json',
+                encode=False)
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/base.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/base.py
index 1882f6d0b0..a6e5656fb5 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/base.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/base.py
@@ -16,13 +16,13 @@
 
 import mock
 
-from neutron.plugins.vmware.api_client import client
 from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import config  # noqa
-from neutron.plugins.vmware import nsx_cluster as cluster
 from neutron.tests import base
 from neutron.tests.unit import test_api_v2
+from vmware_nsx.neutron.plugins.vmware.api_client import client
+from vmware_nsx.neutron.plugins.vmware.api_client import version
+from vmware_nsx.neutron.plugins.vmware.common import config  # noqa
+from vmware_nsx.neutron.plugins.vmware import nsx_cluster as cluster
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py
index ede517fc3a..0d99a37011 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_l2gateway.py
@@ -18,11 +18,11 @@ import mock
 from oslo.serialization import jsonutils
 
 from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.common import utils as nsx_utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
 from neutron.tests.unit import test_api_v2
+from vmware_nsx.neutron.plugins.vmware.common import utils as nsx_utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
 from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base
 
 _uuid = test_api_v2._uuid
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py
index c1b2c71466..29a935e6d7 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_lsn.py
@@ -19,9 +19,9 @@ from oslo.serialization import jsonutils
 from neutron.common import exceptions
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.nsxlib import lsn as lsnlib
 from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsnlib
 
 
 class LSNTestCase(base.BaseTestCase):
@@ -29,7 +29,7 @@ class LSNTestCase(base.BaseTestCase):
     def setUp(self):
         super(LSNTestCase, self).setUp()
         self.mock_request_p = mock.patch(
-            'neutron.plugins.vmware.nsxlib.do_request')
+            'vmware_nsx.neutron.plugins.vmware.nsxlib.do_request')
         self.mock_request = self.mock_request_p.start()
         self.cluster = mock.Mock()
         self.cluster.default_service_cluster_uuid = 'foo'
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py
index d62cb0c607..9b1fb0192b 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_queue.py
@@ -18,8 +18,8 @@ import mock
 
 from neutron.common import exceptions
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import queue as queuelib
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import queue as queuelib
 from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base
 
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_router.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_router.py
index 217d0fa362..b367d8a4e6 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_router.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_router.py
@@ -21,13 +21,14 @@ from oslo.config import cfg
 from neutron.common import exceptions
 from neutron.openstack.common import uuidutils
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.api_client import version as version_module
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import switch as switchlib
 from neutron.tests.unit import test_api_v2
+from vmware_nsx.neutron.plugins.vmware.api_client import (
+    version as version_module)
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
 from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base
 
 _uuid = test_api_v2._uuid
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py
index a9fbe2c275..83493aa467 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_secgroup.py
@@ -15,9 +15,9 @@
 #
 
 from neutron.common import exceptions
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
 from neutron.tests.unit import test_api_v2
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
 from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base
 
 _uuid = test_api_v2._uuid
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py
index 31de145c37..ae81d3b8df 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_switch.py
@@ -19,9 +19,9 @@ import mock
 
 from neutron.common import constants
 from neutron.common import exceptions
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.nsxlib import switch as switchlib
 from neutron.tests.unit import test_api_v2
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
 from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base
 
 _uuid = test_api_v2._uuid
diff --git a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py
index a50f94283b..9adea7cce9 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/nsxlib/test_versioning.py
@@ -15,10 +15,11 @@
 #
 
 from neutron.plugins.vmware.api_client import exception
-from neutron.plugins.vmware.api_client import version as version_module
-from neutron.plugins.vmware.nsxlib import router as routerlib
-from neutron.plugins.vmware.nsxlib import versioning
 from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware.api_client import (
+    version as version_module)
+from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
+from vmware_nsx.neutron.plugins.vmware.nsxlib import versioning
 
 
 class TestVersioning(base.BaseTestCase):
diff --git a/vmware_nsx/neutron/tests/unit/vmware/test_agent_scheduler.py b/vmware_nsx/neutron/tests/unit/vmware/test_agent_scheduler.py
index 6cb0c9ce8b..cf03c590a6 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/test_agent_scheduler.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/test_agent_scheduler.py
@@ -18,9 +18,9 @@ from oslo.config import cfg
 
 from neutron.common import constants
 from neutron.common import test_lib
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.dhcp_meta import rpc
 from neutron.tests.unit.openvswitch import test_agent_scheduler as test_base
+from vmware_nsx.neutron.plugins.vmware.common import sync
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/test_dhcpmeta.py b/vmware_nsx/neutron/tests/unit/vmware/test_dhcpmeta.py
index d6c519218d..d10f6600cb 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/test_dhcpmeta.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/test_dhcpmeta.py
@@ -23,13 +23,13 @@ from neutron import context
 from neutron.plugins.vmware.api_client import exception
 from neutron.plugins.vmware.common import exceptions as p_exc
 from neutron.plugins.vmware.dbexts import lsn_db
-from neutron.plugins.vmware.dhcp_meta import constants
-from neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man
-from neutron.plugins.vmware.dhcp_meta import migration as mig_man
-from neutron.plugins.vmware.dhcp_meta import nsx
-from neutron.plugins.vmware.dhcp_meta import rpc
 from neutron.tests import base
 from neutron.tests.unit import testlib_api
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import constants
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import migration as mig_man
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx
+from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc
 
 
 class DhcpMetadataBuilderTestCase(base.BaseTestCase):
diff --git a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_opts.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_opts.py
index 300d5a29a1..1ad34ac520 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_opts.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_opts.py
@@ -20,14 +20,14 @@ from oslo.config import cfg
 
 from neutron import manager
 from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.api_client import client
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import config  # noqa
 from neutron.plugins.vmware.common import exceptions
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware import nsx_cluster
-from neutron.plugins.vmware.nsxlib import lsn as lsnlib
 from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware.api_client import client
+from vmware_nsx.neutron.plugins.vmware.api_client import version
+from vmware_nsx.neutron.plugins.vmware.common import config  # noqa
+from vmware_nsx.neutron.plugins.vmware.common import sync
+from vmware_nsx.neutron.plugins.vmware import nsx_cluster
+from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsnlib
 from vmware_nsx.neutron.tests.unit import vmware
 
 BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test')
diff --git a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_plugin.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_plugin.py
index 01645ddf36..3c85ec9ba8 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_plugin.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_plugin.py
@@ -39,18 +39,19 @@ from neutron import manager
 from neutron.openstack.common import log
 from neutron.openstack.common import uuidutils
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.api_client import version as version_module
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.common import utils
-from neutron.plugins.vmware.dbexts import db as nsx_db
-from neutron.plugins.vmware import nsxlib
 from neutron.tests.unit import _test_extension_portbindings as test_bindings
 import neutron.tests.unit.test_db_plugin as test_plugin
 import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode
 import neutron.tests.unit.test_extension_security_group as ext_sg
 import neutron.tests.unit.test_l3_plugin as test_l3_plugin
 from neutron.tests.unit import testlib_api
+from vmware_nsx.neutron.plugins.vmware.api_client import (
+    version as version_module)
+from vmware_nsx.neutron.plugins.vmware.common import sync
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_sync.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_sync.py
index 799b60243f..a9ea96e801 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_sync.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_sync.py
@@ -27,17 +27,17 @@ from neutron.common import exceptions as n_exc
 from neutron import context
 from neutron.extensions import l3
 from neutron.openstack.common import log
-from neutron.plugins.vmware.api_client import client
 from neutron.plugins.vmware.api_client import exception as api_exc
-from neutron.plugins.vmware.api_client import version
-from neutron.plugins.vmware.common import sync
-from neutron.plugins.vmware.dbexts import db
-from neutron.plugins.vmware import nsx_cluster as cluster
-from neutron.plugins.vmware import nsxlib
-from neutron.plugins.vmware import plugin
 from neutron.tests import base
 from neutron.tests.unit import test_api_v2
 from neutron.tests.unit import testlib_api
+from vmware_nsx.neutron.plugins.vmware.api_client import client
+from vmware_nsx.neutron.plugins.vmware.api_client import version
+from vmware_nsx.neutron.plugins.vmware.common import sync
+from vmware_nsx.neutron.plugins.vmware.dbexts import db
+from vmware_nsx.neutron.plugins.vmware import nsx_cluster as cluster
+from vmware_nsx.neutron.plugins.vmware import nsxlib
+from vmware_nsx.neutron.plugins.vmware import plugin
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_utils.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_utils.py
index 74e65adfde..59c053f624 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_utils.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_utils.py
@@ -21,11 +21,11 @@ from neutron.extensions import providernet as pnet
 from neutron.openstack.common import uuidutils
 from neutron.plugins.vmware.api_client import exception as api_exc
 from neutron.plugins.vmware.common import exceptions as nsx_exc
-from neutron.plugins.vmware.common import nsx_utils
-from neutron.plugins.vmware.common import utils
 from neutron.plugins.vmware.dbexts import models
-from neutron.plugins.vmware import nsxlib
 from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
+from vmware_nsx.neutron.plugins.vmware.common import utils
+from vmware_nsx.neutron.plugins.vmware import nsxlib
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base as nsx_base
 
diff --git a/vmware_nsx/neutron/tests/unit/vmware/test_nsx_v_plugin.py b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_v_plugin.py
new file mode 100644
index 0000000000..431e2cadf6
--- /dev/null
+++ b/vmware_nsx/neutron/tests/unit/vmware/test_nsx_v_plugin.py
@@ -0,0 +1,1614 @@
+# Copyright (c) 2012 OpenStack Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import contextlib
+from eventlet import greenthread
+import mock
+from oslo.config import cfg
+import webob.exc
+
+from neutron.api.v2 import attributes
+from neutron.common import constants
+from neutron.common import exceptions as n_exc
+from neutron.common import ipv6_utils
+import neutron.common.test_lib as test_lib
+from neutron import context
+from neutron.extensions import external_net
+from neutron.extensions import l3
+from neutron.extensions import l3_ext_gw_mode
+from neutron.extensions import portbindings
+from neutron.extensions import providernet as pnet
+from neutron.extensions import securitygroup as secgrp
+from neutron import manager
+from neutron.openstack.common import uuidutils
+from neutron.tests.unit import _test_extension_portbindings as test_bindings
+import neutron.tests.unit.test_db_plugin as test_plugin
+import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair
+import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode
+import neutron.tests.unit.test_extension_security_group as ext_sg
+import neutron.tests.unit.test_l3_plugin as test_l3_plugin
+from neutron.tests.unit import testlib_api
+from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
+from vmware_nsx.neutron.plugins.vmware.extensions import (
+    distributedrouter as dist_router)
+from vmware_nsx.neutron.plugins.vmware.extensions import (
+    vnic_index as ext_vnic_idx)
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
+    constants as vcns_const)
+from vmware_nsx.neutron.plugins.vmware.vshield import edge_utils
+from vmware_nsx.neutron.tests.unit import vmware
+from vmware_nsx.neutron.tests.unit.vmware.extensions import test_vnic_index
+from vmware_nsx.neutron.tests.unit.vmware.vshield import fake_vcns
+
+PLUGIN_NAME = 'vmware_nsx.neutron.plugins.vmware.plugin.NsxVPlugin'
+
+_uuid = uuidutils.generate_uuid
+
+
+class NsxVPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
+
+    def _create_network(self, fmt, name, admin_state_up,
+                        arg_list=None, providernet_args=None, **kwargs):
+        data = {'network': {'name': name,
+                            'admin_state_up': admin_state_up,
+                            'tenant_id': self._tenant_id}}
+        # Fix to allow the router:external attribute and any other
+        # attributes containing a colon to be passed with
+        # a double underscore instead
+        kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items())
+        if external_net.EXTERNAL in kwargs:
+            arg_list = (external_net.EXTERNAL, ) + (arg_list or ())
+
+        attrs = kwargs
+        if providernet_args:
+            attrs.update(providernet_args)
+        for arg in (('admin_state_up', 'tenant_id', 'shared') +
+                    (arg_list or ())):
+            # Arg must be present and not empty
+            if arg in kwargs and kwargs[arg]:
+                data['network'][arg] = kwargs[arg]
+        network_req = self.new_create_request('networks', data, fmt)
+        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
+            # create a specific auth context for this request
+            network_req.environ['neutron.context'] = context.Context(
+                '', kwargs['tenant_id'])
+        return network_req.get_response(self.api)
+
+    def setUp(self,
+              plugin=PLUGIN_NAME,
+              ext_mgr=None,
+              service_plugins=None):
+        test_lib.test_config['config_files'] = [
+            vmware.get_fake_conf('nsx.ini.test')]
+        mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
+        mock_vcns_instance = mock_vcns.start()
+        self.fc2 = fake_vcns.FakeVcns()
+        mock_vcns_instance.return_value = self.fc2
+        edge_utils.query_dhcp_service_config = mock.Mock(return_value=[])
+        self.mock_create_dhcp_service = mock.patch("%s.%s" % (
+            vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service'))
+        self.mock_create_dhcp_service.start()
+        mock_update_dhcp_service = mock.patch("%s.%s" % (
+            vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service'))
+        mock_update_dhcp_service.start()
+        mock_delete_dhcp_service = mock.patch("%s.%s" % (
+            vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service'))
+        mock_delete_dhcp_service.start()
+        super(NsxVPluginV2TestCase, self).setUp(plugin=plugin,
+                                                ext_mgr=ext_mgr)
+        self.addCleanup(self.fc2.reset_all)
+
+    def test_get_vlan_network_name(self):
+        p = manager.NeutronManager.get_plugin()
+        id = uuidutils.generate_uuid()
+        net = {'name': '',
+               'id': id}
+        expected = id
+        self.assertEqual(expected,
+                         p._get_vlan_network_name(net))
+        net = {'name': 'pele',
+               'id': id}
+        expected = '%s-%s' % ('pele', id)
+        self.assertEqual(expected,
+                         p._get_vlan_network_name(net))
+        name = 'X' * 500
+        net = {'name': name,
+               'id': id}
+        expected = '%s-%s' % (name[:43], id)
+        self.assertEqual(expected,
+                         p._get_vlan_network_name(net))
+
+
+class TestNetworksV2(test_plugin.TestNetworksV2, NsxVPluginV2TestCase):
+
+    def _test_create_bridge_network(self, vlan_id=0):
+        net_type = vlan_id and 'vlan' or 'flat'
+        name = 'bridge_net'
+        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
+                    ('status', 'ACTIVE'), ('shared', False),
+                    (pnet.NETWORK_TYPE, net_type),
+                    (pnet.PHYSICAL_NETWORK, 'tzuuid'),
+                    (pnet.SEGMENTATION_ID, vlan_id)]
+        providernet_args = {pnet.NETWORK_TYPE: net_type,
+                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
+        if vlan_id:
+            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
+        with self.network(name=name,
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.PHYSICAL_NETWORK,
+                                    pnet.SEGMENTATION_ID)) as net:
+            for k, v in expected:
+                self.assertEqual(net['network'][k], v)
+
+    def test_create_bridge_network(self):
+        self._test_create_bridge_network()
+
+    def test_create_bridge_vlan_network(self):
+        self._test_create_bridge_network(vlan_id=123)
+
+    def test_create_bridge_vlan_network_outofrange_returns_400(self):
+        with testlib_api.ExpectedException(
+                webob.exc.HTTPClientError) as ctx_manager:
+            self._test_create_bridge_network(vlan_id=5000)
+        self.assertEqual(ctx_manager.exception.code, 400)
+
+    def test_create_external_portgroup_network(self):
+        name = 'ext_net'
+        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
+                    ('status', 'ACTIVE'), ('shared', False),
+                    (external_net.EXTERNAL, True),
+                    (pnet.NETWORK_TYPE, 'portgroup'),
+                    (pnet.PHYSICAL_NETWORK, 'tzuuid')]
+        providernet_args = {pnet.NETWORK_TYPE: 'portgroup',
+                            pnet.PHYSICAL_NETWORK: 'tzuuid',
+                            external_net.EXTERNAL: True}
+        with self.network(name=name,
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.PHYSICAL_NETWORK,
+                                    external_net.EXTERNAL)) as net:
+            for k, v in expected:
+                self.assertEqual(net['network'][k], v)
+
+    def test_delete_network_after_removing_subnet(self):
+        gateway_ip = '10.0.0.1'
+        cidr = '10.0.0.0/24'
+        fmt = 'json'
+        # Create new network
+        res = self._create_network(fmt=fmt, name='net',
+                                   admin_state_up=True)
+        network = self.deserialize(fmt, res)
+        subnet = self._make_subnet(fmt, network, gateway_ip,
+                                   cidr, ip_version=4)
+        req = self.new_delete_request('subnets', subnet['subnet']['id'])
+        sub_del_res = req.get_response(self.api)
+        self.assertEqual(sub_del_res.status_int, 204)
+        req = self.new_delete_request('networks', network['network']['id'])
+        net_del_res = req.get_response(self.api)
+        self.assertEqual(net_del_res.status_int, 204)
+
+    def test_list_networks_with_shared(self):
+        with self.network(name='net1'):
+            with self.network(name='net2', shared=True):
+                req = self.new_list_request('networks')
+                res = self.deserialize('json', req.get_response(self.api))
+                self.assertEqual(len(res['networks']), 2)
+                req_2 = self.new_list_request('networks')
+                req_2.environ['neutron.context'] = context.Context('',
+                                                                   'somebody')
+                res = self.deserialize('json', req_2.get_response(self.api))
+                # tenant must see a single network
+                self.assertEqual(len(res['networks']), 1)
+
+    def test_create_network_name_exceeds_40_chars(self):
+        name = 'this_is_a_network_whose_name_is_longer_than_40_chars'
+        with self.network(name=name) as net:
+            # Assert neutron name is not truncated
+            self.assertEqual(net['network']['name'], name)
+
+    def test_update_network_with_admin_false(self):
+        data = {'network': {'admin_state_up': False}}
+        with self.network() as net:
+            plugin = manager.NeutronManager.get_plugin()
+            self.assertRaises(NotImplementedError,
+                              plugin.update_network,
+                              context.get_admin_context(),
+                              net['network']['id'], data)
+
+    def test_create_extend_dvs_provider_network(self):
+        name = 'provider_net'
+        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
+                    ('status', 'ACTIVE'), ('shared', False),
+                    (pnet.NETWORK_TYPE, 'flat'),
+                    (pnet.PHYSICAL_NETWORK, 'dvs-uuid')]
+        providernet_args = {pnet.NETWORK_TYPE: 'flat',
+                            pnet.PHYSICAL_NETWORK: 'dvs-uuid'}
+        with self.network(name=name,
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.PHYSICAL_NETWORK)) as net:
+            for k, v in expected:
+                self.assertEqual(net['network'][k], v)
+
+    def test_create_same_vlan_network_with_different_dvs(self):
+        name = 'dvs-provider-net'
+        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
+                    ('status', 'ACTIVE'), ('shared', False),
+                    (pnet.NETWORK_TYPE, 'vlan'),
+                    (pnet.SEGMENTATION_ID, 43),
+                    (pnet.PHYSICAL_NETWORK, 'dvs-uuid-1')]
+        providernet_args = {pnet.NETWORK_TYPE: 'vlan',
+                            pnet.SEGMENTATION_ID: 43,
+                            pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'}
+        with self.network(name=name,
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.SEGMENTATION_ID,
+                                    pnet.PHYSICAL_NETWORK)) as net:
+            for k, v in expected:
+                self.assertEqual(net['network'][k], v)
+
+            expected_same_vlan = [(pnet.NETWORK_TYPE, 'vlan'),
+                                  (pnet.SEGMENTATION_ID, 43),
+                                  (pnet.PHYSICAL_NETWORK, 'dvs-uuid-2')]
+            providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan',
+                                  pnet.SEGMENTATION_ID: 43,
+                                  pnet.PHYSICAL_NETWORK: 'dvs-uuid-2'}
+            with self.network(name=name,
+                              providernet_args=providernet_args_1,
+                              arg_list=(pnet.NETWORK_TYPE,
+                                        pnet.SEGMENTATION_ID,
+                                        pnet.PHYSICAL_NETWORK)) as net1:
+                for k, v in expected_same_vlan:
+                    self.assertEqual(net1['network'][k], v)
+
+
+class TestVnicIndex(NsxVPluginV2TestCase,
+                    test_vnic_index.VnicIndexDbTestCase):
+    def test_update_port_twice_with_the_same_index(self):
+        """Tests that updates which does not modify the port vnic
+        index association do not produce any errors
+        """
+        with self.subnet() as subnet:
+            with self.port(subnet=subnet) as port:
+                res = self._port_index_update(port['port']['id'], 2)
+                self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX])
+                res = self._port_index_update(port['port']['id'], 2)
+                self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX])
+
+
+class TestPortsV2(NsxVPluginV2TestCase,
+                  test_plugin.TestPortsV2,
+                  test_bindings.PortBindingsTestCase,
+                  test_bindings.PortBindingsHostTestCaseMixin,
+                  test_bindings.PortBindingsVnicTestCaseMixin):
+
+    VIF_TYPE = portbindings.VIF_TYPE_DVS
+    HAS_PORT_FILTER = True
+
+    def test_create_port_json(self):
+        keys = [('admin_state_up', True), ('status', self.port_create_status)]
+        with self.port(name='myname') as port:
+            for k, v in keys:
+                self.assertEqual(port['port'][k], v)
+            self.assertIn('mac_address', port['port'])
+            ips = port['port']['fixed_ips']
+            self.assertEqual(len(ips), 1)
+            self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
+            self.assertEqual('myname', port['port']['name'])
+
+    def test_list_ports(self):
+        # for this test we need to enable overlapping ips
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(self.port(subnet),
+                                   self.port(subnet),
+                                   self.port(subnet)) as ports:
+                self._test_list_resources('port', ports)
+
+    def test_list_ports_public_network(self):
+        with self.network(shared=True) as network:
+            with self.subnet(network, enable_dhcp=False) as subnet:
+                with contextlib.nested(self.port(subnet, tenant_id='tenant_1'),
+                                       self.port(subnet, tenant_id='tenant_2')
+                                       ) as (port1, port2):
+                    # Admin request - must return both ports
+                    self._test_list_resources('port', [port1, port2])
+                    # Tenant_1 request - must return single port
+                    q_context = context.Context('', 'tenant_1')
+                    self._test_list_resources('port', [port1],
+                                              neutron_context=q_context)
+                    # Tenant_2 request - must return single port
+                    q_context = context.Context('', 'tenant_2')
+                    self._test_list_resources('port', [port2],
+                                              neutron_context=q_context)
+
+    def test_list_ports_with_pagination_emulated(self):
+        helper_patcher = mock.patch(
+            'neutron.api.v2.base.Controller._get_pagination_helper',
+            new=test_plugin._fake_get_pagination_helper)
+        helper_patcher.start()
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(self.port(subnet,
+                                             mac_address='00:00:00:00:00:01'),
+                                   self.port(subnet,
+                                             mac_address='00:00:00:00:00:02'),
+                                   self.port(subnet,
+                                             mac_address='00:00:00:00:00:03')
+                                   ) as (port1, port2, port3):
+                self._test_list_with_pagination('port',
+                                                (port1, port2, port3),
+                                                ('mac_address', 'asc'), 2, 2)
+
+    def test_list_ports_with_pagination_native(self):
+        if self._skip_native_pagination:
+            self.skipTest("Skip test for not implemented pagination feature")
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(self.port(subnet,
+                                             mac_address='00:00:00:00:00:01'),
+                                   self.port(subnet,
+                                             mac_address='00:00:00:00:00:02'),
+                                   self.port(subnet,
+                                             mac_address='00:00:00:00:00:03')
+                                   ) as (port1, port2, port3):
+                self._test_list_with_pagination('port',
+                                                (port1, port2, port3),
+                                                ('mac_address', 'asc'), 2, 2)
+
+    def test_list_ports_with_sort_emulated(self):
+        helper_patcher = mock.patch(
+            'neutron.api.v2.base.Controller._get_sorting_helper',
+            new=test_plugin._fake_get_sorting_helper)
+        helper_patcher.start()
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(self.port(subnet, admin_state_up='True',
+                                             mac_address='00:00:00:00:00:01'),
+                                   self.port(subnet, admin_state_up='False',
+                                             mac_address='00:00:00:00:00:02'),
+                                   self.port(subnet, admin_state_up='False',
+                                             mac_address='00:00:00:00:00:03')
+                                   ) as (port1, port2, port3):
+                self._test_list_with_sort('port', (port3, port2, port1),
+                                          [('admin_state_up', 'asc'),
+                                           ('mac_address', 'desc')])
+
+    def test_list_ports_with_sort_native(self):
+        if self._skip_native_sorting:
+            self.skipTest("Skip test for not implemented sorting feature")
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(self.port(subnet, admin_state_up='True',
+                                             mac_address='00:00:00:00:00:01'),
+                                   self.port(subnet, admin_state_up='False',
+                                             mac_address='00:00:00:00:00:02'),
+                                   self.port(subnet, admin_state_up='False',
+                                             mac_address='00:00:00:00:00:03')
+                                   ) as (port1, port2, port3):
+                self._test_list_with_sort('port', (port3, port2, port1),
+                                          [('admin_state_up', 'asc'),
+                                           ('mac_address', 'desc')])
+
+    def test_update_port_delete_ip(self):
+        # This test case overrides the default because the nsx plugin
+        # implements port_security/security groups and it is not allowed
+        # to remove an ip address from a port unless the security group
+        # is first removed.
+        with self.subnet() as subnet:
+            with self.port(subnet=subnet) as port:
+                data = {'port': {'admin_state_up': False,
+                                 'fixed_ips': [],
+                                 secgrp.SECURITYGROUPS: []}}
+                req = self.new_update_request('ports',
+                                              data, port['port']['id'])
+                res = self.deserialize('json', req.get_response(self.api))
+                self.assertEqual(res['port']['admin_state_up'],
+                                 data['port']['admin_state_up'])
+                self.assertEqual(res['port']['fixed_ips'],
+                                 data['port']['fixed_ips'])
+
+    def test_update_port_index(self):
+        with self.subnet() as subnet:
+            with self.port(subnet=subnet) as port:
+                self.assertIsNone(port['port']['vnic_index'])
+                data = {'port': {'vnic_index': 1}}
+                req = self.new_update_request('ports',
+                                              data, port['port']['id'])
+                res = self.deserialize('json', req.get_response(self.api))
+                self.assertEqual(1, res['port']['vnic_index'])
+
+    def test_update_port_with_compute_device_owner(self):
+        """
+        Test that DHCP binding is created when ports 'device_owner'
+        is updated to compute, for example when attaching an interface to a
+        instance with existing port.
+        """
+        with self.port() as port:
+            with mock.patch(PLUGIN_NAME + '._create_dhcp_static_binding') as (
+                    _create_dhcp_static_binding_mock):
+                update = {'port': {'device_owner'}}
+                self.new_update_request('ports',
+                                        update, port['port']['id'])
+                _create_dhcp_static_binding_mock.assert_called_once()
+
+    def test_create_port_public_network_with_ip(self):
+        with self.network(shared=True) as network:
+            with self.subnet(enable_dhcp=False,
+                             network=network, cidr='10.0.0.0/24') as subnet:
+                keys = [('admin_state_up', True),
+                        ('status', self.port_create_status),
+                        ('fixed_ips', [{'subnet_id': subnet['subnet']['id'],
+                                        'ip_address': '10.0.0.2'}])]
+                port_res = self._create_port(self.fmt,
+                                             network['network']['id'],
+                                             webob.exc.HTTPCreated.code,
+                                             tenant_id='another_tenant',
+                                             set_context=True)
+                port = self.deserialize(self.fmt, port_res)
+                for k, v in keys:
+                    self.assertEqual(port['port'][k], v)
+                self.assertIn('mac_address', port['port'])
+                self._delete('ports', port['port']['id'])
+
+    def test_no_more_port_exception(self):
+        with self.subnet(enable_dhcp=False, cidr='10.0.0.0/32',
+                         gateway_ip=None) as subnet:
+            id = subnet['subnet']['network_id']
+            res = self._create_port(self.fmt, id)
+            data = self.deserialize(self.fmt, res)
+            msg = str(n_exc.IpAddressGenerationFailure(net_id=id))
+            self.assertEqual(data['NeutronError']['message'], msg)
+            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
+
+    def test_ports_vif_host(self):
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        host_arg = {portbindings.HOST_ID: self.hostname}
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(
+                    self.port(subnet, name='name1',
+                              arg_list=(portbindings.HOST_ID,),
+                              **host_arg),
+                    self.port(subnet, name='name2')):
+                ctx = context.get_admin_context()
+                ports = self._list('ports', neutron_context=ctx)['ports']
+                self.assertEqual(2, len(ports))
+                for port in ports:
+                    if port['name'] == 'name1':
+                        self._check_response_portbindings_host(port)
+                    else:
+                        self.assertFalse(port[portbindings.HOST_ID])
+                # By default user is admin - now test non admin user
+                ctx = context.Context(user_id=None,
+                                      tenant_id=self._tenant_id,
+                                      is_admin=False,
+                                      read_deleted="no")
+                ports = self._list('ports', neutron_context=ctx)['ports']
+                self.assertEqual(2, len(ports))
+                for non_admin_port in ports:
+                    self._check_response_no_portbindings_host(non_admin_port)
+
+    def test_ports_vif_host_update(self):
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        host_arg = {portbindings.HOST_ID: self.hostname}
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(
+                    self.port(subnet, name='name1',
+                              arg_list=(portbindings.HOST_ID,),
+                              **host_arg),
+                    self.port(subnet, name='name2')) as (port1, port2):
+                data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
+                req = self.new_update_request(
+                    'ports', data, port1['port']['id'])
+                req.get_response(self.api)
+                req = self.new_update_request(
+                    'ports', data, port2['port']['id'])
+                ctx = context.get_admin_context()
+                req.get_response(self.api)
+                ports = self._list('ports', neutron_context=ctx)['ports']
+        self.assertEqual(2, len(ports))
+        for port in ports:
+            self.assertEqual('testhosttemp', port[portbindings.HOST_ID])
+
+    def test_ports_vif_details(self):
+        plugin = manager.NeutronManager.get_plugin()
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(self.port(subnet), self.port(subnet)):
+                ctx = context.get_admin_context()
+                ports = plugin.get_ports(ctx)
+                self.assertEqual(len(ports), 2)
+                for port in ports:
+                    self._check_response_portbindings(port)
+                # By default user is admin - now test non admin user
+                ctx = self._get_non_admin_context()
+                ports = self._list('ports', neutron_context=ctx)['ports']
+                self.assertEqual(len(ports), 2)
+                for non_admin_port in ports:
+                    self._check_response_no_portbindings(non_admin_port)
+
+    def test_ports_vnic_type(self):
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(
+                    self.port(subnet, name='name1',
+                              arg_list=(portbindings.VNIC_TYPE,),
+                              **vnic_arg),
+                    self.port(subnet, name='name2')):
+                ctx = context.get_admin_context()
+                ports = self._list('ports', neutron_context=ctx)['ports']
+                self.assertEqual(2, len(ports))
+                for port in ports:
+                    if port['name'] == 'name1':
+                        self._check_response_portbindings_vnic_type(port)
+                    else:
+                        self.assertEqual(portbindings.VNIC_NORMAL,
+                                         port[portbindings.VNIC_TYPE])
+                # By default user is admin - now test non admin user
+                ctx = context.Context(user_id=None,
+                                      tenant_id=self._tenant_id,
+                                      is_admin=False,
+                                      read_deleted="no")
+                ports = self._list('ports', neutron_context=ctx)['ports']
+                self.assertEqual(2, len(ports))
+                for non_admin_port in ports:
+                    self._check_response_portbindings_vnic_type(non_admin_port)
+
+    def test_ports_vnic_type_list(self):
+        cfg.CONF.set_default('allow_overlapping_ips', True)
+        vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
+        with self.subnet(enable_dhcp=False) as subnet:
+            with contextlib.nested(
+                self.port(subnet, name='name1',
+                          arg_list=(portbindings.VNIC_TYPE,),
+                          **vnic_arg),
+                self.port(subnet, name='name2'),
+                self.port(subnet, name='name3',
+                          arg_list=(portbindings.VNIC_TYPE,),
+                          **vnic_arg),) as (port1, port2, port3):
+                self._test_list_resources(
+                    'port', (port1, port2, port3),
+                    query_params='%s=%s' % (portbindings.VNIC_TYPE,
+                                            self.vnic_type))
+
+    def test_range_allocation(self):
+        with self.subnet(enable_dhcp=False, gateway_ip='10.0.0.3',
+                         cidr='10.0.0.0/29') as subnet:
+                kwargs = {"fixed_ips":
+                          [{'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']}]}
+                net_id = subnet['subnet']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port = self.deserialize(self.fmt, res)
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 5)
+                alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5',
+                         '10.0.0.6']
+                for ip in ips:
+                    self.assertIn(ip['ip_address'], alloc)
+                    self.assertEqual(ip['subnet_id'],
+                                     subnet['subnet']['id'])
+                    alloc.remove(ip['ip_address'])
+                self.assertEqual(len(alloc), 0)
+                self._delete('ports', port['port']['id'])
+
+        with self.subnet(enable_dhcp=False, gateway_ip='11.0.0.6',
+                         cidr='11.0.0.0/29') as subnet:
+                kwargs = {"fixed_ips":
+                          [{'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet['subnet']['id']}]}
+                net_id = subnet['subnet']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port = self.deserialize(self.fmt, res)
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 5)
+                alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4',
+                         '11.0.0.5']
+                for ip in ips:
+                    self.assertIn(ip['ip_address'], alloc)
+                    self.assertEqual(ip['subnet_id'],
+                                     subnet['subnet']['id'])
+                    alloc.remove(ip['ip_address'])
+                self.assertEqual(len(alloc), 0)
+                self._delete('ports', port['port']['id'])
+
+    def test_requested_duplicate_ip(self):
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                # Check configuring of duplicate IP
+                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
+                                         'ip_address': ips[0]['ip_address']}]}
+                net_id = port['port']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
+
+    def test_requested_invalid_fixed_ips(self):
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                # Test invalid subnet_id
+                kwargs = {"fixed_ips":
+                          [{'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id':
+                            '00000000-ffff-ffff-ffff-000000000000'}]}
+                net_id = port['port']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port2 = self.deserialize(self.fmt, res)
+                self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
+
+                # Test invalid IP address on specified subnet_id
+                kwargs = {"fixed_ips":
+                          [{'subnet_id': subnet['subnet']['id'],
+                            'ip_address': '1.1.1.1'}]}
+                net_id = port['port']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port2 = self.deserialize(self.fmt, res)
+                self.assertEqual(res.status_int,
+                                 webob.exc.HTTPClientError.code)
+
+                # Test invalid addresses - IP's not on subnet or network
+                # address or broadcast address
+                bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255']
+                net_id = port['port']['network_id']
+                for ip in bad_ips:
+                    kwargs = {"fixed_ips": [{'ip_address': ip}]}
+                    res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                    port2 = self.deserialize(self.fmt, res)
+                    self.assertEqual(res.status_int,
+                                     webob.exc.HTTPClientError.code)
+
+                # Enable allocation of gateway address
+                kwargs = {"fixed_ips":
+                          [{'subnet_id': subnet['subnet']['id'],
+                            'ip_address': '10.0.0.1'}]}
+                net_id = port['port']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port2 = self.deserialize(self.fmt, res)
+                ips = port2['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.1')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                self._delete('ports', port2['port']['id'])
+
+    def test_requested_split(self):
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                ports_to_delete = []
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                # Allocate specific IP
+                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
+                                         'ip_address': '10.0.0.5'}]}
+                net_id = port['port']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port2 = self.deserialize(self.fmt, res)
+                ports_to_delete.append(port2)
+                ips = port2['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.5')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                # Allocate specific IP's
+                allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6']
+
+                for a in allocated:
+                    res = self._create_port(self.fmt, net_id=net_id)
+                    port2 = self.deserialize(self.fmt, res)
+                    ports_to_delete.append(port2)
+                    ips = port2['port']['fixed_ips']
+                    self.assertEqual(len(ips), 1)
+                    self.assertEqual(ips[0]['ip_address'], a)
+                    self.assertEqual(ips[0]['subnet_id'],
+                                     subnet['subnet']['id'])
+
+                for p in ports_to_delete:
+                    self._delete('ports', p['port']['id'])
+
+    def test_requested_ips_only(self):
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21',
+                            '10.0.0.3', '10.0.0.17', '10.0.0.19']
+                ports_to_delete = []
+                for i in ips_only:
+                    kwargs = {"fixed_ips": [{'ip_address': i}]}
+                    net_id = port['port']['network_id']
+                    res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                    port = self.deserialize(self.fmt, res)
+                    ports_to_delete.append(port)
+                    ips = port['port']['fixed_ips']
+                    self.assertEqual(len(ips), 1)
+                    self.assertEqual(ips[0]['ip_address'], i)
+                    self.assertEqual(ips[0]['subnet_id'],
+                                     subnet['subnet']['id'])
+                for p in ports_to_delete:
+                    self._delete('ports', p['port']['id'])
+
+    def test_requested_subnet_id(self):
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                # Request a IP from specific subnet
+                kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]}
+                net_id = port['port']['network_id']
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port2 = self.deserialize(self.fmt, res)
+                ips = port2['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                self._delete('ports', port2['port']['id'])
+
+    def test_requested_subnet_id_v4_and_v6(self):
+        with self.subnet(enable_dhcp=False) as subnet:
+                # Get a IPv4 and IPv6 address
+                tenant_id = subnet['subnet']['tenant_id']
+                net_id = subnet['subnet']['network_id']
+                res = self._create_subnet(
+                    self.fmt,
+                    tenant_id=tenant_id,
+                    net_id=net_id,
+                    cidr='2607:f0d0:1002:51::/124',
+                    ip_version=6,
+                    gateway_ip=attributes.ATTR_NOT_SPECIFIED,
+                    enable_dhcp=False)
+                subnet2 = self.deserialize(self.fmt, res)
+                kwargs = {"fixed_ips":
+                          [{'subnet_id': subnet['subnet']['id']},
+                           {'subnet_id': subnet2['subnet']['id']}]}
+                res = self._create_port(self.fmt, net_id=net_id, **kwargs)
+                port3 = self.deserialize(self.fmt, res)
+                ips = port3['port']['fixed_ips']
+                self.assertEqual(len(ips), 2)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                self.assertEqual(ips[1]['ip_address'], '2607:f0d0:1002:51::2')
+                self.assertEqual(ips[1]['subnet_id'], subnet2['subnet']['id'])
+                res = self._create_port(self.fmt, net_id=net_id)
+                port4 = self.deserialize(self.fmt, res)
+                # Check that a v4 and a v6 address are allocated
+                ips = port4['port']['fixed_ips']
+                self.assertEqual(len(ips), 2)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                self.assertEqual(ips[1]['ip_address'], '2607:f0d0:1002:51::3')
+                self.assertEqual(ips[1]['subnet_id'], subnet2['subnet']['id'])
+                self._delete('ports', port3['port']['id'])
+                self._delete('ports', port4['port']['id'])
+
+    def test_update_port_add_additional_ip(self):
+        """Test update of port with additional IP."""
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                data = {'port': {'admin_state_up': False,
+                                 'fixed_ips': [{'subnet_id':
+                                                subnet['subnet']['id']},
+                                               {'subnet_id':
+                                                subnet['subnet']['id']}]}}
+                req = self.new_update_request('ports', data,
+                                              port['port']['id'])
+                res = self.deserialize(self.fmt, req.get_response(self.api))
+                self.assertEqual(res['port']['admin_state_up'],
+                                 data['port']['admin_state_up'])
+                ips = res['port']['fixed_ips']
+                self.assertEqual(len(ips), 2)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                self.assertEqual(ips[1]['ip_address'], '10.0.0.4')
+                self.assertEqual(ips[1]['subnet_id'], subnet['subnet']['id'])
+
+    def test_update_port_update_ip(self):
+        """Test update of port IP.
+
+        Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
+        """
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                data = {'port': {'fixed_ips': [{'subnet_id':
+                                                subnet['subnet']['id'],
+                                                'ip_address': "10.0.0.10"}]}}
+                req = self.new_update_request('ports', data,
+                                              port['port']['id'])
+                res = self.deserialize(self.fmt, req.get_response(self.api))
+                ips = res['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+
+    def test_update_port_update_ip_address_only(self):
+        with self.subnet(enable_dhcp=False) as subnet:
+            with self.port(subnet=subnet) as port:
+                ips = port['port']['fixed_ips']
+                self.assertEqual(len(ips), 1)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                data = {'port': {'fixed_ips': [{'subnet_id':
+                                                subnet['subnet']['id'],
+                                                'ip_address': "10.0.0.10"},
+                                               {'ip_address': "10.0.0.2"}]}}
+                req = self.new_update_request('ports', data,
+                                              port['port']['id'])
+                res = self.deserialize(self.fmt, req.get_response(self.api))
+                ips = res['port']['fixed_ips']
+                self.assertEqual(len(ips), 2)
+                self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
+                self.assertEqual(ips[1]['ip_address'], '10.0.0.10')
+                self.assertEqual(ips[1]['subnet_id'], subnet['subnet']['id'])
+
+    def test_requested_subnet_id_v4_and_v6_slaac(self):
+        with self.network() as network:
+            with contextlib.nested(
+                self.subnet(network, enable_dhcp=False),
+                self.subnet(network,
+                            cidr='2607:f0d0:1002:51::/64',
+                            ip_version=6,
+                            gateway_ip='fe80::1',
+                            ipv6_address_mode=constants.IPV6_SLAAC)
+            ) as (subnet, subnet2):
+                with self.port(
+                    subnet,
+                    fixed_ips=[{'subnet_id': subnet['subnet']['id']},
+                               {'subnet_id': subnet2['subnet']['id']}]
+                ) as port:
+                    ips = port['port']['fixed_ips']
+                    self.assertEqual(len(ips), 2)
+                    self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
+                    port_mac = port['port']['mac_address']
+                    subnet_cidr = subnet2['subnet']['cidr']
+                    eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
+                            subnet_cidr, port_mac))
+                    self.assertEqual(ips[1]['ip_address'], eui_addr)
+
+
+class TestSubnetsV2(NsxVPluginV2TestCase,
+                    test_plugin.TestSubnetsV2):
+
+    def setUp(self,
+              plugin=PLUGIN_NAME,
+              ext_mgr=None,
+              service_plugins=None):
+        super(TestSubnetsV2, self).setUp()
+        self.context = context.get_admin_context()
+
+    def _create_subnet_bulk(self, fmt, number, net_id, name,
+                            ip_version=4, **kwargs):
+        base_data = {'subnet': {'network_id': net_id,
+                                'ip_version': ip_version,
+                                'enable_dhcp': False,
+                                'tenant_id': self._tenant_id}}
+        # auto-generate cidrs as they should not overlap
+        overrides = dict((k, v)
+                         for (k, v) in zip(range(number),
+                                           [{'cidr': "10.0.%s.0/24" % num}
+                                            for num in range(number)]))
+        kwargs.update({'override': overrides})
+        return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)
+
+    def test_create_subnet_nonzero_cidr(self):
+        with contextlib.nested(
+            self.subnet(enable_dhcp=False, cidr='10.129.122.5/8'),
+            self.subnet(enable_dhcp=False, cidr='11.129.122.5/15'),
+            self.subnet(enable_dhcp=False, cidr='12.129.122.5/16'),
+            self.subnet(enable_dhcp=False, cidr='13.129.122.5/18'),
+            self.subnet(enable_dhcp=False, cidr='14.129.122.5/22'),
+            self.subnet(enable_dhcp=False, cidr='15.129.122.5/24'),
+            self.subnet(enable_dhcp=False, cidr='16.129.122.5/28'),
+            self.subnet(enable_dhcp=False, cidr='17.129.122.5/32',
+                        gateway_ip=None)
+        ) as subs:
+            # the API should accept and correct these for users
+            self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8')
+            self.assertEqual(subs[1]['subnet']['cidr'], '11.128.0.0/15')
+            self.assertEqual(subs[2]['subnet']['cidr'], '12.129.0.0/16')
+            self.assertEqual(subs[3]['subnet']['cidr'], '13.129.64.0/18')
+            self.assertEqual(subs[4]['subnet']['cidr'], '14.129.120.0/22')
+            self.assertEqual(subs[5]['subnet']['cidr'], '15.129.122.0/24')
+            self.assertEqual(subs[6]['subnet']['cidr'], '16.129.122.0/28')
+            self.assertEqual(subs[7]['subnet']['cidr'], '17.129.122.5/32')
+
+    def test_create_subnet_ipv6_attributes(self):
+        # Expected to fail for now as we dont't support IPv6 for NSXv
+        cidr = "fe80::/80"
+        with testlib_api.ExpectedException(
+                webob.exc.HTTPClientError) as ctx_manager:
+            self._test_create_subnet(cidr=cidr)
+            self.assertEqual(ctx_manager.exception.code, 400)
+
+    def test_create_subnet_with_different_dhcp_server(self):
+        self.mock_create_dhcp_service.stop()
+        name = 'dvs-provider-net'
+        providernet_args = {pnet.NETWORK_TYPE: 'vlan',
+                            pnet.SEGMENTATION_ID: 43,
+                            pnet.PHYSICAL_NETWORK: 'dvs-uuid'}
+        with self.network(name=name,
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.SEGMENTATION_ID,
+                                    pnet.PHYSICAL_NETWORK)) as net:
+            self._test_create_subnet(network=net, cidr='10.0.0.0/24')
+            dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX +
+                              net['network']['id'])[:36]
+            dhcp_server_id = nsxv_db.get_nsxv_router_binding(
+                self.context.session, dhcp_router_id)['edge_id']
+            providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan',
+                                  pnet.SEGMENTATION_ID: 43,
+                                  pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'}
+            with self.network(name=name,
+                              providernet_args=providernet_args_1,
+                              arg_list=(pnet.NETWORK_TYPE,
+                                        pnet.SEGMENTATION_ID,
+                                        pnet.PHYSICAL_NETWORK)) as net1:
+                self._test_create_subnet(network=net1, cidr='10.0.1.0/24')
+                router_id = (vcns_const.DHCP_EDGE_PREFIX +
+                             net1['network']['id'])[:36]
+                dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding(
+                    self.context.session, router_id)['edge_id']
+                self.assertNotEqual(dhcp_server_id, dhcp_server_id_1)
+
+    def test_create_subnet_with_different_dhcp_by_flat_net(self):
+        self.mock_create_dhcp_service.stop()
+        name = 'flat-net'
+        providernet_args = {pnet.NETWORK_TYPE: 'flat',
+                            pnet.PHYSICAL_NETWORK: 'dvs-uuid'}
+        with self.network(name=name,
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.PHYSICAL_NETWORK)) as net:
+            self._test_create_subnet(network=net, cidr='10.0.0.0/24')
+            dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX +
+                              net['network']['id'])[:36]
+            dhcp_server_id = nsxv_db.get_nsxv_router_binding(
+                self.context.session, dhcp_router_id)['edge_id']
+            providernet_args_1 = {pnet.NETWORK_TYPE: 'flat',
+                                  pnet.PHYSICAL_NETWORK: 'dvs-uuid'}
+            with self.network(name=name,
+                              providernet_args=providernet_args_1,
+                              arg_list=(pnet.NETWORK_TYPE,
+                                        pnet.PHYSICAL_NETWORK)) as net1:
+                self._test_create_subnet(network=net1, cidr='10.0.1.0/24')
+                router_id = (vcns_const.DHCP_EDGE_PREFIX +
+                             net1['network']['id'])[:36]
+                dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding(
+                    self.context.session, router_id)['edge_id']
+                self.assertNotEqual(dhcp_server_id, dhcp_server_id_1)
+
+
+class TestBasicGet(test_plugin.TestBasicGet, NsxVPluginV2TestCase):
+    pass
+
+
+class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxVPluginV2TestCase):
+    pass
+
+
+class TestL3ExtensionManager(object):
+
+    def get_resources(self):
+        # Simulate extension of L3 attribute map
+        # First apply attribute extensions
+        for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():
+            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
+                l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
+            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
+                dist_router.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
+        # Finally add l3 resources to the global attribute map
+        attributes.RESOURCE_ATTRIBUTE_MAP.update(
+            l3.RESOURCE_ATTRIBUTE_MAP)
+        return l3.L3.get_resources()
+
+    def get_actions(self):
+        return []
+
+    def get_request_extensions(self):
+        return []
+
+
+def backup_l3_attribute_map():
+    """Return a backup of the original l3 attribute map."""
+    return dict((res, attrs.copy()) for
+                (res, attrs) in l3.RESOURCE_ATTRIBUTE_MAP.iteritems())
+
+
+def restore_l3_attribute_map(map_to_restore):
+    """Ensure changes made by fake ext mgrs are reverted."""
+    l3.RESOURCE_ATTRIBUTE_MAP = map_to_restore
+
+
+class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxVPluginV2TestCase):
+
+    def _restore_l3_attribute_map(self):
+        l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk
+
+    def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None):
+        self._l3_attribute_map_bk = {}
+        for item in l3.RESOURCE_ATTRIBUTE_MAP:
+            self._l3_attribute_map_bk[item] = (
+                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
+        cfg.CONF.set_override('task_status_check_interval', 200, group="nsxv")
+
+        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
+        l3_attribute_map_bk = backup_l3_attribute_map()
+        self.addCleanup(restore_l3_attribute_map, l3_attribute_map_bk)
+        ext_mgr = ext_mgr or TestL3ExtensionManager()
+        super(L3NatTest, self).setUp(
+            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
+        plugin_instance = manager.NeutronManager.get_plugin()
+        self._plugin_name = "%s.%s" % (
+            plugin_instance.__module__,
+            plugin_instance.__class__.__name__)
+        self._plugin_class = plugin_instance.__class__
+
+    def tearDown(self):
+        plugin = manager.NeutronManager.get_plugin()
+        _manager = plugin.nsx_v.task_manager
+        # wait max ~10 seconds for all tasks to be finished
+        for i in range(100):
+            if not _manager.has_pending_task():
+                break
+            greenthread.sleep(0.1)
+        if _manager.has_pending_task():
+            _manager.show_pending_tasks()
+            raise Exception(_("Tasks not completed"))
+        _manager.stop()
+        # Ensure the manager thread has been stopped
+        self.assertIsNone(_manager._thread)
+        super(L3NatTest, self).tearDown()
+
+    def _create_l3_ext_network(self, vlan_id=None):
+        name = 'l3_ext_net'
+        return self.network(name=name,
+                            router__external=True)
+
+    @contextlib.contextmanager
+    def router(self, name=None, admin_state_up=True,
+               fmt=None, tenant_id=_uuid(),
+               external_gateway_info=None, set_context=False,
+               **kwargs):
+        # avoid name duplication of edge
+        if not name:
+            name = _uuid()
+        router = self._make_router(fmt or self.fmt, tenant_id, name,
+                                   admin_state_up, external_gateway_info,
+                                   set_context, **kwargs)
+        yield router
+        self._delete('routers', router['router']['id'])
+
+
+class TestL3NatTestCase(L3NatTest,
+                        test_l3_plugin.L3NatDBIntTestCase,
+                        NsxVPluginV2TestCase):
+
+    def _test_create_l3_ext_network(self, vlan_id=0):
+        name = 'l3_ext_net'
+        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
+                    ('status', 'ACTIVE'), ('shared', False),
+                    (external_net.EXTERNAL, True)]
+        with self._create_l3_ext_network(vlan_id) as net:
+            for k, v in expected:
+                self.assertEqual(net['network'][k], v)
+
+    def test_create_l3_ext_network_with_dhcp(self):
+        with self._create_l3_ext_network() as net:
+            with testlib_api.ExpectedException(
+                webob.exc.HTTPClientError) as ctx_manager:
+                with self.subnet(network=net):
+                    self.assertEqual(ctx_manager.exception.code, 400)
+
+    def test_create_l3_ext_network_without_vlan(self):
+        self._test_create_l3_ext_network()
+
+    def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None,
+                                                       validate_ext_gw=False):
+        with self._create_l3_ext_network(vlan_id) as net:
+            with self.subnet(network=net, enable_dhcp=False) as s:
+                data = {'router': {'tenant_id': 'whatever'}}
+                data['router']['name'] = 'router1'
+                data['router']['external_gateway_info'] = {
+                    'network_id': s['subnet']['network_id']}
+                router_req = self.new_create_request('routers', data,
+                                                     self.fmt)
+                try:
+                    res = router_req.get_response(self.ext_api)
+                    router = self.deserialize(self.fmt, res)
+                    self.assertEqual(
+                        s['subnet']['network_id'],
+                        (router['router']['external_gateway_info']
+                         ['network_id']))
+                    if validate_ext_gw:
+                        pass
+                finally:
+                    self._delete('routers', router['router']['id'])
+
+    def test_router_create_with_gwinfo_and_l3_ext_net(self):
+        self._test_router_create_with_gwinfo_and_l3_ext_net()
+
+    def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self):
+        self._test_router_create_with_gwinfo_and_l3_ext_net(444)
+
+    def test_router_add_gateway_invalid_network_returns_404(self):
+        # NOTE(salv-orlando): This unit test has been overriden
+        # as the nsx plugin support the ext_gw_mode extension
+        # which mandates a uuid for the external network identifier
+        with self.router() as r:
+            self._add_external_gateway_to_router(
+                r['router']['id'],
+                uuidutils.generate_uuid(),
+                expected_code=webob.exc.HTTPNotFound.code)
+
+    def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None,
+                                                  validate_ext_gw=False,
+                                                  distributed=False):
+        with self.router(
+            arg_list=('distributed',), distributed=distributed) as r:
+            with self.subnet() as s1:
+                with self._create_l3_ext_network(vlan_id) as net:
+                    with self.subnet(network=net, enable_dhcp=False) as s2:
+                        self._set_net_external(s1['subnet']['network_id'])
+                        try:
+                            self._add_external_gateway_to_router(
+                                r['router']['id'],
+                                s1['subnet']['network_id'])
+                            body = self._show('routers', r['router']['id'])
+                            net_id = (body['router']
+                                      ['external_gateway_info']['network_id'])
+                            self.assertEqual(net_id,
+                                             s1['subnet']['network_id'])
+                            # Plug network with external mapping
+                            self._set_net_external(s2['subnet']['network_id'])
+                            self._add_external_gateway_to_router(
+                                r['router']['id'],
+                                s2['subnet']['network_id'])
+                            body = self._show('routers', r['router']['id'])
+                            net_id = (body['router']
+                                      ['external_gateway_info']['network_id'])
+                            self.assertEqual(net_id,
+                                             s2['subnet']['network_id'])
+                            if validate_ext_gw:
+                                pass
+                        finally:
+                            # Cleanup
+                            self._remove_external_gateway_from_router(
+                                r['router']['id'],
+                                s2['subnet']['network_id'])
+
+    def test_router_update_gateway_on_l3_ext_net(self):
+        self._test_router_update_gateway_on_l3_ext_net()
+
+    def test_router_update_gateway_on_l3_ext_net_with_vlan(self):
+        self._test_router_update_gateway_on_l3_ext_net(444)
+
+    def test_router_update_gateway_with_existing_floatingip(self):
+        with self._create_l3_ext_network() as net:
+            with self.subnet(network=net, enable_dhcp=False) as subnet:
+                with self.floatingip_with_assoc() as fip:
+                    self._add_external_gateway_to_router(
+                        fip['floatingip']['router_id'],
+                        subnet['subnet']['network_id'],
+                        expected_code=webob.exc.HTTPConflict.code)
+
+    def test_router_list_by_tenant_id(self):
+        with contextlib.nested(self.router(tenant_id='custom'),
+                               self.router(),
+                               self.router()
+                               ) as routers:
+            self._test_list_resources('router', [routers[0]],
+                                      query_params="tenant_id=custom")
+
+    def test_create_l3_ext_network_with_vlan(self):
+        self._test_create_l3_ext_network(666)
+
+    def test_floatingip_with_assoc_fails(self):
+        self._test_floatingip_with_assoc_fails(
+            self._plugin_name + '._check_and_get_fip_assoc')
+
+    def test_floatingip_with_invalid_create_port(self):
+        self._test_floatingip_with_invalid_create_port(self._plugin_name)
+
+    def test_floatingip_update(self):
+        super(TestL3NatTestCase, self).test_floatingip_update(
+            constants.FLOATINGIP_STATUS_DOWN)
+
+    def test_floatingip_disassociate(self):
+        with self.port() as p:
+            private_sub = {'subnet': {'id':
+                                      p['port']['fixed_ips'][0]['subnet_id']}}
+            with self.floatingip_no_assoc(private_sub) as fip:
+                self.assertEqual(fip['floatingip']['status'],
+                                 constants.FLOATINGIP_STATUS_DOWN)
+                port_id = p['port']['id']
+                body = self._update('floatingips', fip['floatingip']['id'],
+                                    {'floatingip': {'port_id': port_id}})
+                self.assertEqual(body['floatingip']['port_id'], port_id)
+                self.assertEqual(body['floatingip']['status'],
+                                 constants.FLOATINGIP_STATUS_ACTIVE)
+                # Disassociate
+                body = self._update('floatingips', fip['floatingip']['id'],
+                                    {'floatingip': {'port_id': None}})
+                body = self._show('floatingips', fip['floatingip']['id'])
+                self.assertIsNone(body['floatingip']['port_id'])
+                self.assertIsNone(body['floatingip']['fixed_ip_address'])
+                self.assertEqual(body['floatingip']['status'],
+                                 constants.FLOATINGIP_STATUS_DOWN)
+
+    def test_update_floatingip_with_edge_router_update_failure(self):
+        p = manager.NeutronManager.get_plugin()
+        with self.subnet() as subnet:
+            with contextlib.nested(self.port(subnet=subnet),
+                                   self.port(subnet=subnet)) as (p1, p2):
+                p1_id = p1['port']['id']
+                p2_id = p2['port']['id']
+                with self.floatingip_with_assoc(port_id=p1_id) as fip:
+                    with self._mock_edge_router_update_with_exception() as (
+                            update_edge):
+                        self.assertRaises(object,
+                                          p.update_floatingip,
+                                          context.get_admin_context(),
+                                          fip['floatingip']['id'],
+                                          floatingip={'floatingip':
+                                                      {'port_id': p2_id}})
+                        update_edge.assert_called_once()
+                    res = self._list(
+                        'floatingips', query_params="port_id=%s" % p1_id)
+                    self.assertEqual(len(res['floatingips']), 1)
+                    res = self._list(
+                        'floatingips', query_params="port_id=%s" % p2_id)
+                    self.assertEqual(len(res['floatingips']), 0)
+
+    def test_create_floatingip_with_edge_router_update_failure(self):
+        p = manager.NeutronManager.get_plugin()
+        with self.subnet(cidr='200.0.0.0/24') as public_sub:
+            public_network_id = public_sub['subnet']['network_id']
+            self._set_net_external(public_network_id)
+            with self.port() as private_port:
+                port_id = private_port['port']['id']
+                tenant_id = private_port['port']['tenant_id']
+                subnet_id = private_port['port']['fixed_ips'][0]['subnet_id']
+                with self.router() as r:
+                    self._add_external_gateway_to_router(
+                        r['router']['id'],
+                        public_sub['subnet']['network_id'])
+                    self._router_interface_action('add',
+                                                  r['router']['id'],
+                                                  subnet_id,
+                                                  None)
+                    floatingip = {'floatingip': {
+                                  'tenant_id': tenant_id,
+                                  'floating_network_id': public_network_id,
+                                  'port_id': port_id}}
+
+                    with self._mock_edge_router_update_with_exception() as (
+                            update_edge):
+                        self.assertRaises(object,
+                                          p.create_floatingip,
+                                          context.get_admin_context(),
+                                          floatingip=floatingip)
+                        update_edge.assert_called_once()
+                        res = self._list(
+                            'floatingips', query_params="port_id=%s" % port_id)
+                        self.assertEqual(len(res['floatingips']), 0)
+                    # Cleanup
+                    self._router_interface_action('remove',
+                                                  r['router']['id'],
+                                                  subnet_id,
+                                                  None)
+                    self._remove_external_gateway_from_router(
+                        r['router']['id'], public_network_id)
+
+    @contextlib.contextmanager
+    def _mock_edge_router_update_with_exception(self):
+        nsx_router_update = PLUGIN_NAME + '._update_edge_router'
+        with mock.patch(nsx_router_update) as update_edge:
+            update_edge.side_effect = object()
+            yield update_edge
+
+    def test_router_interfaces_with_update_firewall(self):
+        with mock.patch.object(edge_utils, "update_firewall") as firewall:
+            with self.router() as r:
+                s1_cidr = '10.0.0.0/24'
+                s2_cidr = '11.0.0.0/24'
+                with contextlib.nested(
+                    self.subnet(cidr=s1_cidr),
+                    self.subnet(cidr=s2_cidr)) as (s1, s2):
+                    self._router_interface_action('add',
+                                                  r['router']['id'],
+                                                  s1['subnet']['id'],
+                                                  None)
+                    firewall.reset_mock()
+                    self._router_interface_action('add',
+                                                  r['router']['id'],
+                                                  s2['subnet']['id'],
+                                                  None)
+                    expected_fw = {
+                        'firewall_rule_list': [
+                            {'action': 'allow',
+                             'enabled': True,
+                             'source_ip_address': [s2_cidr, s1_cidr],
+                             'destination_ip_address': [s2_cidr, s1_cidr]}]}
+                    firewall.assert_called_once_with(
+                        mock.ANY, mock.ANY, mock.ANY,
+                        expected_fw, allow_external=True)
+                    self._router_interface_action('remove',
+                                                  r['router']['id'],
+                                                  s1['subnet']['id'],
+                                                  None)
+                    self._router_interface_action('remove',
+                                                  r['router']['id'],
+                                                  s2['subnet']['id'],
+                                                  None)
+
+    def test_router_interfaces_different_tenants_update_firewall(self):
+        #TODO(kobis): unskip
+        self.skipTest('resolve value order problem')
+        tenant_id = _uuid()
+        other_tenant_id = _uuid()
+        with mock.patch.object(edge_utils, "update_firewall") as firewall:
+            with contextlib.nested(
+                self.router(tenant_id=tenant_id),
+                self.network(tenant_id=tenant_id),
+                self.network(tenant_id=other_tenant_id)
+            ) as (r, n1, n2):
+                s1_cidr = '10.0.0.0/24'
+                s2_cidr = '11.0.0.0/24'
+                with contextlib.nested(
+                    self.subnet(network=n1, cidr=s1_cidr),
+                    self.subnet(network=n2, cidr=s2_cidr)
+                ) as (s1, s2):
+                    self._router_interface_action('add',
+                                                  r['router']['id'],
+                                                  s2['subnet']['id'],
+                                                  None)
+                    firewall.reset_mock()
+                    self._router_interface_action('add',
+                                                  r['router']['id'],
+                                                  s1['subnet']['id'],
+                                                  None,
+                                                  tenant_id=tenant_id)
+                    expected_fw = {
+                        'firewall_rule_list': [
+                            {'action': 'allow',
+                             'enabled': True,
+                             'source_ip_address': [s1_cidr, s2_cidr],
+                             'destination_ip_address': [s1_cidr, s2_cidr]}]}
+                    firewall.assert_called_once_with(
+                        mock.ANY, mock.ANY, mock.ANY,
+                        expected_fw, allow_external=True)
+                    self._router_interface_action('remove',
+                                                  r['router']['id'],
+                                                  s1['subnet']['id'],
+                                                  None,
+                                                  tenant_id=tenant_id)
+                    firewall.reset_mock()
+                    self._router_interface_action('remove',
+                                                  r['router']['id'],
+                                                  s2['subnet']['id'],
+                                                  None)
+                    expected_fw = {'firewall_rule_list': []}
+                    firewall.assert_called_once_with(
+                        mock.ANY, mock.ANY, mock.ANY,
+                        expected_fw, allow_external=True)
+
+    def test_delete_ext_net_with_disassociated_floating_ips(self):
+        #TODO(kobis): unskip
+        self.skipTest('external network with dhcp not supported')
+
+
+class ExtGwModeTestCase(NsxVPluginV2TestCase,
+                        test_ext_gw_mode.ExtGwModeIntTestCase):
+    pass
+
+
+class NsxVSecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):
+    def setUp(self,
+              plugin=PLUGIN_NAME,
+              ext_mgr=None,
+              service_plugins=None):
+        test_lib.test_config['config_files'] = [
+            vmware.get_fake_conf('nsx.ini.test')]
+        mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
+        mock_vcns_instance = mock_vcns.start()
+        self.fc2 = fake_vcns.FakeVcns()
+        mock_vcns_instance.return_value = self.fc2
+        edge_utils.query_dhcp_service_config = mock.Mock(return_value=[])
+        mock_create_dhcp_service = mock.patch("%s.%s" % (
+            vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service'))
+        mock_create_dhcp_service.start()
+        mock_update_dhcp_service = mock.patch("%s.%s" % (
+            vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service'))
+        mock_update_dhcp_service.start()
+        mock_delete_dhcp_service = mock.patch("%s.%s" % (
+            vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service'))
+        mock_delete_dhcp_service.start()
+        super(NsxVSecurityGroupsTestCase, self).setUp(plugin=plugin,
+                                                      ext_mgr=ext_mgr)
+        self.addCleanup(self.fc2.reset_all)
+
+
+class NsxVTestSecurityGroup(ext_sg.TestSecurityGroups,
+                            NsxVSecurityGroupsTestCase):
+
+    def test_list_ports_security_group(self):
+        with self.network() as n:
+            with self.subnet(n, enable_dhcp=False):
+                self._create_port(self.fmt, n['network']['id'])
+                req = self.new_list_request('ports')
+                res = req.get_response(self.api)
+                ports = self.deserialize(self.fmt, res)
+                port = ports['ports'][0]
+                self.assertEqual(len(port[secgrp.SECURITYGROUPS]), 1)
+                self._delete('ports', port['id'])
+
+    def test_vnic_security_group_membership(self):
+        self.fc2.add_member_to_security_group = (
+            mock.Mock().add_member_to_security_group)
+        self.fc2.remove_member_from_security_group = (
+            mock.Mock().remove_member_from_security_group)
+        nsx_sg_id = str(self.fc2._securitygroups['ids'])
+        device_id = _uuid()
+        port_index = 0
+        # The expected vnic-id format by NsxV
+        vnic_id = '%s.%03d' % (device_id, port_index)
+        with self.port(device_id=device_id,
+                       device_owner='compute:None') as port:
+            data = {'port': {'vnic_index': port_index}}
+            self.new_update_request('ports', data,
+                                    port['port']['id']).get_response(self.api)
+            # The vnic should be added as a member to the nsx-security-groups
+            # which match the port security-groups
+            (self.fc2.add_member_to_security_group
+             .assert_called_once_with(nsx_sg_id, vnic_id))
+
+        # The vnic should be removed from the nsx-security-groups which match
+        # the deleted port security-groups
+        #TODO(kobis): Port is not removed automatically
+        # (self.fc2.remove_member_from_security_group
+        #  .assert_called_once_with(nsx_sg_id, vnic_id))
+
+
+class TestVdrTestCase(L3NatTest,
+                      test_l3_plugin.L3NatDBIntTestCase,
+                      NsxVPluginV2TestCase):
+
+    def _create_router(self, fmt, tenant_id, name=None,
+                       admin_state_up=None, set_context=False,
+                       arg_list=None, **kwargs):
+        data = {'router': {'tenant_id': tenant_id}}
+        if name:
+            data['router']['name'] = name
+        if admin_state_up:
+            data['router']['admin_state_up'] = admin_state_up
+        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
+            # Arg must be present and not empty
+            if arg in kwargs and kwargs[arg]:
+                data['router'][arg] = kwargs[arg]
+
+        if 'distributed' in kwargs:
+                data['router']['distributed'] = kwargs[arg]
+        else:
+                data['router']['distributed'] = True
+
+        router_req = self.new_create_request('routers', data, fmt)
+        if set_context and tenant_id:
+            # create a specific auth context for this request
+            router_req.environ['neutron.context'] = context.Context(
+                '', tenant_id)
+
+        return router_req.get_response(self.ext_api)
+
+    def _test_router_create_with_distributed(self, dist_input, dist_expected,
+                                             return_code=201):
+        data = {'tenant_id': 'whatever'}
+        data['name'] = 'router1'
+        data['distributed'] = dist_input
+        router_req = self.new_create_request(
+            'routers', {'router': data}, self.fmt)
+        try:
+            res = router_req.get_response(self.ext_api)
+            self.assertEqual(return_code, res.status_int)
+            if res.status_int == 201:
+                router = self.deserialize(self.fmt, res)
+                self.assertIn('distributed', router['router'])
+                self.assertEqual(dist_expected,
+                                 router['router']['distributed'])
+        finally:
+            if res.status_int == 201:
+                self._delete('routers', router['router']['id'])
+
+    def test_router_create_distributed(self):
+        self._test_router_create_with_distributed(True, True)
+
+    def test_router_create_not_distributed(self):
+        self._test_router_create_with_distributed(False, False)
+
+    def test_router_create_distributed_unspecified(self):
+        self._test_router_create_with_distributed(None, False)
+
+    def test_floatingip_with_assoc_fails(self):
+        self._test_floatingip_with_assoc_fails(
+            self._plugin_name + '._check_and_get_fip_assoc')
+
+    def test_floatingip_update(self):
+        super(TestVdrTestCase, self).test_floatingip_update(
+            constants.FLOATINGIP_STATUS_DOWN)
+
+    def test_floatingip_with_invalid_create_port(self):
+        self._test_floatingip_with_invalid_create_port(self._plugin_name)
+
+    def test_router_add_gateway_invalid_network_returns_404(self):
+        with self.router() as r:
+            self._add_external_gateway_to_router(
+                r['router']['id'],
+                uuidutils.generate_uuid(),
+                expected_code=webob.exc.HTTPNotFound.code)
+
+    def test_router_add_interfaces_with_multiple_subnets_on_same_network(self):
+        with self.router() as r:
+            with self.network() as n:
+                with contextlib.nested(
+                    self.subnet(network=n),
+                    self.subnet(network=n,
+                                cidr='11.0.0.0/24')) as (s1, s2):
+                    self._router_interface_action('add',
+                                                  r['router']['id'],
+                                                  s1['subnet']['id'],
+                                                  None)
+                    err_code = webob.exc.HTTPBadRequest.code
+                    self._router_interface_action('add',
+                                                  r['router']['id'],
+                                                  s2['subnet']['id'],
+                                                  None,
+                                                  err_code)
+                    self._router_interface_action('remove',
+                                                  r['router']['id'],
+                                                  s1['subnet']['id'],
+                                                  None)
+
+    def test_delete_ext_net_with_disassociated_floating_ips(self):
+        #TODO(kobis): unskip
+        self.skipTest('external network with dhcp not supported')
+
+
+class TestNSXvAllowedAddressPairs(test_addr_pair.TestAllowedAddressPairs,
+                                  NsxVPluginV2TestCase):
+    def test_get_vlan_network_name(self):
+        pass
diff --git a/vmware_nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py b/vmware_nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py
index 4b0aed4952..248673a962 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/vshield/fake_vcns.py
@@ -14,10 +14,13 @@
 
 import copy
 
-from oslo.serialization import jsonutils
+from oslo.serialization import jsonutils as json
+import xml.etree.ElementTree as ET
 
 from neutron.openstack.common import uuidutils
-from neutron.plugins.vmware.vshield.common import exceptions
+from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions
+
+SECTION_LOCATION_HEADER = '/api/4.0/firewall/globalroot-0/config/%s/%s'
 
 
 class FakeVcns(object):
@@ -55,6 +58,13 @@ class FakeVcns(object):
         self._fake_monitors_dict = {}
         self._fake_app_profiles_dict = {}
         self._fake_loadbalancer_config = {}
+        self._fake_virtual_wires = {}
+        self._virtual_wire_id = 0
+        self._fake_portgroups = {}
+        self._portgroup_id = 0
+        self._securitygroups = {'ids': 0, 'names': set()}
+        self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()}
+        self._dhcp_bindings = {}
 
     def set_fake_nsx_api(self, fake_nsx_api):
         self._fake_nsx_api = fake_nsx_api
@@ -80,7 +90,7 @@ class FakeVcns(object):
                 'moduleName': 'vShield Edge',
                 'errorData': None
             }
-            return (header, jsonutils.dumps(response))
+            return (header, json.dumps(response))
 
         self._job_idx = self._job_idx + 1
         job_id = "jobdata-%d" % self._job_idx
@@ -91,7 +101,8 @@ class FakeVcns(object):
             'name': request['name'],
             'request': request,
             'nat_rules': None,
-            'nat_rule_id': 0
+            'nat_rule_id': 0,
+            'interface_index': 1
         }
         header = {
             'status': 200,
@@ -100,6 +111,17 @@ class FakeVcns(object):
         response = ''
         return (header, response)
 
+    def update_edge(self, edge_id, request):
+        if edge_id not in self._edges:
+            raise Exception(_("Edge %s does not exist") % edge_id)
+        edge = self._edges[edge_id]
+        edge['name'] = request['name']
+        header = {
+            'status': 200
+        }
+        response = ''
+        return (header, response)
+
     def get_edge_id(self, job_id):
         if job_id not in self._jobs:
             raise Exception(_("Job %s does not nexist") % job_id)
@@ -133,6 +155,47 @@ class FakeVcns(object):
         response = ''
         return (header, response)
 
+    def add_vdr_internal_interface(self, edge_id, interface):
+        interface = interface['interfaces'][0]
+        if not self._edges[edge_id].get('interfaces'):
+            self._edges[edge_id]['interfaces'] = []
+        index = len(self._edges[edge_id]['interfaces'])
+        interface['index'] = str(index)
+        self._edges[edge_id]['interfaces'].append(interface)
+        header = {
+            'status': 200
+        }
+        response = {"interfaces": [{"index": str(index)}]}
+        return (header, response)
+
+    def get_edge_interfaces(self, edge_id):
+        if not self._edges[edge_id].get('interfaces'):
+            self._edges[edge_id]['interfaces'] = []
+        header = {
+            'status': 200
+        }
+        response = {"interfaces": self._edges[edge_id].get('interfaces', [])}
+        return (header, response)
+
+    def update_vdr_internal_interface(
+        self, edge_id, interface_index, interface):
+        header = {
+            'status': 200
+        }
+        response = ''
+        return (header, response)
+
+    def delete_vdr_internal_interface(self, edge_id, interface_index):
+        for interface in self._edges[edge_id].get('interfaces', []):
+            if int(interface['index']) == int(interface_index):
+                header = {
+                    'status': 200
+                }
+                break
+        header = {'status': 404}
+        response = ''
+        return (header, response)
+
     def update_interface(self, edge_id, vnic):
         header = {
             'status': 200
@@ -140,6 +203,73 @@ class FakeVcns(object):
         response = ''
         return (header, response)
 
+    def delete_interface(self, edge_id, vnic_index):
+        header = {
+            'status': 200
+        }
+        response = ''
+        return (header, response)
+
+    def query_interface(self, edge_id, vnic_index):
+        header = {
+            'status': 200
+        }
+        response = {
+            'label': 'vNic_1',
+            'name': 'internal1',
+            'address_groups': {'address_groups': []},
+            'mtu': 1500,
+            'type': 'trunk',
+            'subInterfaces': {'subInterfaces': []},
+            'isConnected': True
+        }
+        return (header, response)
+
+    def reconfigure_dhcp_service(self, edge_id, request):
+        header = {
+            'status': 201
+        }
+        response = ''
+        return (header, response)
+
+    def query_dhcp_configuration(self, edge_id):
+        header = {
+            'status': 200
+        }
+        response = {
+            "featureType": "dhcp_4.0",
+            "version": 14,
+            "enabled": True,
+            "staticBindings": {"staticBindings": [{}]},
+            "ipPools": {"ipPools": []}
+        }
+        return (header, response)
+
+    def create_dhcp_binding(self, edge_id, request):
+        if not self._dhcp_bindings.get(edge_id):
+            self._dhcp_bindings[edge_id] = {}
+            self._dhcp_bindings[edge_id]['idx'] = 0
+        binding_idx = self._dhcp_bindings[edge_id]['idx']
+        binding_idx_str = "binding-" + str(binding_idx)
+        self._dhcp_bindings[edge_id][binding_idx_str] = request
+        self._dhcp_bindings[edge_id]['idx'] = binding_idx + 1
+        header = {
+            'status': 200,
+            'location': '/dhcp/config/bindings/%s' % binding_idx_str
+        }
+        response = ''
+        return (header, response)
+
+    def delete_dhcp_binding(self, edge_id, binding_id):
+        if binding_id not in self._dhcp_bindings[edge_id]:
+            raise Exception(_("binding %s does not exist") % binding_id)
+        del self._dhcp_bindings[edge_id][binding_id]
+        header = {
+            'status': 200
+        }
+        response = ''
+        return (header, response)
+
     def get_nat_config(self, edge_id):
         if edge_id not in self._edges:
             raise Exception(_("Edge %s does not exist") % edge_id)
@@ -242,8 +372,7 @@ class FakeVcns(object):
         # The lswitch is created via VCNS API so the fake nsx_api will not
         # see it. Added to fake nsx_api here.
         if self._fake_nsx_api:
-            lswitch = \
-                self._fake_nsx_api._add_lswitch(jsonutils.dumps(lsconfig))
+            lswitch = self._fake_nsx_api._add_lswitch(json.dumps(lsconfig))
         else:
             lswitch = lsconfig
             lswitch['uuid'] = uuidutils.generate_uuid()
@@ -579,6 +708,40 @@ class FakeVcns(object):
         self._fake_loadbalancer_config[edge_id] = True
         return self.return_helper(header, response)
 
+    def create_virtual_wire(self, vdn_scope_id, request):
+        self._virtual_wire_id += 1
+        header = {'status': 200}
+        virtual_wire = 'virtualwire-%s' % self._virtual_wire_id
+        data = {'name': request['virtualWireCreateSpec']['name'],
+                'objectId': virtual_wire}
+        self._fake_virtual_wires.update({virtual_wire: data})
+        return (header, virtual_wire)
+
+    def delete_virtual_wire(self, virtualwire_id):
+        del self._fake_virtual_wires[virtualwire_id]
+        header = {
+            'status': 200
+        }
+        response = ''
+        return (header, response)
+
+    def create_port_group(self, dvs_id, request):
+        self._portgroup_id += 1
+        header = {'status': 200}
+        portgroup = 'dvportgroup-%s' % self._portgroup_id
+        data = {'name': request['networkSpec']['networkName'],
+                'objectId': portgroup}
+        self._fake_portgroups.update({portgroup: data})
+        return (header, portgroup)
+
+    def delete_port_group(self, dvs_id, portgroup_id):
+        del self._fake_portgroups[portgroup_id]
+        header = {
+            'status': 200
+        }
+        response = ''
+        return (header, response)
+
     def return_helper(self, header, response):
         status = int(header['status'])
         if 200 <= status <= 300:
@@ -590,6 +753,194 @@ class FakeVcns(object):
         raise cls(
             status=status, header=header, uri='fake_url', response=response)
 
+    def _get_bad_req_response(self, details, error_code, module_name):
+        bad_req_response_format = """
+            <error>
+            <details>%(details)s</details>
+            <errorCode>%(error_code)s</errorCode>
+            <moduleName>%(module_name)s</moduleName>
+            </error>
+            """
+        return bad_req_response_format % {
+            'details': details,
+            'error_code': error_code,
+            'module_name': module_name,
+        }
+
+    def _get_section_location(self, type, section_id):
+        return SECTION_LOCATION_HEADER % (type, section_id)
+
+    def _get_section_id_from_uri(self, section_uri):
+        return section_uri.split('/')[-1]
+
+    def _section_not_found(self, section_id):
+        msg = "Invalid section id found : %s" % section_id
+        response = self._get_bad_req_response(msg, 100089, 'vShield App')
+        headers = {'status': 400}
+        return (headers, response)
+
+    def _unknown_error(self):
+        msg = "Unknown Error Occured.Please look into tech support logs."
+        response = self._get_bad_req_response(msg, 100046, 'vShield App')
+        headers = {'status': 400}
+        return (headers, response)
+
+    def create_security_group(self, request):
+        sg = request['securitygroup']
+        if sg['name'] in self._securitygroups['names']:
+            status = 400
+            msg = ("Another object with same name : %s already exists in "
+                   "the current scope : globalroot-0." % sg['name'])
+            response = self._get_bad_req_response(msg, 210, 'core-services')
+        else:
+            sg_id = str(self._securitygroups['ids'])
+            self._securitygroups['ids'] += 1
+            sg['members'] = set()
+            self._securitygroups[sg_id] = sg
+            self._securitygroups['names'].add(sg['name'])
+            status, response = 201, sg_id
+        return ({'status': status}, response)
+
+    def delete_security_group(self, securitygroup_id):
+        try:
+            del self._securitygroups[securitygroup_id]
+        except KeyError:
+            status = 404
+            msg = ("The requested object : %s could "
+                   "not be found. Object identifiers are case sensitive."
+                   % securitygroup_id)
+            response = self._get_bad_req_response(msg, 210, 'core-services')
+        else:
+            status, response = 200, ''
+        return ({'status': status}, response)
+
+    def create_section(self, type, request):
+        section = ET.fromstring(request)
+        section_name = section.attrib.get('name')
+        if section_name in self._sections['names']:
+            msg = "Section with name %s already exists." % section_name
+            response = self._get_bad_req_response(msg, 100092, 'vShield App')
+            headers = {'status': 400}
+        else:
+            section_id = str(self._sections['section_ids'])
+            section.attrib['id'] = section_id
+            _section = self._sections[section_id] = {'name': section_name,
+                                                     'etag': 'Etag-0',
+                                                     'rules': {}}
+            self._sections['names'].add(section_name)
+            for rule in section.findall('rule'):
+                rule_id = str(self._sections['rule_ids'])
+                rule.attrib['id'] = rule_id
+                _section['rules'][rule_id] = ET.tostring(rule)
+                self._sections['rule_ids'] += 1
+            response = ET.tostring(section)
+            headers = {
+                'status': 201,
+                'location': self._get_section_location(type, section_id),
+                'etag': _section['etag']
+            }
+            self._sections['section_ids'] += 1
+        return (headers, response)
+
+    def update_section(self, section_uri, request, h):
+        section = ET.fromstring(request)
+        section_id = section.attrib.get('id')
+        section_name = section.attrib.get('name')
+        if section_id not in self._sections:
+            return self._section_not_found(section_id)
+        _section = self._sections[section_id]
+        if (_section['name'] != section_name and
+            section_name in self._sections['names']):
+                # Theres a section with this name already
+                headers, response = self._unknown_error()
+        else:
+            # Different Etag every successful update
+            _section['etag'] = ('Etag-1' if _section['etag'] == 'Etag-0'
+                                else 'Etag-0')
+            self._sections['names'].remove(_section['name'])
+            _section['name'] = section_name
+            self._sections['names'].add(section_name)
+            _section['rules'] = {}
+            for rule in section.findall('rule'):
+                if not rule.attrib.get('id'):
+                    rule.attrib['id'] = str(self._sections['rule_ids'])
+                    self._sections['rule_ids'] += 1
+                rule_id = rule.attrib.get('id')
+                _section['rules'][rule_id] = ET.tostring(rule)
+            response = ET.tostring(section)
+            headers = {
+                'status': 200,
+                'location': self._get_section_location(type, section_id),
+                'etag': _section['etag']
+            }
+        return (headers, response)
+
+    def delete_section(self, section_uri):
+        section_id = self._get_section_id_from_uri(section_uri)
+        if section_id not in self._sections:
+            headers, response = self._unknown_error()
+        else:
+            section_name = self._sections[section_id]['name']
+            del self._sections[section_id]
+            self._sections['names'].remove(section_name)
+            response = ''
+            headers = {'status': 204}
+        return (headers, response)
+
+    def get_section(self, section_uri):
+        section_id = self._get_section_id_from_uri(section_uri)
+        if section_id not in self._sections:
+            headers, response = self._section_not_found(section_id)
+        else:
+            section_rules = (''.join(self._sections[section_id]['rules'].
+                             values()))
+            response = ('<section id="%s"><rules>%s</rules></section>'
+                        % (section_id, section_rules))
+            headers = {'status': 200,
+                       'etag': self._sections[section_id]['etag']}
+        return (headers, response)
+
+    def remove_rule_from_section(self, section_uri, rule_id):
+        section_id = self._get_section_id_from_uri(section_uri)
+        if section_id not in self._sections:
+            headers, response = self._section_not_found(section_id)
+        else:
+            section = self._sections[section_id]
+            if rule_id in section['rules']:
+                del section['rules'][rule_id]
+                response = ''
+                headers = {'status': 204}
+            else:
+                headers, response = self._unknown_error()
+        return (headers, response)
+
+    def add_member_to_security_group(self, security_group_id, member_id):
+        if security_group_id not in self._securitygroups:
+            msg = ("The requested object : %s could not be found."
+                   "Object identifiers are case "
+                   "sensitive.") % security_group_id
+            response = self._get_bad_req_response(msg, 202, 'core-services')
+            headers = {'status': 404}
+        else:
+            self._securitygroups[security_group_id]['members'].add(member_id)
+            response = ''
+            headers = {'status': 200}
+        return (headers, response)
+
+    def remove_member_from_security_group(self, security_group_id, member_id):
+        if security_group_id not in self._securitygroups:
+            msg = ("The requested object : %s could not be found."
+                   "Object identifiers are "
+                   "case sensitive.") % security_group_id
+            response = self._get_bad_req_response(msg, 202, 'core-services')
+            headers = {'status': 404}
+        else:
+            self._securitygroups[security_group_id]['members'].remove(
+                member_id)
+            response = ''
+            headers = {'status': 200}
+        return (headers, response)
+
     def reset_all(self):
         self._jobs.clear()
         self._edges.clear()
@@ -600,3 +951,22 @@ class FakeVcns(object):
         self._fake_monitors_dict = {}
         self._fake_app_profiles_dict = {}
         self._fake_loadbalancer_config = {}
+        self._fake_virtual_wires = {}
+        self._virtual_wire_id = 0
+        self._fake_portgroups = {}
+        self._portgroup_id = 0
+        self._securitygroups = {'ids': 0, 'names': set()}
+        self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()}
+        self._dhcp_bindings = {}
+
+    def validate_datacenter_moid(self, object_id):
+        return True
+
+    def validate_network(self, object_id):
+        return True
+
+    def validate_vdn_scope(self, object_id):
+        return True
+
+    def validate_dvs(self, object_id):
+        return True
diff --git a/vmware_nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py b/vmware_nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
index a30c5a7b5e..00e0465a15 100644
--- a/vmware_nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
+++ b/vmware_nsx/neutron/tests/unit/vmware/vshield/test_vcns_driver.py
@@ -16,11 +16,13 @@
 from eventlet import greenthread
 import mock
 
-from neutron.plugins.vmware.vshield.common import constants as vcns_const
-from neutron.plugins.vmware.vshield.tasks import constants as ts_const
-from neutron.plugins.vmware.vshield.tasks import tasks as ts
-from neutron.plugins.vmware.vshield import vcns_driver
 from neutron.tests import base
+from vmware_nsx.neutron.plugins.vmware.vshield.common import (
+    constants as vcns_const)
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import (
+    constants as ts_const)
+from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks as ts
+from vmware_nsx.neutron.plugins.vmware.vshield import vcns_driver
 from vmware_nsx.neutron.tests.unit import vmware
 from vmware_nsx.neutron.tests.unit.vmware.vshield import fake_vcns
 
@@ -554,13 +556,9 @@ class VcnsDriverTestCase(base.BaseTestCase):
 
     def test_update_interface(self):
         self._deploy_edge()
-        jobdata = {}
-        task = self.vcns_driver.update_interface(
+        self.vcns_driver.update_interface(
             'router-id', self.edge_id, vcns_const.EXTERNAL_VNIC_INDEX,
-            'network-id', address='100.0.0.3', netmask='255.255.255.0',
-            jobdata=jobdata)
-        task.wait(ts_const.TaskState.RESULT)
-        self.assertTrue(jobdata.get('interface_update_result'))
+            'network-id', address='100.0.0.3', netmask='255.255.255.0')
 
     def test_delete_edge(self):
         self._deploy_edge()