diff --git a/README.md b/README.md
index a02030c9..7a25aab9 100644
--- a/README.md
+++ b/README.md
@@ -132,3 +132,22 @@ The following is a full list of current tip repos (may not be up-to-date):
         - {name: neutron,
            repository: 'git://github.com/openstack/neutron',
            branch: master}
+
+# Network Spaces support
+
+This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju.  This is only supported with Juju 2.0 and above.
+
+Open vSwitch endpoints can be configured using the 'data' extra-binding, ensuring that tunnel traffic is routed across the correct host network interfaces:
+
+    juju deploy neutron-openvswitch --bind "data=data-space"
+
+Alternatively, these can also be provided as part of a Juju native bundle configuration:
+
+    neutron-openvswitch:
+      charm: cs:xenial/neutron-openvswitch
+      bindings:
+        data: data-space
+
+NOTE: Spaces must be configured in the underlying provider prior to attempting to use them.
+
+NOTE: Existing deployments using the os-data-network configuration option will continue to function; if set, this option is preferred over any network space binding provided.
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index 4efe7993..b9c79000 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -191,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
 get_netmask_for_address = partial(_get_for_address, key='netmask')
 
 
+def resolve_network_cidr(ip_address):
+    '''
+    Resolve the full network CIDR for ip_address based on the
+    host's configured network interfaces
+    '''
+    netmask = get_netmask_for_address(ip_address)
+    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
 def format_ipv6_addr(address):
     """If address is IPv6, wrap it in '[]' otherwise return None.
 
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index d2ede320..d21c9c78 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index 3dca6dc1..532a1dc1 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -14,16 +14,19 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
+
 from charmhelpers.core.hookenv import (
     config,
     unit_get,
     service_name,
+    network_get_primary_address,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
+    resolve_network_cidr,
 )
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
@@ -33,16 +36,19 @@ ADMIN = 'admin'
 
 ADDRESS_MAP = {
     PUBLIC: {
+        'binding': 'public',
         'config': 'os-public-network',
         'fallback': 'public-address',
         'override': 'os-public-hostname',
     },
     INTERNAL: {
+        'binding': 'internal',
         'config': 'os-internal-network',
         'fallback': 'private-address',
         'override': 'os-internal-hostname',
     },
     ADMIN: {
+        'binding': 'admin',
         'config': 'os-admin-network',
         'fallback': 'private-address',
         'override': 'os-admin-hostname',
@@ -110,7 +116,7 @@ def resolve_address(endpoint_type=PUBLIC):
     correct network. If clustered with no nets defined, return primary vip.
 
     If not clustered, return unit address ensuring address is on configured net
-    split if one is configured.
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
 
     :param endpoint_type: Network endpoing type
     """
@@ -125,23 +131,45 @@ def resolve_address(endpoint_type=PUBLIC):
     net_type = ADDRESS_MAP[endpoint_type]['config']
     net_addr = config(net_type)
     net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
     clustered = is_clustered()
-    if clustered:
-        if not net_addr:
-            # If no net-splits defined, we expect a single vip
-            resolved_address = vips[0]
-        else:
+
+    if clustered and vips:
+        if net_addr:
             for vip in vips:
                 if is_address_in_network(net_addr, vip):
                     resolved_address = vip
                     break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits are configured and extra
+                # bindings/network spaces are unsupported, expect a single vip
+                resolved_address = vips[0]
     else:
         if config('prefer-ipv6'):
             fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
             fallback_addr = unit_get(net_fallback)
 
-        resolved_address = get_address_in_network(net_addr, fallback_addr)
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
 
     if resolved_address is None:
         raise ValueError("Unable to resolve a suitable IP address based on "
diff --git a/hooks/neutron_ovs_context.py b/hooks/neutron_ovs_context.py
index f2b0a7cc..ea332f3a 100644
--- a/hooks/neutron_ovs_context.py
+++ b/hooks/neutron_ovs_context.py
@@ -6,6 +6,7 @@ from charmhelpers.core.hookenv import (
     relation_ids,
     related_units,
     unit_get,
+    network_get_primary_address,
 )
 from charmhelpers.contrib.openstack.ip import resolve_address
 from charmhelpers.contrib.openstack import context
@@ -44,9 +45,22 @@ class OVSPluginContext(context.NeutronContext):
             return {}
 
         conf = config()
-        ovs_ctxt['local_ip'] = \
-            get_address_in_network(config('os-data-network'),
-                                   get_host_ip(unit_get('private-address')))
+
+        fallback = get_host_ip(unit_get('private-address'))
+        if config('os-data-network'):
+            # NOTE: prefer any existing use of config based networking
+            ovs_ctxt['local_ip'] = \
+                get_address_in_network(config('os-data-network'),
+                                       fallback)
+        else:
+            # NOTE: test out network-spaces support, then fallback
+            try:
+                ovs_ctxt['local_ip'] = get_host_ip(
+                    network_get_primary_address('data')
+                )
+            except NotImplementedError:
+                ovs_ctxt['local_ip'] = fallback
+
         neutron_api_settings = NeutronAPIContext()()
         ovs_ctxt['neutron_security_groups'] = self.neutron_security_groups
         ovs_ctxt['l2_population'] = neutron_api_settings['l2_population']
diff --git a/metadata.yaml b/metadata.yaml
index ee462059..ded127ca 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -16,6 +16,8 @@ description: |
   L2 connectivity on nova-compute services.
 tags:
   - openstack
+extra-bindings:
+  data:
 provides:
   neutron-plugin:
     interface: neutron-plugin
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index d2ede320..d21c9c78 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -126,7 +126,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                      'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup']
+                     'cinder-backup', 'nexentaedge-data',
+                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                     'cinder-nexentaedge', 'nexentaedge-mgmt']
 
         if self.openstack:
             for svc in services:
diff --git a/unit_tests/test_neutron_ovs_context.py b/unit_tests/test_neutron_ovs_context.py
index 43755fe0..de81ef9e 100644
--- a/unit_tests/test_neutron_ovs_context.py
+++ b/unit_tests/test_neutron_ovs_context.py
@@ -10,6 +10,7 @@ TO_PATCH = [
     'config',
     'unit_get',
     'get_host_ip',
+    'network_get_primary_address',
 ]
 
 
@@ -29,6 +30,7 @@ class OVSPluginContextTest(CharmTestCase):
         self.test_config.set('debug', True)
         self.test_config.set('verbose', True)
         self.test_config.set('use-syslog', True)
+        self.network_get_primary_address.side_effect = NotImplementedError
 
     def tearDown(self):
         super(OVSPluginContextTest, self).tearDown()