Eliminate port binding based on ODL networking topology

The port binding logic based on networking topology has a design flaw, so
logic based on a pseudo agent DB is being introduced:
- https://review.openstack.org/#/c/308031/
It is agnostic to the underlying switch technology and fits well with
the design philosophy of networking-odl.
Once that change is merged and ready for use, the port binding code based on
networking topology will be eliminated.
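
For deployments that still select the removed controller explicitly, the
existing ml2_odl option has to be switched over. A minimal sketch using the
same oslo.config override mechanism the removed unit tests used; the specific
call below is illustrative, not part of this change:

from oslo_config import cfg

# Assumes networking_odl.common.config has been imported so that the
# ml2_odl option group is registered.
# 'pseudo-agentdb-binding' and 'legacy-port-binding' remain valid values;
# 'network-topology' is the value removed by this change.
cfg.CONF.set_override(
    'port_binding_controller', 'pseudo-agentdb-binding', 'ml2_odl')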

Change-Id: I585f83bffa4dfa4369a809dd6bc49cc396e4dd99
Co-Authored-By: Rajiv Kumar <rajiv.kumar@nectechnologies.in>
Isaku Yamahata
2016-06-26 12:29:36 -07:00
parent ec12533673
commit d884c99cdf
14 changed files with 13 additions and 2241 deletions

View File

@@ -47,13 +47,14 @@ if [[ -z "$ODL_GATE_PORT_BINDING" ]]; then
case "$ODL_RELEASE_BASE" in
beryllium-snapshot)
# pseudo-agentdb-binding is supported from Boron onwards
ODL_GATE_PORT_BINDING=network-topology
ODL_GATE_PORT_BINDING=legacy-port-binding
;;
*)
ODL_GATE_PORT_BINDING=pseudo-agentdb-binding
;;
esac
fi
case "$ODL_GATE_PORT_BINDING" in
pseudo-agentdb-binding)
ODL_PORT_BINDING_CONTROLLER=pseudo-agentdb-binding
@@ -61,9 +62,6 @@ case "$ODL_GATE_PORT_BINDING" in
legacy-port-binding)
ODL_PORT_BINDING_CONTROLLER=legacy-port-binding
;;
network-topology)
ODL_PORT_BINDING_CONTROLLER=network-topology
;;
*)
echo "Unknown port binding controller: $ODL_GATE_PORT_BINDING"
exit 1

View File

@@ -1,195 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import six
import sys
import time
from oslo_log import log
LOG = log.getLogger(__name__)
class CacheEntry(collections.namedtuple('CacheEntry', ['timeout', 'values'])):
error = None
@classmethod
def create(cls, timeout, *values):
return CacheEntry(timeout, list(values))
def add_value(self, value):
self.values.append(value)
def is_expired(self, current_clock):
return self.timeout <= current_clock
def __hash__(self):
return id(self)
def __eq__(self, other):
return self is other
def __ne__(self, other):
return not self.__eq__(other)
class Cache(object):
'''Generic mapping class used to cache mappings
Example of uses:
- host name to IP addresses mapping
- IP addresses to ODL networking topology elements mapping
'''
# TODO(Federico Ressi) after Mitaka: this class should store cached data
# in a place shared among multiple hosts, using a caching mechanism coherent
# with other OpenStack libraries. This is especially interesting in the
# context of reliability when there are multiple Neutron instances and the
# direct connection to ODL is broken.
create_new_entry = CacheEntry.create
def __init__(self, fetch_all_func):
if not callable(fetch_all_func):
message = 'Expected callable as parameter, got {!r}.'.format(
fetch_all_func)
raise TypeError(message)
self._fetch_all = fetch_all_func
self.clear()
def clear(self):
self._entries = collections.OrderedDict()
def fetch(self, key, timeout):
__, value = self.fetch_any([key], timeout=timeout)
return value
def fetch_any(self, keys, timeout):
return next(self.fetch_all(keys=keys, timeout=timeout))
def fetch_all(self, keys, timeout):
# this means 'now' as a number
current_clock = time.clock()
# this is the moment in the future at which new entries will expire
new_entries_timeout = current_clock + timeout
# entries to be fetched because missing or expired
new_entries = collections.OrderedDict()
# all entries missing or expired
missing = collections.OrderedDict()
# captured error for the case a problem has to be reported
cause_exc_info = None
for key in keys:
entry = self._entries.get(key)
if entry is None or entry.is_expired(current_clock) or entry.error:
# this entry has to be fetched
new_entries[key] = missing[key] =\
self.create_new_entry(new_entries_timeout)
elif entry.values:
# Yield existing entry
for value in entry.values:
yield key, value
else:
# This entry is not expired and there was no error when it
# was fetched. Therefore we accept that there are no values
# for given key until it expires. This is going to produce a
# KeyError if it is still missing at the end of this function.
missing[key] = entry
if missing:
if new_entries:
# Fetch some entries and update the cache
try:
new_entry_keys = tuple(new_entries)
for key, value in self._fetch_all(new_entry_keys):
entry = new_entries.get(key)
if entry:
# Add fresh new value
entry.add_value(value)
else:
# This key was not asked for, but we take it
# anyway. "Noli equi dentes inspicere donati."
new_entries[key] = entry = self.create_new_entry(
new_entries_timeout, value)
# pylint: disable=broad-except
except Exception:
# Something has gone wrong: update and yield what we got so
# far before raising any error
cause_exc_info = sys.exc_info()
LOG.warning(
'Error fetching values for keys: %r',
', '.join(repr(k) for k in new_entry_keys),
exc_info=cause_exc_info)
# update the cache with new fresh entries
self._entries.update(new_entries)
missing_keys = []
for key, entry in missing.items():
if entry.values:
# yield entries that were missing before
for value in entry.values:
# Yield just fetched entry
yield key, value
else:
if cause_exc_info:
# mark this entry as failed
entry.error = cause_exc_info
# after all this entry is still without any value
missing_keys.append(key)
if missing_keys:
# After all this, some entries are still missing, probably because
# the key was invalid. It's time to raise an error.
missing_keys = tuple(missing_keys)
if not cause_exc_info:
# Search for the error cause in missing entries
for key in missing_keys:
error = self._entries[key].error
if error:
# A cached entry for which the fetch method produced an
# error will keep reporting that same error even if a later
# fetch returns nothing for it without raising any error.
# Is this what we want?
break
else:
# If the cause of the problem is not known, then
# the keys were probably wrong
message = 'Invalid keys: {!r}'.format(
', '.join(missing_keys))
error = KeyError(message)
try:
raise error
except KeyError:
cause_exc_info = sys.exc_info()
raise CacheFetchError(
missing_keys=missing_keys, cause_exc_info=cause_exc_info)
class CacheFetchError(KeyError):
def __init__(self, missing_keys, cause_exc_info):
super(CacheFetchError, self).__init__(str(cause_exc_info[1]))
self.cause_exc_info = cause_exc_info
self.missing_keys = missing_keys
def reraise_cause(self):
six.reraise(*self.cause_exc_info)
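
For context, a minimal usage sketch of the Cache class removed above, based
only on its interface and valid on the pre-removal tree; the fetch function
and key below are made up for illustration:

from networking_odl.common import cache

def fetch_addresses(keys):
    # A fetch callable must yield (key, value) pairs for the requested keys.
    for key in keys:
        yield key, '192.0.2.1'  # placeholder value for illustration

addresses = cache.Cache(fetch_addresses)
# The first call invokes fetch_addresses; later calls for the same key within
# the next 60 seconds are served from the cache.
value = addresses.fetch('compute-1', timeout=60.0)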

View File

@@ -13,12 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
import six.moves.urllib.parse as urlparse
import socket
from networking_odl.common import cache
from networking_odl.common import constants as odl_const
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
@@ -33,34 +30,6 @@ def try_del(d, keys):
pass
def _fetch_all_addresses_by_hostnames(hostnames):
for name in hostnames:
# it uses an ordered dict to avoid duplicates and keep order
entries = collections.OrderedDict(
(info[4][0], None) for info in socket.getaddrinfo(name, None))
for entry in entries:
yield name, entry
_addresses_by_name_cache = cache.Cache(_fetch_all_addresses_by_hostnames)
def get_addresses_by_name(name, time_to_live=60.0):
"""Gets and caches addresses for given name.
This is a cached wrapper for function 'socket.getaddrinfo'.
:returns: a sequence of unique addresses bound to given hostname.
"""
try:
results = _addresses_by_name_cache.fetch_all(
[name], timeout=time_to_live)
return tuple(address for name, address in results)
except cache.CacheFetchError as error:
error.reraise_cause()
def make_url_object(object_type):
obj_pl = odl_const.RESOURCE_URL_MAPPINGS.get(object_type, None)
if obj_pl is None:
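
The removed get_addresses_by_name helper was a cached wrapper around
socket.getaddrinfo built on the Cache class above; a minimal sketch of how it
was called on the pre-removal tree (the host name is illustrative):

from networking_odl.common import utils

# Resolve and cache every unique address of the host for 60 seconds; if
# resolution fails, the original exception is re-raised via CacheFetchError.
addresses = utils.get_addresses_by_name('compute-1', time_to_live=60.0)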

View File

@@ -1,315 +0,0 @@
# Copyright (c) 2015-2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import importlib
import logging
import six
from six.moves.urllib import parse
from neutron_lib.api.definitions import portbindings
from oslo_log import log
from oslo_serialization import jsonutils
from networking_odl._i18n import _
from networking_odl.common import cache
from networking_odl.common import client
from networking_odl.common import utils
from networking_odl.ml2 import port_binding
LOG = log.getLogger(__name__)
class NetworkTopologyManager(port_binding.PortBindingController):
# the first valid vif type will be chosen following the order
# on this list. This list can be modified to adapt to user preferences.
valid_vif_types = [
portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS]
# List of class names of registered implementations of interface
# NetworkTopologyParser
network_topology_parsers = [
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser']
def __init__(self, vif_details=None, client=None):
# Details for binding port
self._vif_details = vif_details or {portbindings.CAP_PORT_FILTER: True}
# Rest client used for getting network topology from ODL
self._client = client or NetworkTopologyClient.create_client()
# Table of NetworkTopologyElement
self._elements_by_ip = cache.Cache(
self._fetch_and_parse_network_topology)
# Parsers used for processing network topology
self._parsers = list(self._create_parsers())
LOG.warning(
"networking-topology port binding controller is deprecated "
"and will be removed. switch to pseudo-agentdb-binding.")
def bind_port(self, port_context):
"""Set binding for a valid segment
"""
host_name = port_context.host
elements = list()
try:
# Append to an empty list to add as many elements as possible
# in the case it raises an exception
elements.extend(self._fetch_elements_by_host(host_name))
except Exception:
LOG.exception(
'Error fetching elements for host %(host_name)r.',
{'host_name': host_name}, exc_info=1)
if not elements:
# If it wasn't able to find any network topology element
# for the given host, it falls back to the legacy OVS one, keeping
# the old behaviour
LOG.warning(
'Using legacy OVS network topology element for port '
'binding for host: %(host_name)r.',
{'host_name': host_name})
# Imported here to avoid cyclic module dependencies
from networking_odl.ml2 import ovsdb_topology
elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]
# TODO(Federico Ressi): in case there are multiple candidate virtual
# switch instances for the same host, one of them has to be chosen for
# binding the port. As there isn't any known way to perform this
# selection, it picks the first VIF type that is valid for all switches
# that have been found. This has to be improved.
for vif_type in self.valid_vif_types:
vif_type_is_valid_for_all = True
for element in elements:
if vif_type not in element.valid_vif_types:
# it is invalid for at least one element: discard it
vif_type_is_valid_for_all = False
break
if vif_type_is_valid_for_all:
# This is the best VIF type valid for all elements
LOG.debug(
"Found VIF type %(vif_type)r valid for all network "
"topology elements for host %(host_name)r.",
{'vif_type': vif_type, 'host_name': host_name})
for element in elements:
# It assumes that any element could be good for the given host.
# In most cases exactly one element is expected for
# every compute host
try:
return element.bind_port(
port_context, vif_type, self._vif_details)
except Exception:
LOG.exception(
'Network topology element has failed binding '
'port:\n%(element)s',
{'element': element.to_json()})
LOG.error(
'Unable to bind port element for given host and valid VIF types:\n'
'\thostname: %(host_name)s\n'
'\tvalid VIF types: %(valid_vif_types)s',
{'host_name': host_name,
'valid_vif_types': ', '.join(self.valid_vif_types)})
# TODO(Federico Ressi): should I raise an exception here?
def _create_parsers(self):
for parser_name in self.network_topology_parsers:
try:
yield NetworkTopologyParser.create_parser(parser_name)
except Exception:
LOG.exception(
'Error initializing topology parser: %(parser_name)r',
{'parser_name': parser_name})
def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
'''Yields all network topology elements referring to given host name
'''
host_addresses = [host_name]
try:
# It uses both the compute host name and its known IP addresses to
# recognize topology elements valid for the given compute host
ip_addresses = utils.get_addresses_by_name(host_name)
except Exception:
ip_addresses = []
LOG.exception(
'Unable to resolve IP addresses for host %(host_name)r',
{'host_name': host_name})
else:
host_addresses.extend(ip_addresses)
yield_elements = set()
try:
for __, element in self._elements_by_ip.fetch_all(
host_addresses, cache_timeout):
# yields every element only once
if element not in yield_elements:
yield_elements.add(element)
yield element
except cache.CacheFetchError as error:
# This error is expected in most cases because typically not
# all host_addresses map to a network topology element.
if yield_elements:
# As we need only one element for every host, we ignore the
# case in which other host addresses didn't map to any element
LOG.debug(
'Host addresses not found in networking topology: %s',
', '.join(error.missing_keys))
else:
LOG.exception(
'No such network topology elements for given host '
'%(host_name)r and given IPs: %(ip_addresses)s.',
{'host_name': host_name,
'ip_addresses': ", ".join(ip_addresses)})
error.reraise_cause()
def _fetch_and_parse_network_topology(self, addresses):
# The cache calls this method to fetch new elements when at least one
# of the addresses is not in the cache or has expired.
# pylint: disable=unused-argument
LOG.info('Fetch network topology from ODL.')
response = self._client.get()
response.raise_for_status()
network_topology = response.json()
if LOG.isEnabledFor(logging.DEBUG):
topology_str = jsonutils.dumps(
network_topology, sort_keys=True, indent=4,
separators=(',', ': '))
LOG.debug("Got network topology:\n%s", topology_str)
at_least_one_element_for_asked_addresses = False
for parser in self._parsers:
try:
for element in parser.parse_network_topology(network_topology):
if not isinstance(element, NetworkTopologyElement):
raise TypeError(_(
"Yield element doesn't implement interface "
"'NetworkTopologyElement': {!r}").format(element))
# the same element can be known by multiple host addresses
for host_address in element.host_addresses:
if host_address in addresses:
at_least_one_element_for_asked_addresses = True
yield host_address, element
except Exception:
LOG.exception(
"Parser %(parser)r failed to parse network topology.",
{'parser': parser})
if not at_least_one_element_for_asked_addresses:
# this will mark entries for the given addresses as failed, to allow
# calling this method again as soon as it is requested and to avoid
# waiting for cache expiration
raise ValueError(
_('No such topology element for given host addresses: {}')
.format(', '.join(addresses)))
@six.add_metaclass(abc.ABCMeta)
class NetworkTopologyParser(object):
@classmethod
def create_parser(cls, parser_class_name):
'''Creates a 'NetworkTopologyParser' of given class name.
'''
module_name, class_name = parser_class_name.rsplit('.', 1)
module = importlib.import_module(module_name)
clss = getattr(module, class_name)
if not issubclass(clss, cls):
raise TypeError(_(
"Class {class_name!r} of module {module_name!r} doesn't "
"implement 'NetworkTopologyParser' interface.").format(
class_name=class_name, module_name=module_name))
return clss()
@abc.abstractmethod
def parse_network_topology(self, network_topology):
'''Parses OpenDaylight network topology
Yields all network topology elements implementing
'NetworkTopologyElement' interface found in given network topology.
'''
@six.add_metaclass(abc.ABCMeta)
class NetworkTopologyElement(object):
@abc.abstractproperty
def host_addresses(self):
'''List of known host addresses of a single compute host
Either host names or IP addresses are valid.
Neutron host controller must know at least one of these compute host
names or ip addresses to find this element.
'''
@abc.abstractproperty
def valid_vif_types(self):
'''Returns a tuple listing VIF types supported by the compute node
'''
@abc.abstractmethod
def bind_port(self, port_context, vif_type, vif_details):
'''Bind port context using given vif type and vif details
This method is expected to search for a valid segment and then
call port_context.set_binding()
'''
def to_dict(self):
cls = type(self)
return {
'class': cls.__module__ + '.' + cls.__name__,
'host_addresses': list(self.host_addresses),
'valid_vif_types': list(self.valid_vif_types)}
def to_json(self):
return jsonutils.dumps(
self.to_dict(), sort_keys=True, indent=4, separators=(',', ': '))
class NetworkTopologyClient(client.OpenDaylightRestClient):
_GET_ODL_NETWORK_TOPOLOGY_URL =\
'restconf/operational/network-topology:network-topology'
def __init__(self, url, username, password, timeout):
if url:
url = parse.urlparse(url)
port = ''
if url.port:
port = ':' + str(url.port)
topology_url = '{}://{}{}/{}'.format(
url.scheme, url.hostname, port,
self._GET_ODL_NETWORK_TOPOLOGY_URL)
else:
topology_url = None
super(NetworkTopologyClient, self).__init__(
topology_url, username, password, timeout)
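
The heart of the removed bind_port logic is choosing the first VIF type that
every discovered topology element supports; a standalone sketch of that
selection, assuming elements expose valid_vif_types as in the interface above:

def select_vif_type(preferred_vif_types, elements):
    # Walk the preferred VIF types in order (vhost-user before ovs here) and
    # return the first one that every topology element declares as valid.
    for vif_type in preferred_vif_types:
        if all(vif_type in element.valid_vif_types for element in elements):
            return vif_type
    # No VIF type is valid for all elements; bind_port logged an error here.
    return None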

View File

@@ -1,217 +0,0 @@
# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os
from oslo_log import log
from six.moves.urllib import parse
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from neutron_lib.plugins.ml2 import api
from networking_odl._i18n import _
from networking_odl.ml2 import network_topology
LOG = log.getLogger(__name__)
class OvsdbNetworkTopologyParser(network_topology.NetworkTopologyParser):
def new_element(self, uuid):
return OvsdbNetworkTopologyElement(uuid=uuid)
def parse_network_topology(self, network_topologies):
elements_by_uuid = collections.OrderedDict()
for topology in network_topologies[
'network-topology']['topology']:
if topology['topology-id'].startswith('ovsdb:'):
for node in topology['node']:
# expected url format: ovsdb://uuid/<uuid>[/<path>]
node_url = parse.urlparse(node['node-id'])
if node_url.scheme == 'ovsdb'\
and node_url.netloc == 'uuid':
# split_res = ['', '<uuid>', '<path>']
split_res = node_url.path.split('/', 2)
# uuid is used to identify nodes referring to the same
# element
uuid = split_res[1]
element = elements_by_uuid.get(uuid)
if element is None:
elements_by_uuid[uuid] = element =\
self.new_element(uuid)
# inner_path can be [] or [<path>]
inner_path = split_res[2:]
self._update_element_from_json_ovsdb_topology_node(
node, element, uuid, *inner_path)
# There can be multiple OVS instances connected from the same IP address
# The cache will yield multiple instances for the same key
for __, element in elements_by_uuid.items():
yield element
def _update_element_from_json_ovsdb_topology_node(
self, node, element, uuid, path=None):
if not path:
# global element section (root path)
# fetch remote IP address
element.remote_ip = node["ovsdb:connection-info"]["remote-ip"]
for vif_type_entry in node.get(
"ovsdb:interface-type-entry", []):
# Is this a good place to add other OVS VIF types?
if vif_type_entry.get("interface-type") ==\
"ovsdb:interface-type-dpdkvhostuser":
element.support_vhost_user = True
break
else:
LOG.debug(
'Interface type not found in network topology node %r.',
uuid)
LOG.debug(
'Topology element updated:\n'
' - uuid: %(uuid)r\n'
' - remote_ip: %(remote_ip)r\n'
' - support_vhost_user: %(support_vhost_user)r',
{'uuid': uuid,
'remote_ip': element.remote_ip,
'support_vhost_user': element.support_vhost_user})
elif path == 'bridge/br-int':
datapath_type = node.get("ovsdb:datapath-type")
if datapath_type == "ovsdb:datapath-type-netdev":
element.has_datapath_type_netdev = True
LOG.debug(
'Topology element updated:\n'
' - uuid: %(uuid)r\n'
' - has_datapath_type_netdev: %('
'has_datapath_type_netdev)r',
{'uuid': uuid,
'has_datapath_type_netdev':
element.has_datapath_type_netdev})
class OvsdbNetworkTopologyElement(network_topology.NetworkTopologyElement):
uuid = None
remote_ip = None # it can be None or a string
has_datapath_type_netdev = False # it can be False or True
support_vhost_user = False # it can be False or True
# location for vhostuser sockets
vhostuser_socket_dir = '/var/run/openvswitch'
# prefix for ovs port
port_prefix = 'vhu'
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
@property
def host_addresses(self):
# For now it supports only the remote IP found in the connection info
return self.remote_ip,
@property
def valid_vif_types(self):
if self.has_datapath_type_netdev and self.support_vhost_user:
return [
portbindings.VIF_TYPE_VHOST_USER,
portbindings.VIF_TYPE_OVS]
else:
return [portbindings.VIF_TYPE_OVS]
def bind_port(self, port_context, vif_type, vif_details):
port_context_id = port_context.current['id']
network_context_id = port_context.network.current['id']
# Bind port to the first valid segment
for segment in port_context.segments_to_bind:
if self._is_valid_segment(segment):
# Guess the best VIF type for the given host
vif_details = self._get_vif_details(
vif_details=vif_details, port_context_id=port_context_id,
vif_type=vif_type)
LOG.debug(
'Bind port with valid segment:\n'
'\tport: %(port)r\n'
'\tnetwork: %(network)r\n'
'\tsegment: %(segment)r\n'
'\tVIF type: %(vif_type)r\n'
'\tVIF details: %(vif_details)r',
{'port': port_context_id,
'network': network_context_id,
'segment': segment, 'vif_type': vif_type,
'vif_details': vif_details})
port_context.set_binding(
segment[api.ID], vif_type, vif_details,
status=n_const.PORT_STATUS_ACTIVE)
return
raise ValueError(
_('Unable to find any valid segment in given context.'))
def to_dict(self):
data = super(OvsdbNetworkTopologyElement, self).to_dict()
data.update(
{'uuid': self.uuid,
'has_datapath_type_netdev': self.has_datapath_type_netdev,
'support_vhost_user': self.support_vhost_user,
'valid_vif_types': self.valid_vif_types})
if portbindings.VIF_TYPE_VHOST_USER in self.valid_vif_types:
data.update({'port_prefix': self.port_prefix,
'vhostuser_socket_dir': self.vhostuser_socket_dir})
return data
def _is_valid_segment(self, segment):
"""Verify a segment is valid for the OpenDaylight MechanismDriver.
Verify the requested segment is supported by ODL and return True or
False to indicate this to callers.
"""
network_type = segment[api.NETWORK_TYPE]
return network_type in [n_const.TYPE_FLAT, n_const.TYPE_LOCAL,
n_const.TYPE_GRE, n_const.TYPE_VXLAN,
n_const.TYPE_VLAN]
def _get_vif_details(self, vif_details, port_context_id, vif_type):
vif_details = dict(vif_details)
if vif_type == portbindings.VIF_TYPE_VHOST_USER:
socket_path = os.path.join(
self.vhostuser_socket_dir,
(self.port_prefix + port_context_id)[:14])
vif_details.update({
portbindings.VHOST_USER_MODE:
portbindings.VHOST_USER_MODE_CLIENT,
portbindings.VHOST_USER_OVS_PLUG: True,
portbindings.VHOST_USER_SOCKET: socket_path
})
return vif_details
def __setattr__(self, name, value):
# raises AttributeError if the class doesn't have this attribute
getattr(type(self), name)
super(OvsdbNetworkTopologyElement, self).__setattr__(name, value)
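
The vhost-user socket path produced by _get_vif_details above is just the
socket directory joined with the prefixed port id truncated to 14 characters,
which is why the tests below expect '/var/run/openvswitch/vhuCURRENT_CON' for
the port id 'CURRENT_CONTEXT_ID'; a minimal sketch:

import os

port_prefix = 'vhu'
vhostuser_socket_dir = '/var/run/openvswitch'
port_id = 'CURRENT_CONTEXT_ID'  # port id used by the unit tests below

socket_path = os.path.join(
    vhostuser_socket_dir, (port_prefix + port_id)[:14])
# socket_path == '/var/run/openvswitch/vhuCURRENT_CON'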

View File

@@ -1,242 +0,0 @@
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from networking_odl.common import cache
class TestCache(base.DietTestCase):
def test_init_with_callable(self):
def given_fetch_method():
pass
cache.Cache(given_fetch_method)
def test_init_without_callable(self):
self.assertRaises(TypeError, lambda: cache.Cache(object()))
def test_fecth_once(self):
value = 'value'
given_fetch_method = mock.Mock(return_value=iter([('key', value)]))
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
result = given_cache.fetch('key', 60.0)
# Result is returned
self.assertIs(value, result)
# Then fetch method is called once
given_fetch_method.assert_called_once_with(('key',))
def test_fecth_with_no_result(self):
given_fetch_method = mock.Mock(return_value=iter([]))
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
try:
given_cache.fetch('key', 60.0)
except cache.CacheFetchError as error:
given_fetch_method.assert_called_once_with(('key',))
self.assertRaises(KeyError, error.reraise_cause)
else:
self.fail('Expecting CacheFetchError to be raised.')
@mock.patch.object(cache, 'LOG')
def test_fecth_with_failure(self, logger):
# pylint: disable=unused-argument
given_error = RuntimeError("It doesn't work like this!")
def failing_function(keys):
raise given_error
given_fetch_method = mock.Mock(side_effect=failing_function)
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
try:
given_cache.fetch('key', 60.0)
except cache.CacheFetchError as error:
given_fetch_method.assert_called_once_with(('key',))
self.assertRaises(RuntimeError, error.reraise_cause)
else:
self.fail('Expecting CacheFetchError to be raised.')
logger.warning.assert_called_once_with(
'Error fetching values for keys: %r', "'key'",
exc_info=(type(given_error), given_error, mock.ANY))
def test_fecth_again_after_clear(self):
value1 = 'value1'
value2 = 'value2'
given_fetch_method = mock.Mock(
side_effect=[iter([('key', value1)]),
iter([('key', value2)])])
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
result1 = given_cache.fetch('key', 60.0)
# When cache is cleared
given_cache.clear()
# When value with same key is fetched again
result2 = given_cache.fetch('key', 0.0)
# Then first result is returned
self.assertIs(value1, result1)
# Then fetch method is called twice
self.assertEqual(
[mock.call(('key',)), mock.call(('key',))],
given_fetch_method.mock_calls)
# Then second result is returned
self.assertIs(value2, result2)
def test_fecth_again_before_timeout(self):
value1 = 'value1'
value2 = 'value2'
given_fetch_method = mock.Mock(
side_effect=[iter([('key', value1)]),
iter([('key', value2)])])
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
result1 = given_cache.fetch('key', 1.0)
# When value with same key is fetched again and cached entry is not
# expired
result2 = given_cache.fetch('key', 0.0)
# First result is returned
self.assertIs(value1, result1)
# Then fetch method is called once
given_fetch_method.assert_called_once_with(('key',))
# Then first result is returned twice
self.assertIs(value1, result2)
def test_fecth_again_after_timeout(self):
value1 = 'value1'
value2 = 'value2'
given_fetch_method = mock.Mock(
side_effect=[iter([('key', value1)]),
iter([('key', value2)])])
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
result1 = given_cache.fetch('key', 0.0)
# When value with same key is fetched again and cached entry is
# expired
result2 = given_cache.fetch('key', 0.0)
# Then first result is returned
self.assertIs(value1, result1)
# Then fetch method is called twice
self.assertEqual(
[mock.call(('key',)), mock.call(('key',))],
given_fetch_method.mock_calls)
# Then second result is returned
self.assertIs(value2, result2)
def test_fecth_two_values_yielding_both_before_timeout(self):
value1 = 'value1'
value2 = 'value2'
given_fetch_method = mock.Mock(
return_value=iter([('key1', value1),
('key2', value2)]))
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
result1 = given_cache.fetch('key1', 60.0)
# When value with another key is fetched and cached entry is not
# expired
result2 = given_cache.fetch('key2', 60.0)
# Then first result is returned
self.assertIs(value1, result1)
# Then fetch method is called once
given_fetch_method.assert_called_once_with(('key1',))
# Then second result is returned
self.assertIs(value2, result2)
def test_fecth_two_values_yielding_both_after_timeout(self):
value1 = 'value1'
value2 = 'value2'
given_fetch_method = mock.Mock(
return_value=[('key1', value1), ('key2', value2)])
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
result1 = given_cache.fetch('key1', 0.0)
# When value with another key is fetched and cached entry is
# expired
result2 = given_cache.fetch('key2', 0.0)
# Then first result is returned
self.assertIs(value1, result1)
# Then fetch method is called twice
self.assertEqual(
[mock.call(('key1',)), mock.call(('key2',))],
given_fetch_method.mock_calls)
# Then second result is returned
self.assertIs(value2, result2)
def test_fecth_all_with_multiple_entries(self):
given_fetch_method = mock.Mock(
return_value=iter([('key', 'value1'),
('key', 'value2')]))
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
results = list(given_cache.fetch_all(['key'], 0.0))
# Then fetch method is called once
given_fetch_method.assert_called_once_with(('key',))
# Then both results are yield in the right order
self.assertEqual([('key', 'value1'), ('key', 'value2')], results)
def test_fecth_all_with_repeated_entries(self):
entry = ('key', 'value')
given_fetch_method = mock.Mock(
return_value=iter([entry, entry, entry]))
given_cache = cache.Cache(given_fetch_method)
# When value with key is fetched
results = list(given_cache.fetch_all(['key'], 0.0))
# Then fetch method is called once
given_fetch_method.assert_called_once_with(('key',))
# Then results are yield in the right order
self.assertEqual([entry, entry, entry], results)

View File

@@ -13,11 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from networking_odl.common import cache
from networking_odl.common import utils
@@ -38,138 +35,3 @@ class TestUtils(base.DietTestCase):
def test_make_url_object_conversion(self):
self.assertEqual('networks', utils.make_url_object('network'))
self.assertEqual('l2-gateways', utils.make_url_object('l2_gateway'))
class TestGetAddressesByName(base.DietTestCase):
# pylint: disable=protected-access, unused-argument
def setUp(self):
super(TestGetAddressesByName, self).setUp()
self.clear_cache()
self.addCleanup(self.clear_cache)
time = self.patch(
utils.cache, 'time', clock=mock.Mock(return_value=0.0))
self.clock = time.clock
socket = self.patch(utils, 'socket')
self.getaddrinfo = socket.getaddrinfo
def patch(self, target, name, *args, **kwargs):
context = mock.patch.object(target, name, *args, **kwargs)
mocked = context.start()
self.addCleanup(context.stop)
return mocked
def clear_cache(self):
utils._addresses_by_name_cache.clear()
def test_get_addresses_by_valid_name(self):
self.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 0)),
(2, 2, 17, '', ('127.0.0.1', 0)),
(2, 3, 0, '', ('127.0.0.1', 0)),
(2, 1, 6, '', ('10.237.214.247', 0)),
(2, 2, 17, '', ('10.237.214.247', 0)),
(2, 3, 0, '', ('10.237.214.247', 0))]
# When valid host name is requested
result = utils.get_addresses_by_name('some_host_name')
# Then correct addresses are returned
self.assertEqual(('127.0.0.1', '10.237.214.247'), result)
# Then fetched addresses are cached
self.assertEqual(result, utils.get_addresses_by_name('some_host_name'))
# Then addresses are fetched only once
self.getaddrinfo.assert_called_once_with('some_host_name', None)
def test_get_addresses_by_valid_name_when_cache_expires(self):
self.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 0)),
(2, 2, 17, '', ('127.0.0.1', 0)),
(2, 3, 0, '', ('127.0.0.1', 0)),
(2, 1, 6, '', ('10.237.214.247', 0)),
(2, 2, 17, '', ('10.237.214.247', 0)),
(2, 3, 0, '', ('10.237.214.247', 0))]
# When valid host name is requested
result1 = utils.get_addresses_by_name('some_host_name')
# and after a long time
self.clock.return_value = 1.0e6
# When valid host name is requested
result2 = utils.get_addresses_by_name('some_host_name')
# Then correct addresses are returned
self.assertEqual(('127.0.0.1', '10.237.214.247'), result1)
self.assertEqual(('127.0.0.1', '10.237.214.247'), result2)
# Then addresses are fetched twice
self.getaddrinfo.assert_has_calls(
[mock.call('some_host_name', None),
mock.call('some_host_name', None)])
@mock.patch.object(cache, 'LOG')
def test_get_addresses_by_invalid_name(self, cache_logger):
# Given addresses resolution is failing
given_error = RuntimeError("I don't know him!")
def failing_getaddrinfo(name, service):
raise given_error
self.getaddrinfo.side_effect = failing_getaddrinfo
# When invalid name is requested
self.assertRaises(
RuntimeError, utils.get_addresses_by_name, 'some_host_name')
# When invalid name is requested again
self.assertRaises(
RuntimeError, utils.get_addresses_by_name, 'some_host_name')
# Then result is fetched only once
self.getaddrinfo.assert_has_calls(
[mock.call('some_host_name', None)])
cache_logger.warning.assert_has_calls(
[mock.call(
'Error fetching values for keys: %r', "'some_host_name'",
exc_info=(RuntimeError, given_error, mock.ANY)),
mock.call(
'Error fetching values for keys: %r', "'some_host_name'",
exc_info=(RuntimeError, given_error, mock.ANY))])
@mock.patch.object(cache, 'LOG')
def test_get_addresses_failing_when_expired_in_cache(self, cache_logger):
self.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 0)),
(2, 2, 17, '', ('127.0.0.1', 0)),
(2, 3, 0, '', ('127.0.0.1', 0)),
(2, 1, 6, '', ('10.237.214.247', 0)),
(2, 2, 17, '', ('10.237.214.247', 0)),
(2, 3, 0, '', ('10.237.214.247', 0))]
# Given a valid result is in the cache but expired
utils.get_addresses_by_name('some_host_name')
self.clock.return_value = 1.0e6
# Given addresses resolution is now failing
given_error = RuntimeError("This is top secret.")
def failing_getaddrinfo(name, service):
raise given_error
self.getaddrinfo.side_effect = failing_getaddrinfo
self.assertRaises(
RuntimeError, utils.get_addresses_by_name, 'some_host_name')
# Then the result is fetched again
self.getaddrinfo.assert_has_calls(
[mock.call('some_host_name', None),
mock.call('some_host_name', None)])
cache_logger.warning.assert_called_once_with(
'Error fetching values for keys: %r', "'some_host_name'",
exc_info=(RuntimeError, given_error, mock.ANY))

View File

@@ -1,171 +0,0 @@
{
"network-topology": {
"topology": [
{
"topology-id": "flow:1"
},
{
"node": [
{
"node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-ex",
"ovsdb:bridge-external-ids": [
{
"bridge-external-id-key": "bridge-id",
"bridge-external-id-value": "br-ex"
}
],
"ovsdb:bridge-name": "br-ex",
"ovsdb:bridge-other-configs": [
{
"bridge-other-config-key": "disable-in-band",
"bridge-other-config-value": "true"
}
],
"ovsdb:bridge-uuid": "4ba78705-3ac2-4e36-a2e1-32f1647d97a7",
"ovsdb:datapath-id": "00:00:06:87:a7:4b:36:4e",
"ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
"ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2']",
"termination-point": [
{
"ovsdb:interface-external-ids": [
{
"external-id-key": "iface-id",
"external-id-value": "c44000c6-f199-4609-9325-afd8c72b6777"
},
{
"external-id-key": "iface-status",
"external-id-value": "active"
},
{
"external-id-key": "attached-mac",
"external-id-value": "fa:16:3e:a0:d5:49"
}
],
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "c1081aa3-607f-404e-a71e-ea1dd334b263",
"ovsdb:name": "qg-c44000c6-f1",
"ovsdb:ofport": 1,
"ovsdb:port-uuid": "1a2ef41e-4836-420c-977f-7a662c7abe62",
"tp-id": "qg-c44000c6-f1"
},
{
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "54439f6a-7a88-4cf6-84b7-0645642618f9",
"ovsdb:name": "br-ex",
"ovsdb:ofport": 65534,
"ovsdb:port-uuid": "9bf4c1ab-d111-479d-84ab-1874f166153b",
"tp-id": "br-ex"
}
]
},
{
"node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2",
"ovsdb:connection-info": {
"local-ip": "10.237.214.247",
"local-port": 6640,
"remote-ip": "10.237.214.247",
"remote-port": 43247
},
"ovsdb:managed-node-entry": [
{
"bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-int']"
},
{
"bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-ex']"
}
],
"ovsdb:openvswitch-external-ids": [
{
"external-id-key": "system-id",
"external-id-value": "c4dcfd6c-8f0e-43a6-9cf5-d2a0c37f5c52"
}
],
"ovsdb:openvswitch-other-configs": [
{
"other-config-key": "local_ip",
"other-config-value": "10.237.214.247"
},
{
"other-config-key": "provider_mappings",
"other-config-value": "default:ens786f0"
}
],
"ovsdb:ovs-version": "2.3.2"
},
{
"node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-int",
"ovsdb:bridge-external-ids": [
{
"bridge-external-id-key": "bridge-id",
"bridge-external-id-value": "br-int"
}
],
"ovsdb:bridge-name": "br-int",
"ovsdb:bridge-uuid": "d3acbe7f-cdab-4ef1-80b8-68e5db3b3b7b",
"ovsdb:datapath-id": "00:00:7e:be:ac:d3:f1:4e",
"ovsdb:datapath-type": "ovsdb:datapath-type-system",
"ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2']",
"termination-point": [
{
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "8164bb4f-2b8c-4405-b8de-4b6b776baa27",
"ovsdb:name": "br-int",
"ovsdb:ofport": 65534,
"ovsdb:port-uuid": "c34e1347-6757-4770-a05e-66cfb4b65167",
"tp-id": "br-int"
},
{
"ovsdb:interface-external-ids": [
{
"external-id-key": "iface-id",
"external-id-value": "1d5780fc-da03-4c98-8082-089d70cb65e3"
},
{
"external-id-key": "iface-status",
"external-id-value": "active"
},
{
"external-id-key": "attached-mac",
"external-id-value": "fa:16:3e:ee:3e:36"
}
],
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "00d8d482-abf9-4459-8cb1-9c8e80df4943",
"ovsdb:name": "tap1d5780fc-da",
"ovsdb:ofport": 1,
"ovsdb:port-uuid": "743a236a-a34c-4084-a5ed-8dac56371ca8",
"tp-id": "tap1d5780fc-da"
},
{
"ovsdb:interface-external-ids": [
{
"external-id-key": "iface-id",
"external-id-value": "674fd914-74c0-4065-a88a-929919446555"
},
{
"external-id-key": "iface-status",
"external-id-value": "active"
},
{
"external-id-key": "attached-mac",
"external-id-value": "fa:16:3e:62:0c:d3"
}
],
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "41bde142-61bc-4297-a39d-8b0ee86a0731",
"ovsdb:name": "qr-674fd914-74",
"ovsdb:ofport": 2,
"ovsdb:port-uuid": "1c505a53-ccfd-4745-9526-211016d9cbb3",
"tp-id": "qr-674fd914-74"
}
]
}
],
"topology-id": "ovsdb:1"
},
{
"topology-id": "netvirt:1"
}
]
}
}
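
The ovsdb node-id URLs in the fixture above are what OvsdbNetworkTopologyParser
splits apart; a minimal sketch of how one of them decomposes with urlparse (the
uuid is taken from the fixture):

from six.moves.urllib import parse

node_id = 'ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-int'
url = parse.urlparse(node_id)
# url.scheme == 'ovsdb' and url.netloc == 'uuid'
uuid, inner_path = url.path.split('/', 2)[1:]
# uuid == 'c4ad780f-8f91-4fa4-804e-dd16beb191e2'
# inner_path == 'bridge/br-int'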

View File

@@ -16,7 +16,6 @@
import copy
import mock
import socket
import testscenarios
from oslo_config import cfg
@@ -43,7 +42,6 @@ from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.ml2 import legacy_port_binding
from networking_odl.ml2 import mech_driver
from networking_odl.ml2 import network_topology
from networking_odl.tests import base as odl_base
@@ -132,19 +130,6 @@ class OpenDaylightTestCase(test_plugin.Ml2PluginV2TestCase):
'sendjson',
new=self.check_sendjson).start()
# Prevent test from accidentally connecting to any web service
mock.patch.object(
network_topology, 'NetworkTopologyClient',
return_value=mock.Mock(
specs=network_topology.NetworkTopologyClient,
get=mock.Mock(side_effect=requests.HTTPError))).start()
# Prevent hosts resolution from changing the behaviour of tests
mock.patch.object(
network_topology.utils,
'get_addresses_by_name',
side_effect=socket.gaierror).start()
def check_sendjson(self, method, urlpath, obj):
self.assertFalse(urlpath.startswith("http://"))

View File

@@ -1,477 +0,0 @@
# Copyright (c) 2015-2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path
import mock
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
import requests
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2 import driver_context
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_constants
from neutron_lib.plugins.ml2 import api
from networking_odl.common import cache
from networking_odl.ml2 import mech_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.ml2 import network_topology
from networking_odl.tests import base
LOG = log.getLogger(__name__)
class TestNetworkTopologyManager(base.DietTestCase):
# pylint: disable=protected-access
# given valid and invalid segments
valid_segment = {
api.ID: 'API_ID',
api.NETWORK_TYPE: n_constants.TYPE_LOCAL,
api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
invalid_segment = {
api.ID: 'API_ID',
api.NETWORK_TYPE: n_constants.TYPE_NONE,
api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
segments_to_bind = [valid_segment, invalid_segment]
def setUp(self):
super(TestNetworkTopologyManager, self).setUp()
self.useFixture(base.OpendaylightFeaturesFixture())
cfg.CONF.set_override('port_binding_controller',
'network-topology', 'ml2_odl')
self.patch(network_topology.LOG, 'isEnabledFor', lambda level: True)
# patch given configuration
self.cfg = mocked_cfg = self.patch(network_topology.client, 'cfg')
mocked_cfg.CONF.ml2_odl.url =\
'http://localhost:8181/controller/nb/v2/neutron'
mocked_cfg.CONF.ml2_odl.username = 'admin'
mocked_cfg.CONF.ml2_odl.password = 'admin'
mocked_cfg.CONF.ml2_odl.timeout = 5
@mock.patch.object(cache, 'LOG')
@mock.patch.object(network_topology, 'LOG')
def test_fetch_elements_by_host_with_no_entry(
self, network_topology_logger, cache_logger):
given_client = self.mock_client('ovs_topology.json')
self.mock_get_addresses_by_name(['127.0.0.1', '192.168.0.1'])
given_network_topology = network_topology.NetworkTopologyManager(
client=given_client)
try:
next(given_network_topology._fetch_elements_by_host(
'some_host_name'))
except ValueError as error:
cache_logger.warning.assert_called_once_with(
'Error fetching values for keys: %r',
"'some_host_name', '127.0.0.1', '192.168.0.1'",
exc_info=(ValueError, error, mock.ANY))
network_topology_logger.exception.assert_called_once_with(
'No such network topology elements for given host '
'%(host_name)r and given IPs: %(ip_addresses)s.',
{'ip_addresses': '127.0.0.1, 192.168.0.1',
'host_name': 'some_host_name'})
else:
self.fail('Expected ValueError being raised.')
def test_fetch_element_with_ovs_entry(self):
given_client = self.mock_client('ovs_topology.json')
self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
given_network_topology = network_topology.NetworkTopologyManager(
client=given_client)
elements = given_network_topology._fetch_elements_by_host(
'some_host_name.')
self.assertEqual([
{'class':
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
'has_datapath_type_netdev': False,
'host_addresses': ['10.237.214.247'],
'support_vhost_user': False,
'uuid': 'c4ad780f-8f91-4fa4-804e-dd16beb191e2',
'valid_vif_types': [portbindings.VIF_TYPE_OVS]}],
[e.to_dict() for e in elements])
def test_fetch_elements_with_vhost_user_entry(self):
given_client = self.mock_client('vhostuser_topology.json')
self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
given_network_topology = network_topology.NetworkTopologyManager(
client=given_client)
elements = given_network_topology._fetch_elements_by_host(
'some_host_name.')
self.assertEqual([
{'class':
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
'has_datapath_type_netdev': True,
'host_addresses': ['192.168.66.1'],
'support_vhost_user': True,
'uuid': 'c805d82d-a5d8-419d-bc89-6e3713ff9f6c',
'valid_vif_types': [portbindings.VIF_TYPE_VHOST_USER,
portbindings.VIF_TYPE_OVS],
'port_prefix': 'vhu',
'vhostuser_socket_dir': '/var/run/openvswitch'}],
[e.to_dict() for e in elements])
def mock_get_addresses_by_name(self, ips):
utils = self.patch(
network_topology, 'utils',
mock.Mock(
get_addresses_by_name=mock.Mock(return_value=tuple(ips))))
return utils.get_addresses_by_name
def mock_client(self, topology_name=None):
mocked_client = mock.NonCallableMock(
specs=network_topology.NetworkTopologyClient)
if topology_name:
cached_file_path = path.join(path.dirname(__file__), topology_name)
with open(cached_file_path, 'rt') as fd:
topology = jsonutils.loads(str(fd.read()), encoding='utf-8')
mocked_client.get().json.return_value = topology
return mocked_client
def test_bind_port_from_mech_driver_with_ovs(self):
given_client = self.mock_client('ovs_topology.json')
self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
given_network_topology = network_topology.NetworkTopologyManager(
vif_details={'some': 'detail'},
client=given_client)
self.patch(
network_topology, 'NetworkTopologyManager',
return_value=given_network_topology)
given_driver = mech_driver.OpenDaylightMechanismDriver()
given_driver.odl_drv = mech_driver.OpenDaylightDriver()
given_port_context = self.given_port_context()
# when port is bound
given_driver.bind_port(given_port_context)
# then context binding is setup with returned vif_type and valid
# segment api ID
given_port_context.set_binding.assert_called_once_with(
self.valid_segment[api.ID], portbindings.VIF_TYPE_OVS,
{'some': 'detail'}, status=n_constants.PORT_STATUS_ACTIVE)
def test_bind_port_from_mech_driver_with_vhostuser(self):
given_client = self.mock_client('vhostuser_topology.json')
self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
given_network_topology = network_topology.NetworkTopologyManager(
vif_details={'some': 'detail'},
client=given_client)
self.patch(
network_topology, 'NetworkTopologyManager',
return_value=given_network_topology)
given_driver = mech_driver.OpenDaylightMechanismDriver()
given_driver.odl_drv = mech_driver.OpenDaylightDriver()
given_port_context = self.given_port_context()
# when port is bound
given_driver.bind_port(given_port_context)
expected_vif_details = {
'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
'vhostuser_ovs_plug': True,
'some': 'detail',
'vhostuser_mode': 'client'}
# then context binding is setup with returned vif_type and valid
# segment api ID
given_port_context.set_binding.assert_called_once_with(
self.valid_segment[api.ID],
portbindings.VIF_TYPE_VHOST_USER,
expected_vif_details, status=n_constants.PORT_STATUS_ACTIVE)
def test_bind_port_from_mech_driver_v2_with_ovs(self):
given_client = self.mock_client('ovs_topology.json')
self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
given_network_topology = network_topology.NetworkTopologyManager(
vif_details={'some': 'detail'},
client=given_client)
self.patch(
network_topology, 'NetworkTopologyManager',
return_value=given_network_topology)
given_driver = mech_driver_v2.OpenDaylightMechanismDriver()
given_port_context = self.given_port_context()
given_driver.initialize()
# when port is bound
given_driver.bind_port(given_port_context)
# then context binding is setup with returned vif_type and valid
# segment api ID
given_port_context.set_binding.assert_called_once_with(
self.valid_segment[api.ID], portbindings.VIF_TYPE_OVS,
{'some': 'detail'}, status=n_constants.PORT_STATUS_ACTIVE)
def test_bind_port_from_mech_driver_v2_with_vhostuser(self):
given_client = self.mock_client('vhostuser_topology.json')
self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
given_network_topology = network_topology.NetworkTopologyManager(
vif_details={'some': 'detail'},
client=given_client)
self.patch(
network_topology, 'NetworkTopologyManager',
return_value=given_network_topology)
given_driver = mech_driver_v2.OpenDaylightMechanismDriver()
given_driver._network_topology = given_network_topology
given_port_context = self.given_port_context()
given_driver.initialize()
# when port is bound
given_driver.bind_port(given_port_context)
expected_vif_details = {
'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
'vhostuser_ovs_plug': True,
'some': 'detail',
'vhostuser_mode': 'client'}
# then context binding is setup with returned vif_type and valid
# segment api ID
given_port_context.set_binding.assert_called_once_with(
self.valid_segment[api.ID],
portbindings.VIF_TYPE_VHOST_USER,
expected_vif_details, status=n_constants.PORT_STATUS_ACTIVE)
def test_bind_port_with_vif_type_ovs(self):
given_topology = self._mock_network_topology(
'ovs_topology.json', vif_details={'much': 'details'})
given_port_context = self.given_port_context()
# when port is bound
given_topology.bind_port(given_port_context)
# then context binding is set up with returned vif_type and valid
# segment api ID
given_port_context.set_binding.assert_called_once_with(
self.valid_segment[api.ID], portbindings.VIF_TYPE_OVS,
{'much': 'details'}, status=n_constants.PORT_STATUS_ACTIVE)
def test_bind_port_with_vif_type_vhost_user(self):
given_topology = self._mock_network_topology(
'vhostuser_topology.json', vif_details={'much': 'details'})
given_port_context = self.given_port_context()
# when port is bound
given_topology.bind_port(given_port_context)
# then context binding is set up with returned vif_type and valid
# segment api ID
given_port_context.set_binding.assert_called_once_with(
self.valid_segment[api.ID],
portbindings.VIF_TYPE_VHOST_USER,
{'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
'vhostuser_ovs_plug': True, 'vhostuser_mode': 'client',
'much': 'details'},
status=n_constants.PORT_STATUS_ACTIVE)
@mock.patch.object(network_topology, 'LOG')
def test_bind_port_without_valid_segment(self, logger):
given_topology = self._mock_network_topology('ovs_topology.json')
given_port_context = self.given_port_context(
given_segments=[self.invalid_segment])
# when port is bound
given_topology.bind_port(given_port_context)
self.assertFalse(given_port_context.set_binding.called)
logger.exception.assert_called_once_with(
'Network topology element has failed binding port:\n%(element)s',
{'element': mock.ANY})
logger.error.assert_called_once_with(
'Unable to bind port element for given host and valid VIF types:\n'
'\thostname: %(host_name)s\n'
'\tvalid VIF types: %(valid_vif_types)s',
{'host_name': 'some_host', 'valid_vif_types': 'vhostuser, ovs'})
def _mock_network_topology(self, given_topology, vif_details=None):
self.mock_get_addresses_by_name(
['127.0.0.1', '10.237.214.247', '192.168.66.1'])
return network_topology.NetworkTopologyManager(
client=self.mock_client(given_topology),
vif_details=vif_details)
def given_port_context(self, given_segments=None):
# given NetworkContext
network = mock.MagicMock(spec=driver_api.NetworkContext)
if given_segments is None:
given_segments = self.segments_to_bind
# given port context
return mock.MagicMock(
spec=driver_context.PortContext,
current={'id': 'CURRENT_CONTEXT_ID'},
host='some_host',
segments_to_bind=given_segments,
network=network,
_new_bound_segment=self.valid_segment)
NETOWORK_TOPOLOGY_URL =\
'http://localhost:8181/'\
'restconf/operational/network-topology:network-topology/'
def mock_request_network_topology(self, file_name):
cached_file_path = path.join(
path.dirname(__file__), file_name + '.json')
if path.isfile(cached_file_path):
LOG.debug('Loading topology from file: %r', cached_file_path)
with open(cached_file_path, 'rt') as fd:
topology = jsonutils.loads(str(fd.read()), encoding='utf-8')
else:
LOG.debug(
'Getting topology from ODL: %r', self.NETOWORK_TOPOLOGY_URL)
request = requests.get(
self.NETOWORK_TOPOLOGY_URL, auth=('admin', 'admin'),
headers={'Content-Type': 'application/json'})
request.raise_for_status()
with open(cached_file_path, 'wt') as fd:
LOG.debug('Saving topology to file: %r', cached_file_path)
topology = request.json()
jsonutils.dump(
topology, fd, sort_keys=True, indent=4,
separators=(',', ': '))
mocked_request = self.patch(
mech_driver.odl_client.requests, 'request',
return_value=mock.MagicMock(
spec=requests.Response,
json=mock.MagicMock(return_value=topology)))
return mocked_request
class TestNetworkTopologyClient(base.DietTestCase):
given_host = 'given.host'
given_port = 1234
given_url_with_port = 'http://{}:{}/'.format(
given_host, given_port)
given_url_without_port = 'http://{}/'.format(given_host)
given_username = 'GIVEN_USERNAME'
given_password = 'GIVEN_PASSWORD'
given_timeout = 20
def given_client(
self, url=None, username=None, password=None, timeout=None):
return network_topology.NetworkTopologyClient(
url=url or self.given_url_with_port,
username=username or self.given_username,
password=password or self.given_password,
timeout=timeout or self.given_timeout)
def test_constructor(self):
# When client is created
rest_client = network_topology.NetworkTopologyClient(
url=self.given_url_with_port,
username=self.given_username,
password=self.given_password,
timeout=self.given_timeout)
self.assertEqual(
self.given_url_with_port +
'restconf/operational/network-topology:network-topology',
rest_client.url)
self.assertEqual(
(self.given_username, self.given_password),
rest_client.session.auth)
self.assertEqual(self.given_timeout, rest_client.timeout)
def test_request_with_port(self):
# Given rest client and used 'requests' module
given_client = self.given_client()
mocked_request = self.mocked_request()
# When a request is performed
result = given_client.request(
'GIVEN_METHOD', 'given/path', 'GIVEN_DATA')
# Then request method is called
mocked_request.assert_called_once_with(
'GIVEN_METHOD',
url='http://given.host:1234/restconf/operational/' +
'network-topology:network-topology/given/path',
data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
timeout=self.given_timeout)
# Then request method result is returned
self.assertIs(mocked_request.return_value, result)
def test_request_without_port(self):
# Given rest client and used 'requests' module
given_client = self.given_client(url=self.given_url_without_port)
mocked_request = self.mocked_request()
# When a request is performed
result = given_client.request(
'GIVEN_METHOD', 'given/path', 'GIVEN_DATA')
# Then request method is called
mocked_request.assert_called_once_with(
'GIVEN_METHOD',
url='http://given.host/restconf/operational/' +
'network-topology:network-topology/given/path',
data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
timeout=self.given_timeout)
# Then request method result is returned
self.assertIs(mocked_request.return_value, result)
def test_get(self):
# Given rest client and used 'requests' module
given_client = self.given_client()
mocked_request = self.mocked_request()
# When a request is performed
result = given_client.get('given/path', 'GIVEN_DATA')
# Then request method is called
mocked_request.assert_called_once_with(
'get',
url='http://given.host:1234/restconf/operational/' +
'network-topology:network-topology/given/path',
data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
timeout=self.given_timeout)
# Then request method result is returned
self.assertIs(mocked_request.return_value, result)
def mocked_request(self):
return self.patch(requests.sessions.Session, 'request')

View File

@@ -1,253 +0,0 @@
# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path
import mock
from oslo_log import log
from oslo_serialization import jsonutils
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2 import driver_context
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_constants
from neutron_lib.plugins.ml2 import api
from networking_odl.ml2 import ovsdb_topology
from networking_odl.tests import base
LOG = log.getLogger(__name__)


class TestOvsdbTopologyParser(base.DietTestCase):
def test_parse_network_topology_ovs(self):
given_parser = ovsdb_topology.OvsdbNetworkTopologyParser()
given_topology = self.load_network_topology('ovs_topology.json')
# when parse topology
elements = list(given_parser.parse_network_topology(given_topology))
# then parser yields one element supporting only OVS vif type
self.assertEqual(
[{'class':
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
'has_datapath_type_netdev': False,
'host_addresses': ['10.237.214.247'],
'support_vhost_user': False,
'uuid': 'c4ad780f-8f91-4fa4-804e-dd16beb191e2',
'valid_vif_types': [portbindings.VIF_TYPE_OVS]}],
[e.to_dict() for e in elements])
def test_parse_network_topology_vhostuser(self):
given_parser = ovsdb_topology.OvsdbNetworkTopologyParser()
given_topology = self.load_network_topology('vhostuser_topology.json')
# when parse topology
elements = list(given_parser.parse_network_topology(given_topology))
# then parser yields one element supporting VHOSTUSER and OVS vif types
self.assertEqual(
[{'class':
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
'has_datapath_type_netdev': True,
'host_addresses': ['192.168.66.1'],
'port_prefix': 'vhu',
'support_vhost_user': True,
'uuid': 'c805d82d-a5d8-419d-bc89-6e3713ff9f6c',
'valid_vif_types': [portbindings.VIF_TYPE_VHOST_USER,
portbindings.VIF_TYPE_OVS],
'vhostuser_socket_dir': '/var/run/openvswitch'}],
[e.to_dict() for e in elements])
def load_network_topology(self, file_name):
file_path = path.join(path.dirname(__file__), file_name)
LOG.debug('Loading topology from file: %r', file_path)
with open(file_path, 'rt') as fd:
return jsonutils.loads(str(fd.read()), encoding='utf-8')


class TestOvsdbNetworkingTopologyElement(base.DietTestCase):
# given valid and invalid segments
VALID_SEGMENT = {
api.ID: 'API_ID',
api.NETWORK_TYPE: n_constants.TYPE_LOCAL,
api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
INVALID_SEGMENT = {
api.ID: 'API_ID',
api.NETWORK_TYPE: n_constants.TYPE_NONE,
api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
segments_to_bind = [INVALID_SEGMENT, VALID_SEGMENT]
def setUp(self):
super(TestOvsdbNetworkingTopologyElement, self).setUp()
self.useFixture(base.OpendaylightFeaturesFixture())
def given_element(self, uuid='some_uuid', **kwargs):
return ovsdb_topology.OvsdbNetworkTopologyElement(uuid=uuid, **kwargs)
def test_valid_vif_types_with_no_positive_value(self):
given_element = self.given_element(
has_datapath_type_netdev=False, support_vhost_user=False)
valid_vif_types = given_element.valid_vif_types
self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
def test_valid_vif_types_with_datapath_type_netdev(self):
given_element = self.given_element(
has_datapath_type_netdev=True, support_vhost_user=False)
valid_vif_types = given_element.valid_vif_types
self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
def test_valid_vif_types_with_support_vhost_user(self):
given_element = self.given_element(
has_datapath_type_netdev=False, support_vhost_user=True)
valid_vif_types = given_element.valid_vif_types
self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
def test_valid_vif_types_with_all_positive_values(self):
given_element = self.given_element(
has_datapath_type_netdev=True, support_vhost_user=True)
valid_vif_types = given_element.valid_vif_types
self.assertEqual(
[portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS],
valid_vif_types)
def test_to_json_ovs(self):
given_element = self.given_element(
has_datapath_type_netdev=False, support_vhost_user=True,
remote_ip='192.168.99.33')
json = given_element.to_json()
self.assertEqual(
{'class':
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
'uuid': 'some_uuid',
'host_addresses': ['192.168.99.33'],
'has_datapath_type_netdev': False,
'support_vhost_user': True,
'valid_vif_types': [portbindings.VIF_TYPE_OVS]},
jsonutils.loads(json))
def test_to_json_vhost_user(self):
given_element = self.given_element(
has_datapath_type_netdev=True, support_vhost_user=True,
remote_ip='192.168.99.66')
json = given_element.to_json()
self.assertEqual(
{'class':
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
'uuid': 'some_uuid',
'host_addresses': ['192.168.99.66'],
'has_datapath_type_netdev': True,
'support_vhost_user': True,
'valid_vif_types':
[portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS],
'port_prefix': 'vhu',
'vhostuser_socket_dir': '/var/run/openvswitch'},
jsonutils.loads(json))
def test_set_attr_with_invalid_name(self):
element = self.given_element()
self.assertRaises(
AttributeError, lambda: setattr(element, 'invalid_attribute', 10))
def test_is_valid_segment(self):
"""Validate the _check_segment method."""
# given driver and all network types
given_element = self.given_element(
has_datapath_type_netdev=True, support_vhost_user=True,
remote_ip='192.168.99.66')
all_network_types = [n_constants.TYPE_FLAT, n_constants.TYPE_GRE,
n_constants.TYPE_LOCAL, n_constants.TYPE_VXLAN,
n_constants.TYPE_VLAN, n_constants.TYPE_NONE]
# when checking segments network type
valid_types = {
network_type
for network_type in all_network_types
if given_element._is_valid_segment(
{api.NETWORK_TYPE: network_type})}
# then true is returned only for valid network types
self.assertEqual({
n_constants.TYPE_FLAT, n_constants.TYPE_LOCAL,
n_constants.TYPE_GRE, n_constants.TYPE_VXLAN,
n_constants.TYPE_VLAN}, valid_types)
def test_bind_port_with_vif_type_ovs(self):
given_port_context = self.given_port_context(
given_segments=[self.INVALID_SEGMENT, self.VALID_SEGMENT])
given_element = self.given_element('some_uuid')
# When bind port
given_element.bind_port(
port_context=given_port_context,
vif_type=portbindings.VIF_TYPE_OVS,
vif_details={'some_details': None})
given_port_context.set_binding.assert_called_once_with(
self.VALID_SEGMENT[api.ID], portbindings.VIF_TYPE_OVS,
{'some_details': None}, status=n_constants.PORT_STATUS_ACTIVE)
def test_bind_port_with_vif_type_vhost_user(self):
given_port_context = self.given_port_context(
given_segments=[self.INVALID_SEGMENT, self.VALID_SEGMENT])
given_element = self.given_element('some_uuid')
# When bind port
given_element.bind_port(
port_context=given_port_context,
vif_type=portbindings.VIF_TYPE_VHOST_USER,
vif_details={'some_details': None})
given_port_context.set_binding.assert_called_once_with(
self.VALID_SEGMENT[api.ID],
portbindings.VIF_TYPE_VHOST_USER,
{'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
'some_details': None, 'vhostuser_ovs_plug': True,
'vhostuser_mode': 'client'},
status=n_constants.PORT_STATUS_ACTIVE)
@mock.patch.object(ovsdb_topology, 'LOG')
def test_bind_port_without_valid_segment(self, logger):
given_port_context = self.given_port_context(
given_segments=[self.INVALID_SEGMENT])
given_element = self.given_element('some_uuid')
# when port is bound
self.assertRaises(
ValueError, lambda: given_element.bind_port(
port_context=given_port_context,
vif_type=portbindings.VIF_TYPE_OVS,
vif_details={'some_details': None}))
self.assertFalse(given_port_context.set_binding.called)
def given_port_context(self, given_segments):
# given NetworkContext
network = mock.MagicMock(spec=driver_api.NetworkContext)
# given port context
return mock.MagicMock(
spec=driver_context.PortContext,
current={'id': 'CURRENT_CONTEXT_ID'},
segments_to_bind=given_segments,
network=network)

View File

@@ -1,182 +0,0 @@
{
"network-topology": {
"topology": [
{
"topology-id": "flow:1"
},
{
"node": [
{
"node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int",
"ovsdb:bridge-external-ids": [
{
"bridge-external-id-key": "opendaylight-iid",
"bridge-external-id-value": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int']"
}
],
"ovsdb:bridge-name": "br-int",
"ovsdb:bridge-uuid": "e92ec02d-dba8-46d8-8047-680cab5ee8b0",
"ovsdb:controller-entry": [
{
"controller-uuid": "8521e6df-54bd-48ac-a249-3bb810fd812c",
"is-connected": false,
"target": "tcp:192.168.66.1:6653"
}
],
"ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
"ovsdb:fail-mode": "ovsdb:ovsdb-fail-mode-secure",
"ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
"ovsdb:protocol-entry": [
{
"protocol": "ovsdb:ovsdb-bridge-protocol-openflow-13"
}
],
"termination-point": [
{
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "d21472db-5c3c-4b38-bf18-6ed3a32edff1",
"ovsdb:name": "br-int",
"ovsdb:port-uuid": "30adf59e-ff0d-478f-b37a-e37ea20dddd3",
"tp-id": "br-int"
}
]
},
{
"node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-nian1_1",
"ovsdb:bridge-name": "br-nian1_1",
"ovsdb:bridge-uuid": "243e01cb-e413-4615-a044-b254141e407d",
"ovsdb:datapath-id": "00:00:ca:01:3e:24:15:46",
"ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
"ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
"termination-point": [
{
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "45184fd2-31eb-4c87-a071-2d64a0893662",
"ovsdb:name": "br-nian1_1",
"ovsdb:ofport": 65534,
"ovsdb:port-uuid": "f5952c1b-6b6d-4fd2-b2cd-201b8c9e0779",
"tp-id": "br-nian1_1"
}
]
},
{
"node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-ex",
"ovsdb:bridge-external-ids": [
{
"bridge-external-id-key": "bridge-id",
"bridge-external-id-value": "br-ex"
}
],
"ovsdb:bridge-name": "br-ex",
"ovsdb:bridge-other-configs": [
{
"bridge-other-config-key": "disable-in-band",
"bridge-other-config-value": "true"
}
],
"ovsdb:bridge-uuid": "43f7768e-c2f9-4ae7-8099-8aee5a17add7",
"ovsdb:datapath-id": "00:00:8e:76:f7:43:e7:4a",
"ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
"ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
"termination-point": [
{
"ovsdb:interface-type": "ovsdb:interface-type-internal",
"ovsdb:interface-uuid": "bdec1830-e6a5-4476-adff-569c455adb33",
"ovsdb:name": "br-ex",
"ovsdb:ofport": 65534,
"ovsdb:port-uuid": "7ba5939b-ff13-409d-86de-67556021ddff",
"tp-id": "br-ex"
}
]
},
{
"node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c",
"ovsdb:connection-info": {
"local-ip": "192.168.66.1",
"local-port": 6640,
"remote-ip": "192.168.66.1",
"remote-port": 41817
},
"ovsdb:datapath-type-entry": [
{
"datapath-type": "ovsdb:datapath-type-netdev"
},
{
"datapath-type": "ovsdb:datapath-type-system"
}
],
"ovsdb:interface-type-entry": [
{
"interface-type": "ovsdb:interface-type-ipsec-gre"
},
{
"interface-type": "ovsdb:interface-type-gre"
},
{
"interface-type": "ovsdb:interface-type-gre64"
},
{
"interface-type": "ovsdb:interface-type-dpdkr"
},
{
"interface-type": "ovsdb:interface-type-vxlan"
},
{
"interface-type": "ovsdb:interface-type-dpdkvhostuser"
},
{
"interface-type": "ovsdb:interface-type-tap"
},
{
"interface-type": "ovsdb:interface-type-geneve"
},
{
"interface-type": "ovsdb:interface-type-dpdk"
},
{
"interface-type": "ovsdb:interface-type-internal"
},
{
"interface-type": "ovsdb:interface-type-system"
},
{
"interface-type": "ovsdb:interface-type-lisp"
},
{
"interface-type": "ovsdb:interface-type-patch"
},
{
"interface-type": "ovsdb:interface-type-ipsec-gre64"
},
{
"interface-type": "ovsdb:interface-type-stt"
}
],
"ovsdb:managed-node-entry": [
{
"bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-ex']"
},
{
"bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int']"
},
{
"bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-nian1_1']"
}
],
"ovsdb:openvswitch-other-configs": [
{
"other-config-key": "local_ip",
"other-config-value": "192.168.66.1"
},
{
"other-config-key": "pmd-cpu-mask",
"other-config-value": "400004"
}
]
}
],
"topology-id": "ovsdb:1"
}
]
}
}

View File

@@ -0,0 +1,11 @@
---
prelude: >
Eliminate network topology based port binding
upgrade:
- If the network topology based port binding controller,
  network-topology, is in use, migrate to the pseudo agent
  based port binding controller, pseudo-agentdb-binding.
deprecations:
- The network topology based port binding controller was removed,
  along with the network-topology value for port_binding_controllers.
  Migrate to the pseudo-agentdb-binding port binding controller.
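For deployments affected by this change, a minimal configuration sketch of the expected migration follows. It assumes the option is named port_binding_controller in the [ml2_odl] group of ml2_conf_odl.ini, with controller names matching the setup.cfg entry points below; the exact file path and group name may differ per deployment.

    [ml2_odl]
    # before this change (now removed):
    #   port_binding_controller = network-topology
    # after migrating to the pseudo agent based controller:
    port_binding_controller = pseudo-agentdb-binding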

View File

@@ -62,7 +62,6 @@ neutron.service_plugins =
neutron.db.alembic_migrations =
networking-odl = networking_odl.db.migration:alembic_migrations
networking_odl.ml2.port_binding_controllers =
network-topology = networking_odl.ml2.network_topology:NetworkTopologyManager
legacy-port-binding = networking_odl.ml2.legacy_port_binding:LegacyPortBindingManager
pseudo-agentdb-binding = networking_odl.ml2.pseudo_agentdb_binding:PseudoAgentDBBindingController
oslo.config.opts =