Merge "ML2 additions to support DVR"

Jenkins 2014-07-29 01:12:03 +00:00 committed by Gerrit Code Review
commit 30556c4a23
8 changed files with 317 additions and 38 deletions

View File

@@ -49,6 +49,7 @@ VIF_DETAILS_VLAN = 'vlan'
VIF_TYPE_UNBOUND = 'unbound'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_DISTRIBUTED = 'distributed'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
@@ -66,7 +67,7 @@ VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS,
VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG,
VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET,
VIF_TYPE_MLNX_DIRECT, VIF_TYPE_MLNX_HOSTDEV, VIF_TYPE_HW_VEB,
VIF_TYPE_DVS, VIF_TYPE_OTHER]
VIF_TYPE_DVS, VIF_TYPE_OTHER, VIF_TYPE_DISTRIBUTED]
VNIC_NORMAL = 'normal'
VNIC_DIRECT = 'direct'
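
Note: VIF_TYPE_DISTRIBUTED, added above, lets ML2 report a DVR router interface as bound on all hosting nodes at once rather than to a single binding:host_id. A minimal sketch of how an API consumer might branch on the value; the port-dict shape and the dvr_hosts key are illustrative stand-ins, not Neutron's API:

VIF_TYPE_OVS = 'ovs'
VIF_TYPE_DISTRIBUTED = 'distributed'

def describe_binding(port):
    """Summarize a port's binding from its API dict (simplified)."""
    vif_type = port.get('binding:vif_type')
    if vif_type == VIF_TYPE_DISTRIBUTED:
        # A DVR interface: one logical port, one DVRPortBinding row per host.
        return "distributed across %s" % port.get('dvr_hosts', [])
    return "bound to %s as %s" % (port.get('binding:host_id'), vif_type)

print(describe_binding({'binding:vif_type': VIF_TYPE_DISTRIBUTED,
                        'dvr_hosts': ['compute1', 'compute2']}))
print(describe_binding({'binding:vif_type': VIF_TYPE_OVS,
                        'binding:host_id': 'compute1'}))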

View File

@@ -119,6 +119,13 @@ def delete_dvr_port_binding(session, port_id, host):
delete(synchronize_session=False))
def delete_dvr_port_binding_if_stale(session, binding):
if not binding.router_id and binding.status == n_const.PORT_STATUS_DOWN:
with session.begin(subtransactions=True):
LOG.debug("DVR: Deleting binding %s", binding)
session.delete(binding)
def get_port(session, port_id):
"""Get port record for update within transcation."""

View File

@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.openstack.common import jsonutils
from neutron.plugins.ml2 import db
@@ -149,3 +150,20 @@ class PortContext(MechanismDriverContext, api.PortContext):
self._binding.vif_type = vif_type
self._binding.vif_details = jsonutils.dumps(vif_details)
self._new_port_status = status
class DvrPortContext(PortContext):
def __init__(self, plugin, plugin_context, port, network, binding,
original_port=None):
super(DvrPortContext, self).__init__(
plugin, plugin_context, port, network, binding,
original_port=original_port)
@property
def bound_host(self):
if self._port['device_owner'] == constants.DEVICE_OWNER_DVR_INTERFACE:
agent_host = self._binding.host
else:
agent_host = self._port['binding:host_id']
return agent_host
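
Note: bound_host is the key DVR twist in the port context: a DVR interface port has one DVRPortBinding row per hosting agent, so the host comes from the binding the context was built with rather than from binding:host_id. A standalone sketch (assuming constants.DEVICE_OWNER_DVR_INTERFACE is 'network:router_interface_distributed'):

import collections

DEVICE_OWNER_DVR_INTERFACE = 'network:router_interface_distributed'
Binding = collections.namedtuple('Binding', 'host')

class PortContextSketch(object):
    def __init__(self, port, binding):
        self._port, self._binding = port, binding

    @property
    def bound_host(self):
        if self._port['device_owner'] == DEVICE_OWNER_DVR_INTERFACE:
            # One logical DVR port, many per-host bindings: use the
            # binding this context was built with.
            return self._binding.host
        return self._port['binding:host_id']

ctx = PortContextSketch({'device_owner': DEVICE_OWNER_DVR_INTERFACE},
                        Binding(host='compute2'))
assert ctx.bound_host == 'compute2'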

View File

@@ -42,17 +42,32 @@ class L2populationMechanismDriver(api.MechanismDriver,
LOG.debug(_("Experimental L2 population driver"))
self.rpc_ctx = n_context.get_admin_context_without_session()
self.migrated_ports = {}
self.remove_fdb_entries = {}
def _get_port_fdb_entries(self, port):
return [[port['mac_address'],
return [[port['mac_address'], port['device_owner'],
ip['ip_address']] for ip in port['fixed_ips']]
def delete_port_precommit(self, context):
port = context.current
agent_host = context.host
if port['id'] not in self.remove_fdb_entries:
self.remove_fdb_entries[port['id']] = {}
self.remove_fdb_entries[port['id']][agent_host] = (
self._update_port_down(context, port, agent_host, 1))
def delete_port_postcommit(self, context):
fanout_msg = self._update_port_down(
context, context.current, context.host)
if fanout_msg:
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fanout_msg)
port = context.current
agent_host = context.host
if port['id'] in self.remove_fdb_entries:
for agent_host in list(self.remove_fdb_entries[port['id']]):
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx,
self.remove_fdb_entries[port['id']][agent_host])
self.remove_fdb_entries[port['id']].pop(agent_host, 0)
self.remove_fdb_entries.pop(port['id'], 0)
def _get_diff_ips(self, orig, port):
orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
@@ -72,19 +87,21 @@ class L2populationMechanismDriver(api.MechanismDriver,
context, orig, context.original_host)
if not port_infos:
return
agent, agent_ip, segment, port_fdb_entries = port_infos
agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
orig_mac_ip = [[port['mac_address'], ip] for ip in orig_ips]
port_mac_ip = [[port['mac_address'], ip] for ip in port_ips]
orig_mac_devowner_ip = [[port['mac_address'], port['device_owner'], ip]
for ip in orig_ips]
port_mac_devowner_ip = [[port['mac_address'], port['device_owner'], ip]
for ip in port_ips]
upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
ports = upd_fdb_entries[port['network_id']][agent_ip]
if orig_mac_ip:
ports['before'] = orig_mac_ip
if orig_mac_devowner_ip:
ports['before'] = orig_mac_devowner_ip
if port_mac_ip:
ports['after'] = port_mac_ip
if port_mac_devowner_ip:
ports['after'] = port_mac_devowner_ip
self.L2populationAgentNotify.update_fdb_entries(
self.rpc_ctx, {'chg_ip': upd_fdb_entries})
@@ -98,7 +115,16 @@ class L2populationMechanismDriver(api.MechanismDriver,
diff_ips = self._get_diff_ips(orig, port)
if diff_ips:
self._fixed_ips_changed(context, orig, port, diff_ips)
if (context.host != context.original_host
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
if context.status == const.PORT_STATUS_ACTIVE:
self._update_port_up(context)
if context.status == const.PORT_STATUS_DOWN:
agent_host = context.host
fdb_entries = self._update_port_down(
context, port, agent_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
elif (context.host != context.original_host
and context.status == const.PORT_STATUS_ACTIVE
and not self.migrated_ports.get(orig['id'])):
# The port has been migrated. We have to store the original
@@ -153,7 +179,7 @@ class L2populationMechanismDriver(api.MechanismDriver,
fdb_entries = self._get_port_fdb_entries(port)
return agent, agent_ip, segment, fdb_entries
return agent, agent_host, agent_ip, segment, fdb_entries
def _update_port_up(self, context):
port = context.current
@@ -161,7 +187,7 @@ class L2populationMechanismDriver(api.MechanismDriver,
port_infos = self._get_port_infos(context, port, agent_host)
if not port_infos:
return
agent, agent_ip, segment, port_fdb_entries = port_infos
agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
network_id = port['network_id']
@@ -184,8 +210,9 @@ class L2populationMechanismDriver(api.MechanismDriver,
'ports': {}}}
ports = agent_fdb_entries[network_id]['ports']
network_ports = self.get_network_ports(session, network_id)
for network_port in network_ports:
nondvr_network_ports = self.get_nondvr_network_ports(session,
network_id)
for network_port in nondvr_network_ports:
binding, agent = network_port
if agent.host == agent_host:
continue
@@ -201,6 +228,22 @@ class L2populationMechanismDriver(api.MechanismDriver,
agent_ports += self._get_port_fdb_entries(binding.port)
ports[ip] = agent_ports
dvr_network_ports = self.get_dvr_network_ports(session, network_id)
for network_port in dvr_network_ports:
binding, agent = network_port
if agent.host == agent_host:
continue
ip = self.get_agent_ip(agent)
if not ip:
LOG.debug("Unable to retrieve the agent ip, check "
"the agent %(agent_host)s configuration.",
{'agent_host': agent.host})
continue
agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
ports[ip] = agent_ports
# And notify other agents to add flooding entry
other_fdb_entries[network_id]['ports'][agent_ip].append(
const.FLOODING_ENTRY)
@@ -210,16 +253,19 @@ class L2populationMechanismDriver(api.MechanismDriver,
self.rpc_ctx, agent_fdb_entries, agent_host)
# Notify other agents to add fdb rule for current port
other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries
if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
other_fdb_entries[network_id]['ports'][agent_ip] += (
port_fdb_entries)
self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
other_fdb_entries)
def _update_port_down(self, context, port, agent_host):
def _update_port_down(self, context, port, agent_host,
agent_active_ports_count_for_flooding=0):
port_infos = self._get_port_infos(context, port, agent_host)
if not port_infos:
return
agent, agent_ip, segment, port_fdb_entries = port_infos
agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
network_id = port['network_id']
@@ -231,12 +277,14 @@ class L2populationMechanismDriver(api.MechanismDriver,
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {agent_ip: []}}}
if not agent_active_ports:
if agent_active_ports == agent_active_ports_count_for_flooding:
# The agent is removing its last active port in this network, so
# other agents need to be notified to delete their flooding entry.
other_fdb_entries[network_id]['ports'][agent_ip].append(
const.FLOODING_ENTRY)
# Notify other agents to remove fdb rules for current port
other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries
if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
fdb_entries = port_fdb_entries
other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
return other_fdb_entries
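
Note: two l2pop changes interact above. First, FDB entries grew a device_owner column, so [mac, ip] becomes [mac, device_owner, ip]. Second, port deletion is now gather/scatter: delete_port_precommit() computes the per-host remove_fdb_entries messages while the bindings still exist, passing agent_active_ports_count_for_flooding=1 because the port being deleted still counts as active, and delete_port_postcommit() fans them out. A simplified standalone sketch of that shape; DeleteFanout and the message literal are illustrative, not the driver's API:

def get_port_fdb_entries(port):
    return [[port['mac_address'], port['device_owner'], ip['ip_address']]
            for ip in port['fixed_ips']]

class DeleteFanout(object):
    def __init__(self, notify):
        self.remove_fdb_entries = {}   # port_id -> host -> fdb message
        self.notify = notify

    def precommit(self, port, host, fdb_message):
        # Gather while the DB rows (and per-host bindings) still exist.
        self.remove_fdb_entries.setdefault(port['id'], {})[host] = fdb_message

    def postcommit(self, port):
        # Scatter after the transaction: one remove_fdb_entries RPC per host.
        for host, msg in self.remove_fdb_entries.pop(port['id'], {}).items():
            self.notify(msg)

sent = []
fan = DeleteFanout(sent.append)
port = {'id': 'p1', 'mac_address': 'fa:16:3e:00:00:01',
        'device_owner': 'network:router_interface_distributed',
        'fixed_ips': [{'ip_address': '10.0.0.1'}]}
fan.precommit(port, 'compute1',
              {'net1': {'ports': {'20.0.0.1': get_port_fdb_entries(port)}}})
fan.postcommit(port)
assert len(sent) == 1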

View File

@@ -32,6 +32,7 @@ from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import dvr_mac_db
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import models_v2
@@ -68,6 +69,7 @@ TYPE_MULTI_SEGMENT = 'multi-segment'
class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
dvr_mac_db.DVRDbMixin,
external_net_db.External_net_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
@@ -246,6 +248,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
binding.driver = None
binding.segment = None
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
binding.vif_details = ''
binding.driver = None
binding.segment = None
binding.host = ''
self._update_port_dict_binding(port, binding)
return changes
@@ -870,6 +879,84 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
need_notify=need_port_update_notify)
return bound_port._port
def _process_dvr_port_binding(self, mech_context, context, attrs):
binding = mech_context._binding
port = mech_context.current
if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
binding.vif_details = ''
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.driver = None
binding.segment = None
binding.host = ''
self._update_port_dict_binding(port, binding)
binding.host = attrs and attrs.get(portbindings.HOST_ID)
def update_dvr_port_binding(self, context, id, port):
attrs = port['port']
host = attrs and attrs.get(portbindings.HOST_ID)
host_set = attributes.is_attr_set(host)
if not host_set:
LOG.error(_("No Host supplied to bind DVR Port %s"), id)
return
session = context.session
binding = db.get_dvr_port_binding_by_host(session, id, host)
if (not binding or
binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED):
with session.begin(subtransactions=True):
if not binding:
binding = db.ensure_dvr_port_binding(
session, id, host, router_id=attrs['device_id'])
orig_port = super(Ml2Plugin, self).get_port(context, id)
network = self.get_network(context, orig_port['network_id'])
mech_context = driver_context.DvrPortContext(self,
context, orig_port, network,
binding, original_port=orig_port)
self._process_dvr_port_binding(mech_context, context, attrs)
self.mechanism_manager.bind_port(mech_context)
# Now try to commit the result of attempting to bind the port.
self._commit_dvr_port_binding(mech_context._plugin_context,
orig_port['id'],
host,
mech_context)
def _commit_dvr_port_binding(self, plugin_context,
port_id, host,
mech_context):
session = plugin_context.session
new_binding = mech_context._binding
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
cur_binding = db.get_dvr_port_binding_by_host(session,
port_id,
host)
# Commit our binding results only if port has not been
# successfully bound concurrently by another thread or
# process and no binding inputs have been changed.
commit = ((cur_binding.vif_type in
[portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]) and
new_binding.host == cur_binding.host and
new_binding.vnic_type == cur_binding.vnic_type and
new_binding.profile == cur_binding.profile)
if commit:
# Update the port's binding state with our binding
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
cur_binding.driver = new_binding.driver
cur_binding.segment = new_binding.segment
if cur_binding.profile != new_binding.profile:
cur_binding.profile = new_binding.profile
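
Note: _commit_dvr_port_binding above is an optimistic commit: it re-reads the current binding under a coarse lock plus a transaction, and applies its results only if no concurrent thread bound the port first. A sketch of the check-then-commit shape, with threading.Lock standing in for lockutils.lock('db-access') and dicts standing in for binding rows:

import threading

UNBOUND, FAILED = 'unbound', 'binding_failed'
_db_lock = threading.Lock()     # stand-in for lockutils.lock('db-access')

def commit_binding(cur_binding, new_binding):
    """Apply new_binding only if nobody else bound the port concurrently."""
    with _db_lock:
        # The real code re-reads cur_binding here, inside the transaction
        # (db.get_dvr_port_binding_by_host), before deciding.
        commit = (cur_binding['vif_type'] in (UNBOUND, FAILED)
                  and new_binding['host'] == cur_binding['host'])
        if commit:
            for key in ('vif_type', 'vif_details', 'driver', 'segment'):
                cur_binding[key] = new_binding[key]
        return commit

cur = {'vif_type': UNBOUND, 'host': 'compute1', 'vif_details': '',
       'driver': None, 'segment': None}
new = {'vif_type': 'ovs', 'host': 'compute1', 'vif_details': '{}',
       'driver': 'openvswitch', 'segment': 'seg-1'}
assert commit_binding(cur, new) and cur['vif_type'] == 'ovs'
assert not commit_binding(cur, new)   # second committer loses: already bound
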
def delete_port(self, context, id, l3_port_check=True):
LOG.debug(_("Deleting port %s"), id)
l3plugin = manager.NeutronManager.get_service_plugins().get(
@@ -893,15 +980,24 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
mech_context = driver_context.PortContext(self, context, port,
network, binding)
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
LOG.debug(_("Calling base delete_port"))
mech_context = None
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, id)
for bind in bindings:
mech_context = driver_context.DvrPortContext(
self, context, port, network, bind)
self.mechanism_manager.delete_port_precommit(mech_context)
else:
mech_context = driver_context.PortContext(self, context, port,
network, binding)
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s"
% {"port_id": id, "owner": port['device_owner']})
super(Ml2Plugin, self).delete_port(context, id)
# now that we've left db transaction, we are safe to notify
@@ -909,15 +1005,22 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
l3plugin.notify_routers_updated(context, router_ids)
try:
self.mechanism_manager.delete_port_postcommit(mech_context)
# For both normal and DVR interface ports there is only one
# invocation of delete_port_postcommit(). For DVR interface ports we
# use a gather/scatter technique: the per-host bindings are gathered
# in the delete_port_precommit() call earlier and scattered as l2pop
# rules to the compute nodes in delete_port_postcommit() here.
if mech_context:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
LOG.error(_("mechanism_manager.delete_port_postcommit failed for "
"port %s"), id)
self.notify_security_groups_member_updated(context, port)
def get_bound_port_context(self, plugin_context, port_id):
def get_bound_port_context(self, plugin_context, port_id, host=None):
session = plugin_context.session
with session.begin(subtransactions=True):
try:
@@ -933,8 +1036,18 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
return
port = self._make_port_dict(port_db)
network = self.get_network(plugin_context, port['network_id'])
port_context = driver_context.PortContext(
self, plugin_context, port, network, port_db.port_binding)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
LOG.error(_("Binding info for DVR port %s not found"),
port_id)
return None
port_context = driver_context.DvrPortContext(
self, plugin_context, port, network, binding)
else:
port_context = driver_context.PortContext(
self, plugin_context, port, network, port_db.port_binding)
return self._bind_port_if_needed(port_context)
@@ -956,7 +1069,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
LOG.warning(_("Port %(port)s updated up by agent not found"),
{'port': port_id})
return None
if port.status != status:
if (port.status != status and
port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
@@ -967,12 +1081,53 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
return
binding['status'] = status
binding.update(binding)
updated = True
if (updated and
port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Port %s not found during update"), port_id)
return
original_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
port.status = db.generate_dvr_port_status(session, port['id'])
updated_port = self._make_port_dict(port)
mech_context = (driver_context.DvrPortContext(
self, context, updated_port, network,
binding, original_port=original_port))
self.mechanism_manager.update_port_precommit(mech_context)
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
db.delete_dvr_port_binding_if_stale(session, binding)
return port['id']
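
Note: for a DVR port, update_port_status above updates the per-host binding row and then derives the parent port's status from all bindings via generate_dvr_port_status. A sketch of that aggregation, assuming the helper's precedence is any ACTIVE wins, else DOWN, else BUILD:

PORT_STATUS_ACTIVE, PORT_STATUS_DOWN, PORT_STATUS_BUILD = (
    'ACTIVE', 'DOWN', 'BUILD')

def generate_dvr_port_status(binding_statuses):
    """Aggregate per-host binding statuses into one port status."""
    final_status = PORT_STATUS_BUILD
    for status in binding_statuses:
        if status == PORT_STATUS_ACTIVE:
            return PORT_STATUS_ACTIVE   # any active host makes the port ACTIVE
        if status == PORT_STATUS_DOWN:
            final_status = PORT_STATUS_DOWN
    return final_status

assert generate_dvr_port_status(['DOWN', 'ACTIVE']) == 'ACTIVE'
assert generate_dvr_port_status(['DOWN', 'BUILD']) == 'DOWN'
assert generate_dvr_port_status([]) == 'BUILD'
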
def port_bound_to_host(self, context, port_id, host):
port_host = db.get_port_binding_host(port_id)
return (port_host == host)
port = db.get_port(context.session, port_id)
if not port:
LOG.debug("No Port match for: %s", port_id)
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, port_id)
for b in bindings:
if b.host == host:
return True
LOG.debug("No binding found for DVR port %s", port['id'])
return False
else:
port_host = db.get_port_binding_host(port_id)
return (port_host == host)

View File

@@ -89,7 +89,9 @@ class RpcCallbacks(n_rpc.RpcCallback,
port_id = self._device_to_port_id(device)
plugin = manager.NeutronManager.get_plugin()
port_context = plugin.get_bound_port_context(rpc_context, port_id)
port_context = plugin.get_bound_port_context(rpc_context,
port_id,
host)
if not port_context:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s not found in database"),

View File

@@ -107,6 +107,18 @@ class Ml2DBTestCase(base.BaseTestCase):
ml2_db.delete_dvr_port_binding(
self.ctx.session, 'foo_port_id', 'foo_host')
def test_delete_dvr_port_binding_if_stale(self):
network_id = 'foo_network_id'
port_id = 'foo_port_id'
self._setup_neutron_network(network_id, [port_id])
binding = self._setup_dvr_binding(
network_id, port_id, None, 'foo_host_id')
ml2_db.delete_dvr_port_binding_if_stale(self.ctx.session, binding)
count = (self.ctx.session.query(ml2_models.DVRPortBinding).
filter_by(port_id=binding.port_id).count())
self.assertFalse(count)
def test_get_dvr_port_binding_by_host_not_found(self):
port = ml2_db.get_dvr_port_binding_by_host(
self.ctx.session, 'foo_port_id', 'foo_host_id')

View File

@@ -81,6 +81,7 @@ L2_AGENT_4 = {
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
DEVICE_OWNER_COMPUTE = 'compute:None'
class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
@@ -157,6 +158,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
@@ -178,6 +180,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p1['mac_address'],
p1['device_owner'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -216,11 +219,13 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
host_arg = {portbindings.HOST_ID: HOST,
'admin_state_up': True}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2',
'admin_state_up': True}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,
'admin_state_up',),
**host_arg) as port2:
@@ -244,6 +249,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'ports':
{'20.0.0.2': [constants.FLOODING_ENTRY,
[p2['mac_address'],
p2['device_owner'],
p2_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -265,6 +271,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p1['mac_address'],
p1['device_owner'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -280,14 +287,17 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.subnet(cidr='10.1.0.0/24') as subnet2:
with self.port(subnet=subnet2,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
p1 = port1['port']
@@ -310,6 +320,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'20.0.0.2':
[constants.FLOODING_ENTRY,
[p1['mac_address'],
p1['device_owner'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -334,6 +345,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'20.0.0.1':
[constants.FLOODING_ENTRY,
[p3['mac_address'],
p3['device_owner'],
p3_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -350,9 +362,11 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
p2 = port2['port']
@@ -380,6 +394,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{p2['network_id']:
{'ports':
{'20.0.0.1': [[p2['mac_address'],
p2['device_owner'],
p2_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -395,9 +410,11 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
p2 = port2['port']
@@ -419,6 +436,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p2['mac_address'],
p2['device_owner'],
p2_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -434,6 +452,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
p1 = port['port']
@@ -445,6 +464,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
device=device)
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port2:
p2 = port2['port']
@@ -461,6 +481,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{p2['network_id']:
{'ports':
{'20.0.0.1': [[p2['mac_address'],
p2['device_owner'],
p2_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -476,9 +497,11 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
p1 = port['port']
@@ -496,6 +519,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p1['mac_address'],
p1['device_owner'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -511,6 +535,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
@@ -536,6 +561,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{p1['network_id']:
{'20.0.0.1':
{'after': [[p1['mac_address'],
p1['device_owner'],
'10.0.0.10']]}}}}},
'namespace': None,
'method': 'update_fdb_entries'}
@@ -558,8 +584,10 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{p1['network_id']:
{'20.0.0.1':
{'before': [[p1['mac_address'],
p1['device_owner'],
'10.0.0.10']],
'after': [[p1['mac_address'],
p1['device_owner'],
'10.0.0.16']]}}}}},
'namespace': None,
'method': 'update_fdb_entries'}
@@ -581,6 +609,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{p1['network_id']:
{'20.0.0.1':
{'before': [[p1['mac_address'],
p1['device_owner'],
'10.0.0.2']]}}}}},
'namespace': None,
'method': 'update_fdb_entries'}
@@ -594,6 +623,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
@@ -623,9 +653,11 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
host_arg = {portbindings.HOST_ID: L2_AGENT['host']}
host2_arg = {portbindings.HOST_ID: L2_AGENT_2['host']}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host2_arg) as port2:
p1 = port1['port']
@@ -658,6 +690,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p1['mac_address'],
p1['device_owner'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
@@ -673,9 +706,11 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
host_arg = {portbindings.HOST_ID: L2_AGENT['host']}
host2_arg = {portbindings.HOST_ID: L2_AGENT_2['host']}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet, cidr='10.0.0.0/24',
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=(portbindings.HOST_ID,),
**host2_arg) as port2:
p1 = port1['port']
@@ -714,6 +749,7 @@ class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p1['mac_address'],
p1['device_owner'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},