ML2: DB changes for hierarchical port binding

To support hierarchical port binding, the driver and segment columns
are moved from the ml2_port_bindings and ml2_dvr_port_bindings tables
to a new ml2_port_binding_levels table. This new table can store
multiple levels of binding information for each port. It has the host
as part of its primary key so that it can be used for both normal and
DVR port bindings.
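
To illustrate the intended shape of the new table (everything in this
sketch is hypothetical: the host name, driver names and the port/segment
variables are placeholders, not part of this patch), a port bound first
by a top-of-rack mechanism driver and then by the host's OVS driver would
be recorded as one row per level, using the set_binding_levels() helper
added below:

    # Hypothetical example of the rows a two-level binding would produce.
    from neutron.plugins.ml2 import db as ml2_db
    from neutron.plugins.ml2 import models

    levels = [
        models.PortBindingLevel(port_id=port['id'], host='compute-1', level=0,
                                driver='example_tor', segment_id=vxlan_seg['id']),
        models.PortBindingLevel(port_id=port['id'], host='compute-1', level=1,
                                driver='openvswitch', segment_id=vlan_seg['id']),
    ]
    ml2_db.set_binding_levels(session, levels)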

The cap_port_filter column is also removed from the
ml2_dvr_port_bindings table. It was included in that table by mistake
and was never used, and since the adjacent driver and segment columns
are already being dropped, its removal can trivially be included in the
same DB migration.

The logic required for hierarchical port binding will be implemented
in a dependent patch.

Gerrit Spec: https://review.openstack.org/#/c/139886/

Partially-implements: blueprint ml2-hierarchical-port-binding

Change-Id: I08ddc384763087fbac0fa3da3ed6e99b897df031
Author: Robert Kukura 2014-08-21 15:46:11 -04:00
Committed-by: Henry Gessau
Parent: c49c87d1e4
Commit: 0b790f6496
10 changed files with 282 additions and 84 deletions


@@ -0,0 +1,123 @@
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ML2 hierarchical binding
Revision ID: 2d2a8a565438
Revises: 4119216b7365
Create Date: 2014-08-24 21:56:36.422885
"""
# revision identifiers, used by Alembic.
revision = '2d2a8a565438'
down_revision = '4119216b7365'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
port_binding_tables = ['ml2_port_bindings', 'ml2_dvr_port_bindings']
def upgrade():
inspector = reflection.Inspector.from_engine(op.get_bind())
fk_name = [fk['name'] for fk in
inspector.get_foreign_keys('ml2_port_bindings')
if 'segment' in fk['constrained_columns']]
fk_name_dvr = [fk['name'] for fk in
inspector.get_foreign_keys('ml2_dvr_port_bindings')
if 'segment' in fk['constrained_columns']]
op.create_table(
'ml2_port_binding_levels',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('level', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('driver', sa.String(length=64), nullable=True),
sa.Column('segment_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['segment_id'], ['ml2_network_segments.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('port_id', 'host', 'level')
)
for table in port_binding_tables:
op.execute((
"INSERT INTO ml2_port_binding_levels "
"SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
"FROM %s "
"WHERE host <> '' "
"AND driver <> '';"
) % table)
op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
op.drop_column('ml2_dvr_port_bindings', 'segment')
op.drop_column('ml2_dvr_port_bindings', 'driver')
op.drop_constraint(fk_name[0], 'ml2_port_bindings', 'foreignkey')
op.drop_column('ml2_port_bindings', 'driver')
op.drop_column('ml2_port_bindings', 'segment')
def downgrade():
dialect = op.get_context().bind.dialect.name
op.add_column('ml2_port_bindings',
sa.Column('segment', sa.String(length=36), nullable=True))
op.add_column('ml2_port_bindings',
sa.Column('driver', sa.String(length=64), nullable=True))
op.create_foreign_key(
name=None,
source='ml2_port_bindings', referent='ml2_network_segments',
local_cols=['segment'], remote_cols=['id'], ondelete='SET NULL'
)
op.add_column('ml2_dvr_port_bindings',
sa.Column('driver', sa.String(length=64), nullable=True))
op.add_column('ml2_dvr_port_bindings',
sa.Column('segment', sa.String(length=36), nullable=True))
op.add_column('ml2_dvr_port_bindings',
sa.Column('cap_port_filter', sa.Boolean, nullable=False))
op.create_foreign_key(
name=None,
source='ml2_dvr_port_bindings', referent='ml2_network_segments',
local_cols=['segment'], remote_cols=['id'], ondelete='SET NULL'
)
for table in port_binding_tables:
if dialect == 'postgresql':
op.execute((
"UPDATE %s pb "
"SET driver = pbl.driver, segment = pbl.segment_id "
"FROM ml2_port_binding_levels pbl "
"WHERE pb.port_id = pbl.port_id "
"AND pb.host = pbl.host "
"AND pbl.level = 0;"
) % table)
else:
op.execute((
"UPDATE %s pb "
"INNER JOIN ml2_port_binding_levels pbl "
"ON pb.port_id = pbl.port_id "
"AND pb.host = pbl.host "
"AND pbl.level = 0 "
"SET pb.driver = pbl.driver, pb.segment = pbl.segment_id;"
) % table)
op.drop_table('ml2_port_binding_levels')


@@ -1 +1 @@
4119216b7365
2d2a8a565438


@@ -152,6 +152,44 @@ def get_locked_port_and_binding(session, port_id):
return None, None
def set_binding_levels(session, levels):
if levels:
for level in levels:
session.add(level)
LOG.debug("For port %(port_id)s, host %(host)s, "
"set binding levels %(levels)s",
{'port_id': levels[0].port_id,
'host': levels[0].host,
'levels': levels})
else:
LOG.debug("Attempted to set empty binding levels")
def get_binding_levels(session, port_id, host):
if host:
result = (session.query(models.PortBindingLevel).
filter_by(port_id=port_id, host=host).
order_by(models.PortBindingLevel.level).
all())
LOG.debug("For port %(port_id)s, host %(host)s, "
"got binding levels %(levels)s",
{'port_id': port_id,
'host': host,
'levels': result})
return result
def clear_binding_levels(session, port_id, host):
if host:
(session.query(models.PortBindingLevel).
filter_by(port_id=port_id, host=host).
delete())
LOG.debug("For port %(port_id)s, host %(host)s, "
"cleared binding levels",
{'port_id': port_id,
'host': host})
def ensure_dvr_port_binding(session, port_id, host, router_id=None):
record = (session.query(models.DVRPortBinding).
filter_by(port_id=port_id, host=host).first())
@@ -166,7 +204,6 @@ def ensure_dvr_port_binding(session, port_id, host, router_id=None):
router_id=router_id,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vnic_type=portbindings.VNIC_NORMAL,
cap_port_filter=False,
status=n_const.PORT_STATUS_DOWN)
session.add(record)
return record
@@ -196,6 +233,7 @@ def get_port(session, port_id):
with session.begin(subtransactions=True):
try:
record = (session.query(models_v2.Port).
enable_eagerloads(False).
filter(models_v2.Port.id.startswith(port_id)).
one())
return record
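
For reference, a minimal sketch of how the new level helpers are meant
to be used together when a port is (re)bound for a host, mirroring the
plugin changes further below (session, port_id, host and new_levels are
assumed placeholders):

    # Drop whatever levels were stored by the previous binding, then record
    # the levels produced by the new binding attempt.
    clear_binding_levels(session, port_id, host)
    set_binding_levels(session, new_levels)

    # When a PortContext is later built for this port and host, the stored
    # levels are read back in level order.
    levels = get_binding_levels(session, port_id, host)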


@@ -78,19 +78,19 @@ class SubnetContext(MechanismDriverContext, api.SubnetContext):
class PortContext(MechanismDriverContext, api.PortContext):
def __init__(self, plugin, plugin_context, port, network, binding,
original_port=None):
binding_levels, original_port=None):
super(PortContext, self).__init__(plugin, plugin_context)
self._port = port
self._original_port = original_port
self._network_context = NetworkContext(plugin, plugin_context,
network)
self._binding = binding
self._binding_levels = binding_levels
self._new_bound_segment = None
if original_port:
self._original_bound_segment_id = self._binding.segment
self._original_bound_driver = self._binding.driver
self._original_binding_levels = self._binding_levels
else:
self._original_bound_segment_id = None
self._original_bound_driver = None
self._original_binding_levels = None
self._new_port_status = None
@property
@@ -120,46 +120,41 @@ class PortContext(MechanismDriverContext, api.PortContext):
@property
def binding_levels(self):
# TODO(rkukura): Implement for hierarchical port binding.
if self._binding.segment:
if self._binding_levels:
return [{
api.BOUND_DRIVER: self._binding.driver,
api.BOUND_SEGMENT: self._expand_segment(self._binding.segment)
}]
api.BOUND_DRIVER: level.driver,
api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
} for level in self._binding_levels]
@property
def original_binding_levels(self):
# TODO(rkukura): Implement for hierarchical port binding.
if self._original_bound_segment_id:
if self._original_binding_levels:
return [{
api.BOUND_DRIVER: self._original_bound_driver,
api.BOUND_SEGMENT:
self._expand_segment(self._original_bound_segment_id)
}]
api.BOUND_DRIVER: level.driver,
api.BOUND_SEGMENT: self._expand_segment(level.segment_id)
} for level in self._original_binding_levels]
@property
def top_bound_segment(self):
# TODO(rkukura): Implement for hierarchical port binding.
if self._binding.segment:
return self._expand_segment(self._binding.segment)
if self._binding_levels:
return self._expand_segment(self._binding_levels[0].segment_id)
@property
def original_top_bound_segment(self):
# TODO(rkukura): Implement for hierarchical port binding.
if self._original_bound_segment_id:
return self._expand_segment(self._original_bound_segment_id)
if self._original_binding_levels:
return self._expand_segment(
self._original_binding_levels[0].segment_id)
@property
def bottom_bound_segment(self):
# TODO(rkukura): Implement for hierarchical port binding.
if self._binding.segment:
return self._expand_segment(self._binding.segment)
if self._binding_levels:
return self._expand_segment(self._binding_levels[-1].segment_id)
@property
def original_bottom_bound_segment(self):
# TODO(rkukura): Implement for hierarchical port binding.
if self._original_bound_segment_id:
return self._expand_segment(self._original_bound_segment_id)
if self._original_binding_levels:
return self._expand_segment(
self._original_binding_levels[-1].segment_id)
def _expand_segment(self, segment_id):
segment = db.get_segment_by_id(self._plugin_context.session,
@@ -194,7 +189,7 @@ class PortContext(MechanismDriverContext, api.PortContext):
def set_binding(self, segment_id, vif_type, vif_details,
status=None):
# TODO(rkukura) Verify binding allowed, segment in network
self._binding.segment = segment_id
self._new_bound_segment = segment_id
self._binding.vif_type = vif_type
self._binding.vif_details = jsonutils.dumps(vif_details)
self._new_port_status = status
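
Once the hierarchical binding logic lands in the dependent patch, a
mechanism driver reading context.binding_levels would see one entry per
level rather than at most one; top_bound_segment comes from the first
level and bottom_bound_segment from the last. A rough illustration of
the structure, with invented driver names and segment values:

    # Illustrative value of PortContext.binding_levels for a two-level binding.
    [
        {api.BOUND_DRIVER: 'example_tor',
         api.BOUND_SEGMENT: {'id': 'seg-1', 'network_type': 'vxlan',
                             'physical_network': None, 'segmentation_id': 2001}},
        {api.BOUND_DRIVER: 'openvswitch',
         api.BOUND_SEGMENT: {'id': 'seg-2', 'network_type': 'vlan',
                             'physical_network': 'physnet1', 'segmentation_id': 101}},
    ]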


@@ -26,6 +26,7 @@ from neutron.openstack.common import log
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import models
LOG = log.getLogger(__name__)
@@ -556,31 +557,39 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
binding.
"""
binding = context._binding
port_id = context._port['id']
LOG.debug("Attempting to bind port %(port)s on host %(host)s "
"for vnic_type %(vnic_type)s with profile %(profile)s",
{'port': context._port['id'],
{'port': port_id,
'host': binding.host,
'vnic_type': binding.vnic_type,
'profile': binding.profile})
for driver in self.ordered_mech_drivers:
try:
driver.obj.bind_port(context)
if binding.segment:
binding.driver = driver.name
LOG.debug("Bound port: %(port)s, host: %(host)s, "
segment = context._new_bound_segment
if segment:
context._binding_levels = [
models.PortBindingLevel(port_id=port_id,
host=binding.host,
level=0,
driver=driver.name,
segment_id=segment)
]
LOG.debug("Bound port: %(port)s, "
"host: %(host)s, "
"vnic_type: %(vnic_type)s, "
"profile: %(profile)s, "
"driver: %(driver)s, vif_type: %(vif_type)s, "
"vif_type: %(vif_type)s, "
"vif_details: %(vif_details)s, "
"segment: %(segment)s",
{'port': context._port['id'],
'host': binding.host,
"binding_levels: %(binding_levels)s",
{'port': port_id,
'host': context.host,
'vnic_type': binding.vnic_type,
'profile': binding.profile,
'driver': binding.driver,
'vif_type': binding.vif_type,
'vif_details': binding.vif_details,
'segment': binding.segment})
'binding_levels': context.binding_levels})
return
except Exception:
LOG.exception(_LE("Mechanism driver %s failed in "


@@ -68,10 +68,6 @@ class PortBinding(model_base.BASEV2):
vif_type = sa.Column(sa.String(64), nullable=False)
vif_details = sa.Column(sa.String(4095), nullable=False, default='',
server_default='')
driver = sa.Column(sa.String(64))
segment = sa.Column(sa.String(36),
sa.ForeignKey('ml2_network_segments.id',
ondelete="SET NULL"))
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load port bindings
@@ -82,6 +78,27 @@ class PortBinding(model_base.BASEV2):
cascade='delete'))
class PortBindingLevel(model_base.BASEV2):
"""Represent each level of a port binding.
Stores information associated with each level of an established
port binding. Different levels might correspond to the host and
ToR switch, for instance.
"""
__tablename__ = 'ml2_port_binding_levels'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
host = sa.Column(sa.String(255), nullable=False, primary_key=True)
level = sa.Column(sa.Integer, primary_key=True, autoincrement=False)
driver = sa.Column(sa.String(64))
segment_id = sa.Column(sa.String(36),
sa.ForeignKey('ml2_network_segments.id',
ondelete="SET NULL"))
class DVRPortBinding(model_base.BASEV2):
"""Represent binding-related state of a DVR port.
@@ -103,11 +120,6 @@ class DVRPortBinding(model_base.BASEV2):
server_default=portbindings.VNIC_NORMAL)
profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False,
default='', server_default='')
cap_port_filter = sa.Column(sa.Boolean, nullable=False)
driver = sa.Column(sa.String(64))
segment = sa.Column(sa.String(36),
sa.ForeignKey('ml2_network_segments.id',
ondelete="SET NULL"))
status = sa.Column(sa.String(16), nullable=False)
# Add a relationship to the Port model in order to instruct SQLAlchemy to


@@ -194,13 +194,16 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
return mac_change
def _process_port_binding(self, mech_context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
changes = False
host = attrs and attrs.get(portbindings.HOST_ID)
original_host = binding.host
if (attributes.is_attr_set(host) and
binding.host != host):
original_host != host):
binding.host = host
changes = True
@@ -227,14 +230,14 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
if changes:
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.vif_details = ''
binding.driver = None
binding.segment = None
db.clear_binding_levels(session, port_id, original_host)
mech_context._binding_levels = None
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
binding.vif_details = ''
binding.driver = None
binding.segment = None
db.clear_binding_levels(session, port_id, original_host)
mech_context._binding_levels = None
binding.host = ''
self._update_port_dict_binding(port, binding)
@@ -324,7 +327,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
self._update_port_dict_binding(port, new_binding)
new_context = driver_context.PortContext(
self, orig_context._plugin_context, port,
orig_context._network_context._network, new_binding)
orig_context._network_context._network, new_binding, None)
# Attempt to bind the port and return the context with the
# result.
@@ -381,7 +384,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
cur_binding = db.get_dvr_port_binding_by_host(
session, port_id, orig_binding.host)
cur_context = driver_context.PortContext(
self, plugin_context, port, network, cur_binding,
self, plugin_context, port, network, cur_binding, None,
original_port=oport)
# Commit our binding results only if port has not been
@@ -399,15 +402,17 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
cur_binding.driver = new_binding.driver
cur_binding.segment = new_binding.segment
db.clear_binding_levels(session, port_id, cur_binding.host)
db.set_binding_levels(session, new_context._binding_levels)
cur_context._binding_levels = new_context._binding_levels
# Update PortContext's port dictionary to reflect the
# updated binding state.
self._update_port_dict_binding(port, cur_binding)
# Update the port status if requested by the bound driver.
if new_binding.segment and new_context._new_port_status:
if (new_context._binding_levels and
new_context._new_port_status):
port_db.status = new_context._new_port_status
port['status'] = new_context._new_port_status
@@ -913,8 +918,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
network = self.get_network(context, result['network_id'])
binding = db.add_port_binding(session, result['id'])
mech_context = driver_context.PortContext(self, context, result,
network, binding)
network, binding, None)
self._process_port_binding(mech_context, attrs)
result[addr_pair.ADDRESS_PAIRS] = (
@@ -1020,8 +1024,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
network = self.get_network(context, original_port['network_id'])
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
levels = db.get_binding_levels(session, id, binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, binding,
self, context, updated_port, network, binding, levels,
original_port=original_port)
new_host_port = self._get_host_port_if_changed(mech_context, attrs)
need_port_update_notify |= self._process_port_binding(
@@ -1046,21 +1051,23 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
bound_port = self._bind_port_if_needed(
bound_context = self._bind_port_if_needed(
mech_context,
allow_notify=True,
need_notify=need_port_update_notify)
return bound_port._port
return bound_context._port
def _process_dvr_port_binding(self, mech_context, context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
binding.vif_details = ''
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.driver = None
binding.segment = None
if binding.host:
db.clear_binding_levels(session, port_id, binding.host)
binding.host = ''
self._update_port_dict_binding(port, binding)
@@ -1095,9 +1102,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
binding = db.ensure_dvr_port_binding(
session, id, host, router_id=device_id)
network = self.get_network(context, orig_port['network_id'])
levels = db.get_binding_levels(session, id, host)
mech_context = driver_context.PortContext(self,
context, orig_port, network,
binding, original_port=orig_port)
binding, levels, original_port=orig_port)
self._process_dvr_port_binding(mech_context, context, attrs)
self._bind_port_if_needed(mech_context)
@@ -1130,13 +1138,17 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, id)
for bind in bindings:
levels = db.get_binding_levels(context.session, id,
bind.host)
mech_context = driver_context.PortContext(
self, context, port, network, bind)
self, context, port, network, bind, levels)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
else:
mech_context = driver_context.PortContext(self, context, port,
network, binding)
levels = db.get_binding_levels(context.session, id,
binding.host)
mech_context = driver_context.PortContext(
self, context, port, network, binding, levels)
if is_dvr_enabled and utils.is_dvr_serviced(device_owner):
removed_routers = l3plugin.dvr_deletens_if_no_port(
context, id)
@@ -1203,8 +1215,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
LOG.error(_LE("Binding info for DVR port %s not found"),
port_id)
return None
levels = db.get_binding_levels(session, port_db.id, host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding)
self, plugin_context, port, network, binding, levels)
else:
# since eager loads are disabled in port_db query
# related attribute port_binding could disappear in
@@ -1216,8 +1229,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
"it might have been deleted already."),
port_id)
return
levels = db.get_binding_levels(session, port_db.id,
port_db.port_binding.host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding)
self, plugin_context, port, network, binding, levels)
return self._bind_port_if_needed(port_context)
@@ -1246,9 +1261,11 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
levels = db.get_binding_levels(session, port_id,
port.port_binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, port.port_binding,
original_port=original_port)
levels, original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
@@ -1274,9 +1291,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
original_port['network_id'])
port.status = db.generate_dvr_port_status(session, port['id'])
updated_port = self._make_port_dict(port)
levels = db.get_binding_levels(session, port_id, host)
mech_context = (driver_context.PortContext(
self, context, updated_port, network,
binding, original_port=original_port))
binding, levels, original_port=original_port))
self.mechanism_manager.update_port_precommit(mech_context)
if updated:


@@ -65,7 +65,6 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
router_id=router_id,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vnic_type=portbindings.VNIC_NORMAL,
cap_port_filter=False,
status='DOWN')
self.ctx.session.add(record)
return record


@@ -42,7 +42,8 @@ class TestPortContext(base.BaseTestCase):
plugin_context,
port,
network,
binding)
binding,
None)
self.assertEqual('foohost', ctx.host)
def test_host_super(self):
@@ -60,7 +61,8 @@ class TestPortContext(base.BaseTestCase):
plugin_context,
port,
network,
binding)
binding,
None)
self.assertEqual('host', ctx.host)
def test_status(self):
@@ -77,7 +79,8 @@ class TestPortContext(base.BaseTestCase):
plugin_context,
port,
network,
binding)
binding,
None)
self.assertEqual('foostatus', ctx.status)
def test_status_super(self):
@@ -95,5 +98,6 @@ class TestPortContext(base.BaseTestCase):
plugin_context,
port,
network,
binding)
binding,
None)
self.assertEqual('status', ctx.status)


@@ -444,7 +444,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding)
binding, None)
with contextlib.nested(
mock.patch('neutron.plugins.ml2.plugin.'
'db.get_locked_port_and_binding',
@@ -469,7 +469,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
mech_context = driver_context.PortContext(
plugin, self.context, port['port'],
plugin.get_network(self.context, port['port']['network_id']),
binding)
binding, None)
# test when _commit_port_binding return binding_failed
self._test_bind_port_if_needed(plugin, mech_context, False)
@@ -527,10 +527,10 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
router_id='old_router_id',
vif_type=portbindings.VIF_TYPE_OVS,
vnic_type=portbindings.VNIC_NORMAL,
cap_port_filter=False,
status=constants.PORT_STATUS_DOWN)
plugin = manager.NeutronManager.get_plugin()
mock_network = {'id': 'net_id'}
mock_port = {'id': 'port_id'}
context = mock.Mock()
new_router_id = 'new_router'
attrs = {'device_id': new_router_id, portbindings.HOST_ID: host_id}
@@ -538,7 +538,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
with mock.patch.object(ml2_db, 'get_network_segments',
return_value=[]):
mech_context = driver_context.PortContext(
self, context, 'port', mock_network, binding)
self, context, mock_port, mock_network, binding, None)
plugin._process_dvr_port_binding(mech_context, context, attrs)
self.assertEqual(new_router_id,
mech_context._binding.router_id)