Merge "Refactor ml2_db to pass context"

Jenkins, 2016-12-16 17:05:45 +00:00, committed by Gerrit Code Review
commit 9e5a70c47c
6 changed files with 102 additions and 99 deletions

neutron/db/l3_dvrscheduler_db.py

@@ -183,7 +183,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
         for port in int_ports:
             dvr_binding = (ml2_db.
                            get_distributed_port_binding_by_host(
-                               context.session, port['id'], port_host))
+                               context, port['id'], port_host))
             if dvr_binding:
                 # unbind this port from router
                 dvr_binding['router_id'] = None
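The call-site change above is the shape of the whole commit: ml2_db helpers now take the full request context instead of a bare SQLAlchemy session, and dereference .session themselves. A minimal runnable sketch of the pattern, with stub classes standing in for neutron's context and session (names here are illustrative, not neutron's API):

class FakeSession(object):
    def query(self, model):
        # The real code returns a SQLAlchemy Query; the stub records the call.
        return 'query(%s)' % model


class FakeContext(object):
    """Stands in for the neutron request context, which carries .session."""
    def __init__(self):
        self.session = FakeSession()


# Before: get_distributed_port_binding_by_host(context.session, port_id, host)
# After: the helper receives the context and looks up .session itself.
def get_distributed_port_binding_by_host(context, port_id, host):
    return context.session.query('DistributedPortBinding')


print(get_distributed_port_binding_by_host(FakeContext(), 'port-id', 'host-1'))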

neutron/plugins/ml2/db.py

@@ -38,16 +38,16 @@ LOG = log.getLogger(__name__)

 MAX_PORTS_PER_QUERY = 500


-def add_port_binding(session, port_id):
-    with session.begin(subtransactions=True):
+def add_port_binding(context, port_id):
+    with context.session.begin(subtransactions=True):
         record = models.PortBinding(
             port_id=port_id,
             vif_type=portbindings.VIF_TYPE_UNBOUND)
-        session.add(record)
+        context.session.add(record)
         return record


-def get_locked_port_and_binding(session, port_id):
+def get_locked_port_and_binding(context, port_id):
     """Get port and port binding records for update within transaction."""
     try:
@@ -55,12 +55,12 @@ def get_locked_port_and_binding(session, port_id):
         # to both be added to the session and locked for update. A
         # single joined query should work, but the combination of left
         # outer joins and postgresql doesn't seem to work.
-        port = (session.query(models_v2.Port).
+        port = (context.session.query(models_v2.Port).
                 enable_eagerloads(False).
                 filter_by(id=port_id).
                 with_lockmode('update').
                 one())
-        binding = (session.query(models.PortBinding).
+        binding = (context.session.query(models.PortBinding).
                    enable_eagerloads(False).
                    filter_by(port_id=port_id).
                    with_lockmode('update').
@@ -70,10 +70,10 @@ def get_locked_port_and_binding(session, port_id):
         return None, None


-def set_binding_levels(session, levels):
+def set_binding_levels(context, levels):
     if levels:
         for level in levels:
-            session.add(level)
+            context.session.add(level)
         LOG.debug("For port %(port_id)s, host %(host)s, "
                   "set binding levels %(levels)s",
                   {'port_id': levels[0].port_id,
@@ -83,9 +83,9 @@ def set_binding_levels(session, levels):
         LOG.debug("Attempted to set empty binding levels")


-def get_binding_levels(session, port_id, host):
+def get_binding_levels(context, port_id, host):
     if host:
-        result = (session.query(models.PortBindingLevel).
+        result = (context.session.query(models.PortBindingLevel).
                   filter_by(port_id=port_id, host=host).
                   order_by(models.PortBindingLevel.level).
                   all())
@@ -97,9 +97,9 @@ def get_binding_levels(session, port_id, host):
     return result


-def clear_binding_levels(session, port_id, host):
+def clear_binding_levels(context, port_id, host):
     if host:
-        (session.query(models.PortBindingLevel).
+        (context.session.query(models.PortBindingLevel).
          filter_by(port_id=port_id, host=host).
          delete())
         LOG.debug("For port %(port_id)s, host %(host)s, "
@@ -108,14 +108,14 @@ def clear_binding_levels(session, port_id, host):
                    'host': host})


-def ensure_distributed_port_binding(session, port_id, host, router_id=None):
-    record = (session.query(models.DistributedPortBinding).
+def ensure_distributed_port_binding(context, port_id, host, router_id=None):
+    record = (context.session.query(models.DistributedPortBinding).
               filter_by(port_id=port_id, host=host).first())
     if record:
         return record

     try:
-        with session.begin(subtransactions=True):
+        with context.session.begin(subtransactions=True):
             record = models.DistributedPortBinding(
                 port_id=port_id,
                 host=host,
@@ -123,27 +123,27 @@ def ensure_distributed_port_binding(session, port_id, host, router_id=None):
                 vif_type=portbindings.VIF_TYPE_UNBOUND,
                 vnic_type=portbindings.VNIC_NORMAL,
                 status=n_const.PORT_STATUS_DOWN)
-            session.add(record)
+            context.session.add(record)
             return record
     except db_exc.DBDuplicateEntry:
         LOG.debug("Distributed Port %s already bound", port_id)
-        return (session.query(models.DistributedPortBinding).
+        return (context.session.query(models.DistributedPortBinding).
                 filter_by(port_id=port_id, host=host).one())


-def delete_distributed_port_binding_if_stale(session, binding):
+def delete_distributed_port_binding_if_stale(context, binding):
     if not binding.router_id and binding.status == n_const.PORT_STATUS_DOWN:
-        with session.begin(subtransactions=True):
+        with context.session.begin(subtransactions=True):
             LOG.debug("Distributed port: Deleting binding %s", binding)
-            session.delete(binding)
+            context.session.delete(binding)


-def get_port(session, port_id):
+def get_port(context, port_id):
     """Get port record for update within transaction."""
-    with session.begin(subtransactions=True):
+    with context.session.begin(subtransactions=True):
         try:
-            record = (session.query(models_v2.Port).
+            record = (context.session.query(models_v2.Port).
                       enable_eagerloads(False).
                       filter(models_v2.Port.id.startswith(port_id)).
                       one())
@@ -225,10 +225,10 @@ def make_port_dict_with_security_groups(port, sec_groups):
     return port_dict


-def get_port_binding_host(session, port_id):
+def get_port_binding_host(context, port_id):
     try:
-        with session.begin(subtransactions=True):
-            query = (session.query(models.PortBinding).
+        with context.session.begin(subtransactions=True):
+            query = (context.session.query(models.PortBinding).
                      filter(models.PortBinding.port_id.startswith(port_id)).
                      one())
     except exc.NoResultFound:
@@ -242,10 +242,10 @@ def get_port_binding_host(session, port_id):
     return query.host


-def generate_distributed_port_status(session, port_id):
+def generate_distributed_port_status(context, port_id):
     # an OR'ed value of status assigned to parent port from the
     # distributedportbinding bucket
-    query = session.query(models.DistributedPortBinding)
+    query = context.session.query(models.DistributedPortBinding)
     final_status = n_const.PORT_STATUS_BUILD
     for bind in query.filter(models.DistributedPortBinding.port_id == port_id):
         if bind.status == n_const.PORT_STATUS_ACTIVE:
@@ -255,9 +255,9 @@ def generate_distributed_port_status(session, port_id):
     return final_status


-def get_distributed_port_binding_by_host(session, port_id, host):
-    with session.begin(subtransactions=True):
-        binding = (session.query(models.DistributedPortBinding).
+def get_distributed_port_binding_by_host(context, port_id, host):
+    with context.session.begin(subtransactions=True):
+        binding = (context.session.query(models.DistributedPortBinding).
                    filter(models.DistributedPortBinding.port_id.startswith(port_id),
                           models.DistributedPortBinding.host == host).first())
     if not binding:
@@ -266,9 +266,9 @@ def get_distributed_port_binding_by_host(session, port_id, host):
     return binding


-def get_distributed_port_bindings(session, port_id):
-    with session.begin(subtransactions=True):
-        bindings = (session.query(models.DistributedPortBinding).
+def get_distributed_port_bindings(context, port_id):
+    with context.session.begin(subtransactions=True):
+        bindings = (context.session.query(models.DistributedPortBinding).
                     filter(models.DistributedPortBinding.port_id.startswith(
                         port_id)).all())
     if not bindings:
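Every helper in this file gets the same mechanical rewrite: the first parameter becomes context, and each session.begin / session.query / session.add / session.delete becomes the corresponding context.session call. A self-contained sketch of the before/after shape of add_port_binding, with a stub transaction so the example runs on its own (the stubs are assumptions, not neutron or SQLAlchemy code):

import contextlib


class FakeSession(object):
    """Stub exposing the two session methods this helper relies on."""

    @contextlib.contextmanager
    def begin(self, subtransactions=False):
        # The real session opens a (sub)transaction here; the stub just yields.
        yield

    def add(self, record):
        self.last_added = record


class FakeContext(object):
    def __init__(self):
        self.session = FakeSession()


# Old signature: add_port_binding(session, port_id)
def add_port_binding(context, port_id):                  # new signature
    with context.session.begin(subtransactions=True):
        record = {'port_id': port_id, 'vif_type': 'unbound'}
        context.session.add(record)
        return record


ctx = FakeContext()
assert add_port_binding(ctx, 'p1') is ctx.session.last_added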

neutron/plugins/ml2/plugin.py

@@ -211,7 +211,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
     def _port_provisioned(self, rtype, event, trigger, context, object_id,
                           **kwargs):
         port_id = object_id
-        port = db.get_port(context.session, port_id)
+        port = db.get_port(context, port_id)
         if not port or not port.port_binding:
             LOG.debug("Port %s was deleted so its status cannot be updated.",
                       port_id)
@@ -291,7 +291,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         return mac_change

     def _process_port_binding(self, mech_context, attrs):
-        session = mech_context._plugin_context.session
+        plugin_context = mech_context._plugin_context
         binding = mech_context._binding
         port = mech_context.current
         port_id = port['id']
@@ -330,7 +330,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         if changes:
             binding.vif_type = portbindings.VIF_TYPE_UNBOUND
             binding.vif_details = ''
-            db.clear_binding_levels(session, port_id, original_host)
+            db.clear_binding_levels(plugin_context, port_id, original_host)
             mech_context._clear_binding_levels()
             port['status'] = const.PORT_STATUS_DOWN
             super(Ml2Plugin, self).update_port(
@@ -340,7 +340,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             binding.vif_type = portbindings.VIF_TYPE_UNBOUND
             binding.vif_details = ''
-            db.clear_binding_levels(session, port_id, original_host)
+            db.clear_binding_levels(plugin_context, port_id, original_host)
             mech_context._clear_binding_levels()
             binding.host = ''

@@ -432,19 +432,18 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                              need_notify, try_again):
         port_id = orig_context.current['id']
         plugin_context = orig_context._plugin_context
-        session = plugin_context.session
         orig_binding = orig_context._binding
         new_binding = bind_context._binding

         # After we've attempted to bind the port, we begin a
         # transaction, get the current port state, and decide whether
         # to commit the binding results.
-        with session.begin(subtransactions=True):
+        with plugin_context.session.begin(subtransactions=True):
             # Get the current port state and build a new PortContext
             # reflecting this state as original state for subsequent
             # mechanism driver update_port_*commit() calls.
-            port_db, cur_binding = db.get_locked_port_and_binding(session,
-                                                                  port_id)
+            port_db, cur_binding = db.get_locked_port_and_binding(
+                plugin_context, port_id)
             # Since the mechanism driver bind_port() calls must be made
             # outside a DB transaction locking the port state, it is
             # possible (but unlikely) that the port's state could change
@@ -485,7 +484,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 # instance will then be needed, it does not make sense
                 # to optimize this code to avoid fetching it.
                 cur_binding = db.get_distributed_port_binding_by_host(
-                    session, port_id, orig_binding.host)
+                    plugin_context, port_id, orig_binding.host)
                 cur_context = driver_context.PortContext(
                     self, plugin_context, port, network, cur_binding, None,
                     original_port=oport)
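The comments in this hunk spell out the concurrency pattern _commit_port_binding depends on: the port is bound outside any transaction, and the result is committed only after re-reading the port under a row lock (get_locked_port_and_binding above) and confirming nothing changed in the meantime. A condensed, runnable sketch of that optimistic check-then-commit pattern (classes and names are illustrative only):

class FakeStore(object):
    """Stands in for the locked DB row; real code uses SELECT ... FOR UPDATE."""

    def __init__(self):
        self.binding = {'host': 'host-a', 'vif_type': 'unbound'}

    def get_locked_binding(self):
        return self.binding


def commit_binding(store, orig_binding, new_binding):
    cur_binding = store.get_locked_binding()
    if cur_binding != orig_binding:
        # State changed while binding ran outside the lock: signal a retry.
        return False
    cur_binding.update(new_binding)
    return True


store = FakeStore()
orig = dict(store.binding)                    # snapshot taken before binding
new = {'host': 'host-a', 'vif_type': 'ovs'}   # result computed by bind_port()
assert commit_binding(store, orig, new)
assert store.binding['vif_type'] == 'ovs'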
@@ -505,8 +504,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             # results.
             cur_binding.vif_type = new_binding.vif_type
             cur_binding.vif_details = new_binding.vif_details
-            db.clear_binding_levels(session, port_id, cur_binding.host)
-            db.set_binding_levels(session, bind_context._binding_levels)
+            db.clear_binding_levels(plugin_context, port_id,
+                                    cur_binding.host)
+            db.set_binding_levels(plugin_context,
+                                  bind_context._binding_levels)
             cur_context._binding_levels = bind_context._binding_levels

             # Update PortContext's port dictionary to reflect the
@@ -1255,7 +1256,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             sgids = self._get_security_groups_on_port(context, port)
             self._process_port_create_security_group(context, result, sgids)
             network = self.get_network(context, result['network_id'])
-            binding = db.add_port_binding(session, result['id'])
+            binding = db.add_port_binding(context, result['id'])
             mech_context = driver_context.PortContext(self, context, result,
                                                       network, binding, None)
             self._process_port_binding(mech_context, attrs)
@@ -1389,7 +1390,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         bound_mech_contexts = []

         with session.begin(subtransactions=True):
-            port_db, binding = db.get_locked_port_and_binding(session, id)
+            port_db, binding = db.get_locked_port_and_binding(context, id)
             if not port_db:
                 raise exc.PortNotFound(port_id=id)
             mac_address_updated = self._check_mac_update_allowed(
@@ -1424,7 +1425,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             network = self.get_network(context, original_port['network_id'])
             need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                 context, id, port, updated_port)
-            levels = db.get_binding_levels(session, id, binding.host)
+            levels = db.get_binding_levels(context, id, binding.host)
             # one of the operations above may have altered the model call
             # _make_port_dict again to ensure latest state is reflected so mech
             # drivers, callback handlers, and the API caller see latest state.
@@ -1450,10 +1451,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             # DVR and non-DVR cases here.
             # TODO(Swami): This code need to be revisited.
             if port_db['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-                dist_binding_list = db.get_distributed_port_bindings(session,
+                dist_binding_list = db.get_distributed_port_bindings(context,
                                                                      id)
                 for dist_binding in dist_binding_list:
-                    levels = db.get_binding_levels(session, id,
+                    levels = db.get_binding_levels(context, id,
                                                    dist_binding.host)
                     dist_mech_context = driver_context.PortContext(
                         self, context, updated_port, network,
@@ -1510,7 +1511,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         return bound_context.current

     def _process_distributed_port_binding(self, mech_context, context, attrs):
-        session = mech_context._plugin_context.session
+        plugin_context = mech_context._plugin_context
         binding = mech_context._binding
         port = mech_context.current
         port_id = port['id']
@@ -1519,7 +1520,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             binding.vif_details = ''
             binding.vif_type = portbindings.VIF_TYPE_UNBOUND
             if binding.host:
-                db.clear_binding_levels(session, port_id, binding.host)
+                db.clear_binding_levels(plugin_context, port_id, binding.host)
             binding.host = ''

         self._update_port_dict_binding(port, binding)
@@ -1539,7 +1540,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             return

         session = context.session
-        binding = db.get_distributed_port_binding_by_host(session, id, host)
+        binding = db.get_distributed_port_binding_by_host(context,
+                                                          id, host)
         device_id = attrs and attrs.get('device_id')
         router_id = binding and binding.get('router_id')
         update_required = (not binding or
@@ -1551,10 +1553,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 orig_port = self.get_port(context, id)
                 if not binding:
                     binding = db.ensure_distributed_port_binding(
-                        session, id, host, router_id=device_id)
+                        context, id, host, router_id=device_id)
                 network = self.get_network(context,
                                            orig_port['network_id'])
-                levels = db.get_binding_levels(session, id, host)
+                levels = db.get_binding_levels(context, id, host)
                 mech_context = driver_context.PortContext(self,
                     context, orig_port, network,
                     binding, levels, original_port=orig_port)
@@ -1594,7 +1596,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         session = context.session

         with session.begin(subtransactions=True):
-            port_db, binding = db.get_locked_port_and_binding(session, id)
+            port_db, binding = db.get_locked_port_and_binding(context, id)
             if not port_db:
                 LOG.debug("The port '%s' was deleted", id)
                 return
@@ -1604,17 +1606,17 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             bound_mech_contexts = []
             device_owner = port['device_owner']
             if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
-                bindings = db.get_distributed_port_bindings(context.session,
+                bindings = db.get_distributed_port_bindings(context,
                                                             id)
                 for bind in bindings:
-                    levels = db.get_binding_levels(context.session, id,
+                    levels = db.get_binding_levels(context, id,
                                                    bind.host)
                     mech_context = driver_context.PortContext(
                         self, context, port, network, bind, levels)
                     self.mechanism_manager.delete_port_precommit(mech_context)
                     bound_mech_contexts.append(mech_context)
             else:
-                levels = db.get_binding_levels(context.session, id,
+                levels = db.get_binding_levels(context, id,
                                                binding.host)
                 mech_context = driver_context.PortContext(
                     self, context, port, network, binding, levels)
@@ -1681,12 +1683,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
             binding = db.get_distributed_port_binding_by_host(
-                session, port['id'], host)
+                plugin_context, port['id'], host)
             if not binding:
                 LOG.error(_LE("Binding info for DVR port %s not found"),
                           port_id)
                 return None
-            levels = db.get_binding_levels(session, port_db.id, host)
+            levels = db.get_binding_levels(plugin_context,
+                                           port_db.id, host)
             port_context = driver_context.PortContext(
                 self, plugin_context, port, network, binding, levels)
         else:
@@ -1700,7 +1703,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                               "it might have been deleted already."),
                           port_id)
                 return
-            levels = db.get_binding_levels(session, port_db.id,
+            levels = db.get_binding_levels(plugin_context, port_db.id,
                                            port_db.port_binding.host)
             port_context = driver_context.PortContext(
                 self, plugin_context, port, network, binding, levels)
@@ -1720,7 +1723,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         updated = False
         session = context.session
         with session.begin(subtransactions=True):
-            port = db.get_port(session, port_id)
+            port = db.get_port(context, port_id)
             if not port:
                 LOG.debug("Port %(port)s update to %(val)s by agent not found",
                           {'port': port_id, 'val': status})
@@ -1735,7 +1738,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 updated_port = self._make_port_dict(port)
                 network = network or self.get_network(
                     context, original_port['network_id'])
-                levels = db.get_binding_levels(session, port.id,
+                levels = db.get_binding_levels(context, port.id,
                                                port.port_binding.host)
                 mech_context = driver_context.PortContext(
                     self, context, updated_port, network, port.port_binding,
@@ -1744,7 +1747,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 updated = True
             elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                 binding = db.get_distributed_port_binding_by_host(
-                    session, port['id'], host)
+                    context, port['id'], host)
                 if not binding:
                     return
                 binding['status'] = status
@@ -1754,7 +1757,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         if (updated and
                 port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
             with session.begin(subtransactions=True):
-                port = db.get_port(session, port_id)
+                port = db.get_port(context, port_id)
                 if not port:
                     LOG.warning(_LW("Port %s not found during update"),
                                 port_id)
@@ -1762,10 +1765,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 original_port = self._make_port_dict(port)
                 network = network or self.get_network(
                     context, original_port['network_id'])
-                port.status = db.generate_distributed_port_status(session,
+                port.status = db.generate_distributed_port_status(context,
                                                                   port['id'])
                 updated_port = self._make_port_dict(port)
-                levels = db.get_binding_levels(session, port_id, host)
+                levels = db.get_binding_levels(context, port_id, host)
                 mech_context = (driver_context.PortContext(
                     self, context, updated_port, network,
                     binding, levels, original_port=original_port))
@@ -1785,7 +1788,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 **kwargs)

         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            db.delete_distributed_port_binding_if_stale(session, binding)
+            db.delete_distributed_port_binding_if_stale(context, binding)

         return port['id']

@@ -1793,7 +1796,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
     def port_bound_to_host(self, context, port_id, host):
         if not host:
             return
-        port = db.get_port(context.session, port_id)
+        port = db.get_port(context, port_id)
         if not port:
             LOG.debug("No Port match for: %s", port_id)
             return
         if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-            bindings = db.get_distributed_port_bindings(context.session,
+            bindings = db.get_distributed_port_bindings(context,
                                                         port_id)
             for b in bindings:
                 if b.host == host:
@@ -1806,7 +1809,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             LOG.debug("No binding found for DVR port %s", port['id'])
             return
         else:
-            port_host = db.get_port_binding_host(context.session, port_id)
+            port_host = db.get_port_binding_host(context, port_id)
             return port if (port_host == host) else None

     @db_api.retry_if_session_inactive()
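In the plugin the recurring edit is to stop unpacking session = context.session (or plugin_context.session) at the top of a method: transactions are still opened through context.session, but every db helper now receives the context whole. A plausible motive, hedged because the commit title only states the mechanical change, is that helpers owning the context can later derive their transactions from it (in the style of oslo.db's enginefacade) without touching call sites again. Sketch with a stub transaction manager; the stub is an assumption, not the oslo.db API:

import contextlib


class FakeReader(object):
    """Stand-in for an enginefacade-style transaction manager."""

    @contextlib.contextmanager
    def using(self, context):
        # A real manager would begin/commit a transaction bound to the
        # context; the stub just hands back the context's session.
        yield context.session


reader = FakeReader()


class FakeSession(object):
    def query(self, model):
        return ['row-for-%s' % model]


class FakeContext(object):
    def __init__(self):
        self.session = FakeSession()


def get_distributed_port_bindings(context, port_id):
    # Possible only because the helper receives the context, not a session.
    with reader.using(context) as session:
        return session.query('DistributedPortBinding')


print(get_distributed_port_bindings(FakeContext(), 'p1'))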

neutron/plugins/ml2/rpc.py

@@ -244,7 +244,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
-            port = ml2_db.get_port(rpc_context.session, port_id)
+            port = ml2_db.get_port(rpc_context, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
@@ -260,7 +260,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
         if not l2pop_driver:
             return
         if not port:
-            port = ml2_db.get_port(rpc_context.session, port_id)
+            port = ml2_db.get_port(rpc_context, port_id)
         if not port:
             return
         is_ha_port = l3_hamode_db.is_ha_router_port(rpc_context,
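rpc_context here is already a full neutron request context, rebuilt from the incoming RPC message, so it satisfies the new get_port signature as-is; only the .session unpacking disappears. A one-screen sketch (the stub attribute layout is an assumption):

class FakeSession(object):
    def query(self, model):
        return None


class FakeRpcContext(object):
    """Stands in for the context deserialized from an RPC message."""

    def __init__(self):
        self.session = FakeSession()


def get_port(context, port_id):
    return context.session.query('Port')


rpc_context = FakeRpcContext()
# Before: ml2_db.get_port(rpc_context.session, port_id)
port = get_port(rpc_context, 'port-id')      # after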

neutron/tests/unit/plugins/ml2/test_db.py

@@ -170,7 +170,7 @@ class Ml2DBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_network(network_id)
         self._setup_neutron_port(network_id, port_id)

-        port = ml2_db.add_port_binding(self.ctx.session, port_id)
+        port = ml2_db.add_port_binding(self.ctx, port_id)
         self.assertEqual(port_id, port.port_id)
         self.assertEqual(portbindings.VIF_TYPE_UNBOUND, port.vif_type)
@@ -183,7 +183,7 @@ class Ml2DBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_port(network_id, port_id)
         self._setup_neutron_portbinding(port_id, vif_type, host)

-        port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id)
+        port_host = ml2_db.get_port_binding_host(self.ctx, port_id)
         self.assertEqual(host, port_host)

     def test_get_port_binding_host_multiple_results_found(self):
@@ -199,13 +199,13 @@ class Ml2DBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_port(network_id, port_id_two)
         self._setup_neutron_portbinding(port_id_two, vif_type, host)

-        port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id)
+        port_host = ml2_db.get_port_binding_host(self.ctx, port_id)
         self.assertIsNone(port_host)

     def test_get_port_binding_host_result_not_found(self):
         port_id = uuidutils.generate_uuid()

-        port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id)
+        port_host = ml2_db.get_port_binding_host(self.ctx, port_id)
         self.assertIsNone(port_host)

     def test_get_port(self):
@@ -214,7 +214,7 @@ class Ml2DBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_network(network_id)
         self._setup_neutron_port(network_id, port_id)

-        port = ml2_db.get_port(self.ctx.session, port_id)
+        port = ml2_db.get_port(self.ctx, port_id)
         self.assertEqual(port_id, port.id)

     def test_get_port_multiple_results_found(self):
@@ -226,12 +226,12 @@ class Ml2DBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_port(network_id, port_id_one)
         self._setup_neutron_port(network_id, port_id_two)

-        port = ml2_db.get_port(self.ctx.session, port_id)
+        port = ml2_db.get_port(self.ctx, port_id)
         self.assertIsNone(port)

     def test_get_port_result_not_found(self):
         port_id = uuidutils.generate_uuid()

-        port = ml2_db.get_port(self.ctx.session, port_id)
+        port = ml2_db.get_port(self.ctx, port_id)
         self.assertIsNone(port)

     def test_get_port_from_device_mac(self):
@@ -253,7 +253,7 @@ class Ml2DBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_port(network_id, port_id)
         self._setup_neutron_portbinding(port_id, vif_type, host)

-        port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session,
+        port, binding = ml2_db.get_locked_port_and_binding(self.ctx,
                                                            port_id)
         self.assertEqual(port_id, port.id)
         self.assertEqual(port_id, binding.port_id)
@@ -261,7 +261,7 @@ class Ml2DBTestCase(testlib_api.SqlTestCase):
     def test_get_locked_port_and_binding_result_not_found(self):
         port_id = uuidutils.generate_uuid()

-        port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session,
+        port, binding = ml2_db.get_locked_port_and_binding(self.ctx,
                                                            port_id)
         self.assertIsNone(port)
         self.assertIsNone(binding)
@@ -322,7 +322,7 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
             query_first.return_value = []
             with mock.patch.object(ml2_db.LOG, 'debug') as log_trace:
                 binding = ml2_db.ensure_distributed_port_binding(
-                    self.ctx.session, port_id, host_id, router_id)
+                    self.ctx, port_id, host_id, router_id)
         self.assertTrue(query_first.called)
         self.assertTrue(log_trace.called)
         self.assertEqual(port_id, binding.port_id)
@@ -333,7 +333,7 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_network(network_id, [port_id])
         router = self._setup_neutron_router()
         ml2_db.ensure_distributed_port_binding(
-            self.ctx.session, port_id, 'foo_host', router.id)
+            self.ctx, port_id, 'foo_host', router.id)
         expected = (self.ctx.session.query(models.DistributedPortBinding).
                     filter_by(port_id=port_id).one())
         self.assertEqual(port_id, expected.port_id)
@@ -344,9 +344,9 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
         self._setup_neutron_network(network_id, [port_id])
         router = self._setup_neutron_router()
         ml2_db.ensure_distributed_port_binding(
-            self.ctx.session, port_id, 'foo_host_1', router.id)
+            self.ctx, port_id, 'foo_host_1', router.id)
         ml2_db.ensure_distributed_port_binding(
-            self.ctx.session, port_id, 'foo_host_2', router.id)
+            self.ctx, port_id, 'foo_host_2', router.id)
         bindings = (self.ctx.session.query(models.DistributedPortBinding).
                     filter_by(port_id=port_id).all())
         self.assertEqual(2, len(bindings))
@@ -358,7 +358,7 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
         binding = self._setup_distributed_binding(
             network_id, port_id, None, 'foo_host_id')

-        ml2_db.delete_distributed_port_binding_if_stale(self.ctx.session,
+        ml2_db.delete_distributed_port_binding_if_stale(self.ctx,
                                                         binding)
         count = (self.ctx.session.query(models.DistributedPortBinding).
                  filter_by(port_id=binding.port_id).count())
@@ -366,11 +366,11 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
     def test_get_distributed_port_binding_by_host_not_found(self):
         port = ml2_db.get_distributed_port_binding_by_host(
-            self.ctx.session, 'foo_port_id', 'foo_host_id')
+            self.ctx, 'foo_port_id', 'foo_host_id')
         self.assertIsNone(port)

     def test_get_distributed_port_bindings_not_found(self):
-        port = ml2_db.get_distributed_port_bindings(self.ctx.session,
+        port = ml2_db.get_distributed_port_bindings(self.ctx,
                                                     'foo_port_id')
         self.assertFalse(len(port))
@@ -384,7 +384,7 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
             network_id, port_id_1, router.id, 'foo_host_id_1')
         self._setup_distributed_binding(
             network_id, port_id_1, router.id, 'foo_host_id_2')
-        ports = ml2_db.get_distributed_port_bindings(self.ctx.session,
+        ports = ml2_db.get_distributed_port_bindings(self.ctx,
                                                      'foo_port_id')
         self.assertEqual(2, len(ports))
@@ -418,6 +418,6 @@ class Ml2DvrDBTestCase(testlib_api.SqlTestCase):
         with self.ctx.session.begin(subtransactions=True):
             self.ctx.session.delete(port)
         self.assertEqual([], warning_list)
-        ports = ml2_db.get_distributed_port_bindings(self.ctx.session,
+        ports = ml2_db.get_distributed_port_bindings(self.ctx,
                                                      'port_id')
         self.assertEqual(0, len(ports))
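The test updates are equally mechanical: self.ctx, the admin context whose .session every old call unpacked, is now passed whole, and the helper lands on the very same session object. A tiny runnable sketch of why no other fixture changes are needed (stubs, not the real test classes):

class FakeSession(object):
    pass


class FakeAdminContext(object):
    """Stands in for the admin context the tests keep as self.ctx."""

    def __init__(self):
        self.session = FakeSession()


def get_port(context, port_id):
    """New-style helper body: resolves .session itself."""
    return context.session


ctx = FakeAdminContext()
# Old tests: ml2_db.get_port(ctx.session, port_id)
# New tests: ml2_db.get_port(ctx, port_id)
assert get_port(ctx, 'p1') is ctx.session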

neutron/tests/unit/plugins/ml2/test_plugin.py

@@ -1113,7 +1113,7 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
         plugin = directory.get_plugin()
         with self.port() as port:
             port_db, binding = ml2_db.get_locked_port_and_binding(
-                ctx.session, port['port']['id'])
+                ctx, port['port']['id'])
             with mock.patch('neutron.plugins.ml2.plugin.'
                             'db.get_locked_port_and_binding') as lock:
                 lock.side_effect = [db_exc.DBDeadlock,
@@ -1537,7 +1537,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
         # create a port and delete it so we have an expired mechanism context
         with self.port() as port:
             plugin = directory.get_plugin()
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
+            binding = ml2_db.get_locked_port_and_binding(self.context,
                                                          port['port']['id'])[1]
             binding['host'] = 'test'
             mech_context = driver_context.PortContext(
@@ -1558,7 +1558,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
     def _create_port_and_bound_context(self, port_vif_type, bound_vif_type):
         with self.port() as port:
             plugin = directory.get_plugin()
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
+            binding = ml2_db.get_locked_port_and_binding(self.context,
                                                          port['port']['id'])[1]
             binding['host'] = 'fake_host'
             binding['vif_type'] = port_vif_type
@@ -1658,7 +1658,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
     def test_update_port_binding_host_id_none(self):
         with self.port() as port:
             plugin = directory.get_plugin()
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
+            binding = ml2_db.get_locked_port_and_binding(self.context,
                                                          port['port']['id'])[1]
             binding['host'] = 'test'
             mech_context = driver_context.PortContext(
@@ -1675,7 +1675,7 @@ class TestMl2PortBinding(Ml2PluginV2TestCase,
     def test_update_port_binding_host_id_not_changed(self):
         with self.port() as port:
             plugin = directory.get_plugin()
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
+            binding = ml2_db.get_locked_port_and_binding(self.context,
                                                          port['port']['id'])[1]
             binding['host'] = 'test'
             mech_context = driver_context.PortContext(
@@ -2726,7 +2726,7 @@ class TestML2Segments(Ml2PluginV2TestCase):
         ml2_db.subscribe()
         plugin = directory.get_plugin()
         with self.port(device_owner=fake_owner_compute) as port:
-            binding = ml2_db.get_locked_port_and_binding(self.context.session,
+            binding = ml2_db.get_locked_port_and_binding(self.context,
                                                          port['port']['id'])[1]
             binding['host'] = 'host-ovs-no_filter'
             mech_context = driver_context.PortContext(