Integration of Router Extra Attributes OVO
This patch integrates the Router Extra Attributes OVO and uses the proper
context when calling methods for object operations. The remaining
integration of this OVO in l3_agentschedulers_db.py and l3_attrs_db.py is
done in patches [1] and [2], respectively.

[1] - I0af665a97087ad72431d58f04089a804088ef005
[2] - Id5ed0a541d09cd1105f9cb067401e2afa8cd9b83

Change-Id: Ib6e59ab3405857d3ed4d82df1a80800089c3f06e
Partially-Implements: blueprint adopt-oslo-versioned-objects-for-db
parent 143a6e8546
commit 79cf488c4c
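For context, the core pattern this patch applies, condensed into a minimal
sketch. The helper names _is_ha_router_old/_is_ha_router_new are
illustrative only; the model, the OVO and the objects_exist call are taken
from the l3_hamode_db.py hunk below:

    # Before: hand-rolled SQLAlchemy query against the model, on a reader
    # session the helper opens itself, ignoring the caller's context.
    from neutron.db.models import l3_attrs

    def _is_ha_router_old(session, router_id):
        query = session.query(l3_attrs.RouterExtraAttributes).filter_by(
            ha=True, router_id=router_id)
        return bool(query.limit(1).count())

    # After: the versioned object performs the lookup, reusing the
    # caller's request context instead of a fresh reader session.
    from neutron.objects import router as l3_objs

    def _is_ha_router_new(context, router_id):
        return l3_objs.RouterExtraAttributes.objects_exist(
            context, router_id=router_id, ha=True)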
neutron/db/l3_agentschedulers_db.py

@@ -32,11 +32,11 @@ from neutron.agent.common import utils as agent_utils
 from neutron.common import utils as n_utils
 from neutron.db import agentschedulers_db
 from neutron.db.models import agent as agent_model
 from neutron.db.models import l3 as l3_model
-from neutron.db.models import l3_attrs
 from neutron.db.models import l3agent as rb_model
 from neutron.extensions import l3agentscheduler
 from neutron.extensions import router_availability_zone as router_az
+from neutron.objects import router as l3_objs


 LOG = logging.getLogger(__name__)
@@ -87,6 +87,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
             rescheduling_failed=l3agentscheduler.RouterReschedulingFailed)

     def get_down_router_bindings(self, context, agent_dead_limit):
+        # TODO(sshank): This portion is done in a separate patch: [1]
+        # [1] Change-Id: I0af665a97087ad72431d58f04089a804088ef005
         cutoff = self.get_cutoff_time(agent_dead_limit)
         return (context.session.query(
             rb_model.RouterL3AgentBinding).
@@ -398,27 +400,13 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,

     def get_routers_l3_agents_count(self, context):
         """Return a map between routers and agent counts for all routers."""
-
-        # Postgres requires every column in the select to be present in
-        # the group by statement when using an aggregate function.
-        # One solution is to generate a subquery and join it with the desired
-        # columns.
-        binding_model = rb_model.RouterL3AgentBinding
-        sub_query = (context.session.query(
-            binding_model.router_id,
-            func.count(binding_model.router_id).label('count')).
-            join(l3_attrs.RouterExtraAttributes,
-                 binding_model.router_id ==
-                 l3_attrs.RouterExtraAttributes.router_id).
-            join(l3_model.Router).
-            group_by(binding_model.router_id).subquery())
-
-        query = (context.session.query(l3_model.Router, sub_query.c.count).
-                 outerjoin(sub_query))
-
-        return [(self._make_router_dict(router), agent_count) if agent_count
-                else (self._make_router_dict(router), 0)
-                for router, agent_count in query]
+        # TODO(sshank): This portion needs Router OVO integration when it is
+        # merged.
+        l3_model_list = l3_objs.RouterExtraAttributes.get_router_agents_count(
+            context)
+        return [(self._make_router_dict(router_model),
+                 agent_count if agent_count else 0)
+                for router_model, agent_count in l3_model_list]

     def get_l3_agents(self, context, active=None, filters=None):
         query = context.session.query(agent_model.Agent)
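Usage of the new code path, as a hedged sketch; plugin and ctx stand in for
an L3 plugin instance and an admin context and are not part of the diff:

    pairs = l3_objs.RouterExtraAttributes.get_router_agents_count(ctx)
    # Routers with no L3 agent binding come back from the outer join with
    # a None count, hence the "agent_count if agent_count else 0" above.
    report = [(plugin._make_router_dict(router), count or 0)
              for router, count in pairs]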
neutron/db/l3_hamode_db.py

@@ -45,10 +45,10 @@ from neutron.db import l3_dvr_db
 from neutron.db.l3_dvr_db import is_distributed_router
 from neutron.db.models import agent as agent_model
 from neutron.db.models import l3 as l3_models
-from neutron.db.models import l3_attrs
 from neutron.db.models import l3ha as l3ha_model
 from neutron.extensions import l3
 from neutron.extensions import l3_ext_ha_mode as l3_ha
+from neutron.objects import router as l3_objs
 from neutron.plugins.common import utils as p_utils


@@ -715,14 +715,10 @@ def is_ha_router(router):


 def is_ha_router_port(context, device_owner, router_id):
-    session = db_api.get_reader_session()
     if device_owner == constants.DEVICE_OWNER_HA_REPLICATED_INT:
         return True
     elif device_owner == constants.DEVICE_OWNER_ROUTER_SNAT:
-        query = session.query(l3_attrs.RouterExtraAttributes)
-        query = query.filter_by(ha=True)
-        query = query.filter(l3_attrs.RouterExtraAttributes.router_id ==
-                             router_id)
-        return bool(query.limit(1).count())
+        return l3_objs.RouterExtraAttributes.objects_exist(
+            context, router_id=router_id, ha=True)
     else:
         return False
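The behavioral point of this hunk: the old body ignored the context argument
and opened its own reader session, so callers could pass anything; the new
body hands the caller's context straight to the OVO layer, which means call
sites must now supply a real request context. A hedged sketch of a correct
call site (admin_ctx is assumed; the constants module is the one already
imported by l3_hamode_db):

    from neutron.db import l3_hamode_db

    ha = l3_hamode_db.is_ha_router_port(
        admin_ctx, constants.DEVICE_OWNER_ROUTER_SNAT, router_id)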
neutron/objects/router.py

@@ -14,10 +14,12 @@ import netaddr

 from oslo_versionedobjects import base as obj_base
 from oslo_versionedobjects import fields as obj_fields
+from sqlalchemy import func

 from neutron.common import utils
 from neutron.db.models import l3
 from neutron.db.models import l3_attrs
+from neutron.db.models import l3agent as rb_model
 from neutron.extensions import availability_zone as az_ext
 from neutron.objects import base
 from neutron.objects import common_types
@@ -95,3 +97,22 @@ class RouterExtraAttributes(base.NeutronDbObject):
             result[az_ext.AZ_HINTS] = (
                 az_ext.convert_az_list_to_string(result[az_ext.AZ_HINTS]))
         return result
+
+    @classmethod
+    def get_router_agents_count(cls, context):
+        # TODO(sshank): This is pulled out from l3_agentschedulers_db.py
+        # until a way to handle joins is figured out.
+        binding_model = rb_model.RouterL3AgentBinding
+        sub_query = (context.session.query(
+            binding_model.router_id,
+            func.count(binding_model.router_id).label('count')).
+            join(l3_attrs.RouterExtraAttributes,
+                 binding_model.router_id ==
+                 l3_attrs.RouterExtraAttributes.router_id).
+            join(l3.Router).
+            group_by(binding_model.router_id).subquery())
+
+        query = (context.session.query(l3.Router, sub_query.c.count).
+                 outerjoin(sub_query))
+
+        return [(router, agent_count) for router, agent_count in query]
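A note on the query shape kept in this classmethod: with PostgreSQL, every
selected column must appear in GROUP BY when an aggregate is used, so the
count is computed in a subquery keyed only on router_id and then
outer-joined back to Router. A minimal standalone sketch of the same shape;
Binding and Router stand in for the mapped models and session is assumed:

    from sqlalchemy import func

    sub = (session.query(Binding.router_id,
                         func.count(Binding.router_id).label('count')).
           group_by(Binding.router_id).subquery())
    rows = session.query(Router, sub.c.count).outerjoin(sub)
    # Routers without bindings pair with a None count here, which the
    # caller normalizes to 0.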
neutron/plugins/ml2/drivers/l2pop/mech_driver.py

@@ -71,8 +71,9 @@ class L2populationMechanismDriver(api.MechanismDriver):
     def delete_port_postcommit(self, context):
         port = context.current
         agent_host = context.host
+        plugin_context = context._plugin_context
         fdb_entries = self._get_agent_fdb(
-            context, context.bottom_bound_segment, port, agent_host)
+            plugin_context, context.bottom_bound_segment, port, agent_host)
         if fdb_entries and l3_hamode_db.is_ha_router_port(
                 context, port['device_owner'], port['device_id']):
             session = db_api.get_reader_session()
@@ -149,7 +150,8 @@ class L2populationMechanismDriver(api.MechanismDriver):
     def update_port_postcommit(self, context):
         port = context.current
         orig = context.original
-        if l3_hamode_db.is_ha_router_port(context, port['device_owner'],
+        plugin_context = context._plugin_context
+        if l3_hamode_db.is_ha_router_port(plugin_context, port['device_owner'],
                                           port['device_id']):
             return
         diff_ips = self._get_diff_ips(orig, port)
@@ -161,7 +163,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
         if context.status == const.PORT_STATUS_DOWN:
             agent_host = context.host
             fdb_entries = self._get_agent_fdb(
-                context, context.bottom_bound_segment, port,
+                plugin_context, context.bottom_bound_segment, port,
                 agent_host)
             self.L2populationAgentNotify.remove_fdb_entries(
                 self.rpc_ctx, fdb_entries)
@@ -171,7 +173,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
             # The port has been migrated. Send notification about port
             # removal from old host.
             fdb_entries = self._get_agent_fdb(
-                context, context.original_bottom_bound_segment,
+                plugin_context, context.original_bottom_bound_segment,
                 orig, context.original_host)
             self.L2populationAgentNotify.remove_fdb_entries(
                 self.rpc_ctx, fdb_entries)
@@ -180,7 +182,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
             self.update_port_up(context)
         elif context.status == const.PORT_STATUS_DOWN:
             fdb_entries = self._get_agent_fdb(
-                context, context.bottom_bound_segment, port,
+                plugin_context, context.bottom_bound_segment, port,
                 context.host)
             self.L2populationAgentNotify.remove_fdb_entries(
                 self.rpc_ctx, fdb_entries)
@@ -245,11 +247,12 @@ class L2populationMechanismDriver(api.MechanismDriver):
         if agent_host and l3plugin and getattr(
                 l3plugin, "list_router_ids_on_host", None):
             admin_context = n_context.get_admin_context()
+            port_context = context._plugin_context
             if l3plugin.list_router_ids_on_host(
                     admin_context, agent_host, [port['device_id']]):
                 return
             fdb_entries = self._get_agent_fdb(
-                context, context.bottom_bound_segment, port, agent_host)
+                port_context, context.bottom_bound_segment, port, agent_host)
             self.L2populationAgentNotify.remove_fdb_entries(
                 self.rpc_ctx, fdb_entries)

@@ -257,6 +260,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
         port = context.current
         agent_host = context.host
         session = db_api.get_reader_session()
+        port_context = context._plugin_context
         agent = l2pop_db.get_agent_by_host(session, agent_host)
         if not agent:
             LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
@@ -295,7 +299,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
             # Notify other agents to add fdb rule for current port
             if (port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE and
                     not l3_hamode_db.is_ha_router_port(
-                        context, port['device_owner'], port['device_id'])):
+                        port_context, port['device_owner'], port['device_id'])):
                 other_fdb_ports[agent_ip] += self._get_port_fdb_entries(port)

         self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
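All of the mech_driver changes above make the same substitution: the ml2
PortContext received by the *_postcommit hooks is an API-layer wrapper,
while the DB and OVO helpers need the plain request context it wraps. A
hedged sketch of the distinction; the private attribute name
_plugin_context comes from the diff, not from a public API:

    def delete_port_postcommit(self, context):
        # context is an ml2 PortContext, not a neutron request context
        plugin_context = context._plugin_context
        # plugin_context, not context, is what the DB/OVO lookups expect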
neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py

@@ -20,10 +20,10 @@ from oslo_utils import uuidutils

 from neutron.common import constants as n_const
 from neutron.db.models import l3 as l3_models
-from neutron.db.models import l3_attrs
 from neutron.db.models import l3ha as l3ha_model
 from neutron.db import models_v2
 from neutron.objects import network as network_obj
+from neutron.objects import router as l3_objs
 from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
 from neutron.plugins.ml2 import models
 from neutron.tests.common import helpers
@@ -35,7 +35,7 @@ HOST_2 = 'HOST_2'
 HOST_3 = 'HOST_3'
 HOST_2_TUNNELING_IP = '20.0.0.2'
 HOST_3_TUNNELING_IP = '20.0.0.3'
-TEST_ROUTER_ID = 'router_id'
+TEST_ROUTER_ID = uuidutils.generate_uuid()
 TEST_NETWORK_ID = uuidutils.generate_uuid()
 TEST_HA_NETWORK_ID = uuidutils.generate_uuid()
 PLUGIN_NAME = 'ml2'
@@ -55,8 +55,10 @@ class TestL2PopulationDBTestCase(testlib_api.SqlTestCase):
     def _create_router(self, distributed=True, ha=False):
         with self.ctx.session.begin(subtransactions=True):
             self.ctx.session.add(l3_models.Router(id=TEST_ROUTER_ID))
-            self.ctx.session.add(l3_attrs.RouterExtraAttributes(
-                router_id=TEST_ROUTER_ID, distributed=distributed, ha=ha))
+            l3_objs.RouterExtraAttributes(
+                self.ctx,
+                router_id=TEST_ROUTER_ID,
+                distributed=distributed, ha=ha).create()

     def _create_ha_router(self, distributed=False):
         helpers.register_l3_agent(HOST_2)
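Two details in the test changes are worth noting. TEST_ROUTER_ID becomes a
real UUID because router_id is declared as a UUID field on the OVO, so the
old placeholder string 'router_id' no longer fits the object's contract
once the attributes are created through the OVO. A hedged illustration:

    from oslo_utils import uuidutils

    TEST_ROUTER_ID = uuidutils.generate_uuid()
    assert uuidutils.is_uuid_like(TEST_ROUTER_ID)

And _create_router now persists the extra attributes via
RouterExtraAttributes(...).create() instead of session.add(), exercising
the same OVO path the production code uses.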
neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py

@@ -31,7 +31,6 @@ from neutron.db import agents_db
 from neutron.db import common_db_mixin
 from neutron.db import l3_agentschedulers_db
 from neutron.db import l3_hamode_db
-from neutron.plugins.ml2 import db as ml2_db
 from neutron.plugins.ml2 import driver_context
 from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
 from neutron.plugins.ml2.drivers.l2pop import mech_driver as l2pop_mech_driver
@@ -1137,28 +1136,19 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
             device = 'tap' + p['port']['id']
             self.callbacks.update_device_up(self.adminContext,
                                             agent_id=HOST, device=device)
-            dvr_snat_port = ml2_db.get_port(self.adminContext,
-                                            p['port']['id'])
             self.mock_fanout.reset_mock()

-            context = mock.Mock()
-            context.current = dvr_snat_port
-            context.host = HOST
-            segment = {'network_type': 'vxlan', 'segmentation_id': 1}
-            context.bottom_bound_segment = segment
-
-            expected = {self._network['network']['id']:
-                        {'segment_id': segment['segmentation_id'],
-                         'network_type': segment['network_type'],
-                         'ports': {'20.0.0.1':
-                                   [l2pop_rpc.PortInfo(
-                                       mac_address=p['port']['mac_address'],
-                                       ip_address=p['port']['fixed_ips'][0]
-                                       ['ip_address'])]}}}
-
-            l2pop_mech.delete_port_postcommit(context)
-            self.mock_fanout.assert_called_with(
-                mock.ANY, 'remove_fdb_entries', expected)
+            p['port'][portbindings.HOST_ID] = HOST
+            bindings = [mock.Mock()]
+            port_context = driver_context.PortContext(
+                self.driver, self.context, p['port'],
+                self.driver.get_network(
+                    self.context, p['port']['network_id']),
+                None, bindings)
+            mock.patch.object(port_context, '_expand_segment').start()
+            # The point is to provide coverage and to assert that
+            # no exceptions are raised.
+            l2pop_mech.delete_port_postcommit(port_context)

     def test_fixed_ips_change_unbound_port_no_rpc(self):
         l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()