Check for ha port to become ACTIVE

After a node reboot (restart of the L3 and L2 agents), routers can be
processed by the L3 agent before the Open vSwitch agent has set up the
appropriate HA ports. This change adds a notification for the L3 agent
when an HA port becomes ACTIVE, so that keepalived can then be enabled.

Closes-bug: #1597461

Co-Authored-By: venkata anil <anilvenkata@redhat.com>

Change-Id: Iedad1ccae45005efaaa74d5571df04197757d07a
This commit is contained in:
AKamyshnikova 2016-08-18 23:18:40 +03:00 committed by John Schwarz
parent 84a1873112
commit 25f5912cf8
5 changed files with 69 additions and 1 deletions

View File

@ -385,7 +385,9 @@ class HaRouter(router.RouterInfo):
def process(self, agent):
    """Process the router, deferring keepalived until the HA port is ACTIVE.

    Right after an agent restart the HA port may still be DOWN because the
    L2 agent has not wired it yet.  Only enable keepalived once the port
    status is ACTIVE; a later port-status update re-triggers processing.

    NOTE(review): the rendered diff left the stale pre-change line
    ``if self.ha_port:`` interleaved here; this is the reconstructed
    post-change method body.
    """
    super(HaRouter, self).process(agent)

    # Refresh the HA port from the latest router dict on every pass so a
    # DOWN -> ACTIVE transition is observed.
    self.ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
    if (self.ha_port and
            self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE):
        self.enable_keepalived()
@common_utils.synchronized('enable_radvd')

View File

@ -12,13 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from sqlalchemy import func
from sqlalchemy import sql
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db as l3_sch_db
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants as service_constants
class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):
@ -81,3 +88,25 @@ class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):
bindings = [(binding.l3_agent, None) for binding in bindings]
return self._get_agents_dict_for_router(bindings)
def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs):
    """Notify the L3 agent when an HA interface port becomes ACTIVE.

    Registered for PORT AFTER_UPDATE events.  When an HA router interface
    port transitions to ACTIVE on a bound host, tell the L3 agent on that
    host (via ``routers_updated_on_host``) so it can enable keepalived.

    :param resource: callback resource (resources.PORT)
    :param event: callback event (events.AFTER_UPDATE)
    :param trigger: the object that sent the notification
    :param kwargs: expects 'port', 'original_port' and 'context'
    """
    new_port = kwargs.get('port')
    original_port = kwargs.get('original_port')
    context = kwargs.get('context')
    if not (new_port and original_port):
        return
    # BUG FIX: the original indexed new_port[portbindings.HOST_ID] before
    # the None-check above, raising TypeError when 'port' was absent (and
    # KeyError when the port carried no binding).  Extract it defensively
    # only after the guard.
    host = new_port.get(portbindings.HOST_ID)
    if not host:
        return
    new_device_owner = new_port.get('device_owner', '')
    # Fire only on a DOWN/other -> ACTIVE transition of an HA interface.
    if (new_device_owner == constants.DEVICE_OWNER_ROUTER_HA_INTF and
            new_port['status'] == constants.PORT_STATUS_ACTIVE and
            original_port['status'] != new_port['status']):
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        l3plugin.l3_rpc_notifier.routers_updated_on_host(
            context, [new_port['device_id']], host)
def subscribe():
    """Register the HA-port status callback for port AFTER_UPDATE events."""
    registry.subscribe(_notify_l3_agent_ha_port_update,
                       resources.PORT,
                       events.AFTER_UPDATE)

View File

@ -30,6 +30,7 @@ from neutron.db import l3_dvr_ha_scheduler_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3
from neutron.plugins.common import constants
from neutron.quota import resource_registry
@ -72,6 +73,8 @@ class L3RouterPlugin(service_base.ServicePluginBase,
super(L3RouterPlugin, self).__init__()
if 'dvr' in self.supported_extension_aliases:
l3_dvrscheduler_db.subscribe()
if 'l3-ha' in self.supported_extension_aliases:
l3_hascheduler_db.subscribe()
self.agent_notifiers.update(
{n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})

View File

@ -273,6 +273,18 @@ class L3HATestCase(framework.L3AgentTestFramework):
# call the configure_fip_addresses directly here
router.configure_fip_addresses(interface_name)
def test_ha_port_status_update(self):
    """An HA router stays backup while its HA port is DOWN and is
    promoted to master once the port status flips to ACTIVE."""
    info = self.generate_router_info(enable_ha=True)
    # Bring the router up with a DOWN HA port: keepalived must stay off.
    info[constants.HA_INTERFACE_KEY]['status'] = constants.PORT_STATUS_DOWN
    ha_router = self.manage_router(self.agent, info)
    common_utils.wait_until_true(lambda: ha_router.ha_state == 'backup')
    # Flip the HA port to ACTIVE and reprocess; the router should now
    # start keepalived and be elected master.
    ha_router.router[constants.HA_INTERFACE_KEY]['status'] = (
        constants.PORT_STATUS_ACTIVE)
    self.agent._process_updated_router(ha_router.router)
    common_utils.wait_until_true(lambda: ha_router.ha_state == 'master')
class L3HATestFailover(framework.L3AgentTestFramework):

View File

@ -58,6 +58,7 @@ from neutron.plugins.ml2.drivers import type_vlan
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.services.l3_router import l3_router_plugin
from neutron.services.qos import qos_consts
from neutron.services.segments import db as segments_plugin_db
from neutron.services.segments import plugin as segments_plugin
@ -83,6 +84,7 @@ PLUGIN_NAME = 'ml2'
DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
HOST = 'fake_host'
TEST_ROUTER_ID = 'router_id'
# TODO(marun) - Move to somewhere common for reuse
@ -708,6 +710,26 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
self.assertEqual('DOWN', port['port']['status'])
self.assertEqual('DOWN', self.port_create_status)
def test_update_port_status_notify_port_event_after_update(self):
    """Updating an HA port to ACTIVE notifies the L3 plugin for the
    bound host via routers_updated_on_host."""
    admin_ctx = context.get_admin_context()
    ml2 = ml2_plugin.Ml2Plugin()
    # Instantiating the L3 plugin registers the HA-port callback.
    l3_router_plugin.L3RouterPlugin()
    l3plugin = manager.NeutronManager.get_service_plugins().get(
        p_const.L3_ROUTER_NAT)
    binding = {portbindings.HOST_ID: HOST}
    with mock.patch.object(l3plugin.l3_rpc_notifier,
                           'routers_updated_on_host') as mock_updated, \
            self.port(device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
                      device_id=TEST_ROUTER_ID,
                      arg_list=(portbindings.HOST_ID,),
                      **binding) as port:
        ml2.update_port_status(
            admin_ctx, port['port']['id'],
            constants.PORT_STATUS_ACTIVE, host=HOST)
        mock_updated.assert_called_once_with(
            mock.ANY, [TEST_ROUTER_ID], HOST)
def test_update_port_status_short_id(self):
ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()