Merge "Add ALLOCATING state to routers"
commit 81c61a9939
@@ -30,6 +30,11 @@ ROUTER_INTERFACE_OWNERS = lib_constants.ROUTER_INTERFACE_OWNERS + \
 ROUTER_INTERFACE_OWNERS_SNAT = lib_constants.ROUTER_INTERFACE_OWNERS_SNAT + \
     (DEVICE_OWNER_HA_REPLICATED_INT,)
 
+ROUTER_STATUS_ACTIVE = 'ACTIVE'
+# NOTE(kevinbenton): a BUILD status for routers could be added in the future
+# for agents to indicate when they are wiring up the ports. The following is
+# to indicate when the server is busy building sub-components of a router
+ROUTER_STATUS_ALLOCATING = 'ALLOCATING'
 L3_AGENT_MODE_DVR = 'dvr'
 L3_AGENT_MODE_DVR_SNAT = 'dvr_snat'
 L3_AGENT_MODE_LEGACY = 'legacy'
 
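The NOTE above is the crux of the change: ALLOCATING marks a router whose sub-components the server is still building, and such a router must stay invisible to L3 agents. A minimal, standalone sketch of that visibility rule (the helper name is invented for illustration):

    ROUTER_STATUS_ACTIVE = 'ACTIVE'
    ROUTER_STATUS_ALLOCATING = 'ALLOCATING'

    def visible_to_agents(status):
        # agents treat hidden routers as deleted, so only fully built
        # routers should ever be handed out by sync_routers
        return status != ROUTER_STATUS_ALLOCATING

    assert visible_to_agents(ROUTER_STATUS_ACTIVE)
    assert not visible_to_agents(ROUTER_STATUS_ALLOCATING)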
@@ -347,11 +347,13 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                                            router_ids):
         if n_utils.is_extension_supported(self,
                                           constants.L3_HA_MODE_EXT_ALIAS):
-            return self.get_ha_sync_data_for_host(context, host, agent,
-                                                  router_ids=router_ids,
-                                                  active=True)
-
-        return self.get_sync_data(context, router_ids=router_ids, active=True)
+            routers = self.get_ha_sync_data_for_host(context, host, agent,
+                                                     router_ids=router_ids,
+                                                     active=True)
+        else:
+            routers = self.get_sync_data(context, router_ids=router_ids,
+                                         active=True)
+        return self.filter_allocating_and_missing_routers(context, routers)
 
     def list_router_ids_on_host(self, context, host, router_ids=None):
         agent = self._get_agent_by_type_and_host(
@@ -176,17 +176,41 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
         self._apply_dict_extend_functions(l3.ROUTERS, res, router)
         return self._fields(res, fields)
 
+    def filter_allocating_and_missing_routers(self, context, routers):
+        """Filter out routers that shouldn't go to the agent.
+
+        Any routers in the ALLOCATING state will be excluded by
+        this query because this indicates that the server is still
+        building necessary dependent sub-resources for the router and it
+        is not ready for consumption by the agent. It will also filter
+        out any routers that no longer exist to prevent conditions where
+        only part of a router's information was populated in sync_routers
+        due to it being deleted during the sync.
+        """
+        router_ids = set(r['id'] for r in routers)
+        query = (context.session.query(Router.id).
+                 filter(
+                     Router.id.in_(router_ids),
+                     Router.status != l3_constants.ROUTER_STATUS_ALLOCATING))
+        valid_routers = set(r.id for r in query)
+        if router_ids - valid_routers:
+            LOG.debug("Removing routers that were either concurrently "
+                      "deleted or are in the ALLOCATING state: %s",
+                      (router_ids - valid_routers))
+        return [r for r in routers if r['id'] in valid_routers]
+
     def _create_router_db(self, context, router, tenant_id):
         """Create the DB object."""
         with context.session.begin(subtransactions=True):
             # pre-generate id so it will be available when
             # configuring external gw port
+            status = router.get('status', l3_constants.ROUTER_STATUS_ACTIVE)
             router_db = Router(id=(router.get('id') or
                                    uuidutils.generate_uuid()),
                                tenant_id=tenant_id,
                                name=router['name'],
                                admin_state_up=router['admin_state_up'],
-                               status="ACTIVE",
+                               status=status,
                                description=router.get('description'))
             context.session.add(router_db)
             return router_db
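The docstring above describes two filters in one query: drop routers still in ALLOCATING, and drop routers that vanished mid-sync. A runnable sketch of the same set arithmetic, with the SQLAlchemy query replaced by a plain dict (all names here are invented for illustration):

    ROUTER_STATUS_ALLOCATING = 'ALLOCATING'

    def filter_allocating_and_missing(routers, status_by_id):
        # status_by_id stands in for the Router.id/Router.status query
        requested = set(r['id'] for r in routers)
        valid = set(rid for rid in requested
                    if status_by_id.get(rid)  # None: concurrently deleted
                    and status_by_id[rid] != ROUTER_STATUS_ALLOCATING)
        return [r for r in routers if r['id'] in valid]

    routers = [{'id': 'r1'}, {'id': 'r2'}, {'id': 'r3'}]
    statuses = {'r1': 'ACTIVE', 'r2': ROUTER_STATUS_ALLOCATING}  # 'r3' deleted
    assert [r['id'] for r in
            filter_allocating_and_missing(routers, statuses)] == ['r1']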
@@ -413,9 +413,11 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             self._core_plugin.delete_port(admin_ctx, port_id,
                                           l3_port_check=False)
 
-    def _notify_ha_interfaces_updated(self, context, router_id):
+    def _notify_ha_interfaces_updated(self, context, router_id,
+                                      schedule_routers=True):
         self.l3_rpc_notifier.routers_updated(
-            context, [router_id], shuffle_agents=True)
+            context, [router_id], shuffle_agents=True,
+            schedule_routers=schedule_routers)
 
     @classmethod
     def _is_ha(cls, router):
@@ -450,6 +452,11 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
     def create_router(self, context, router):
         is_ha = self._is_ha(router['router'])
         router['router']['ha'] = is_ha
+        if is_ha:
+            # we set the allocating status to hide it from the L3 agents
+            # until we have created all of the requisite interfaces/networks
+            router['router']['status'] = constants.ROUTER_STATUS_ALLOCATING
+
         router_dict = super(L3_HA_NAT_db_mixin,
                             self).create_router(context, router)
         if is_ha:
@@ -461,11 +468,17 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                 context, router_db)[1]
 
             self._set_vr_id(context, router_db, ha_network)
-            self._notify_ha_interfaces_updated(context, router_db.id)
-            router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id
+
+            self.schedule_router(context, router_dict['id'])
+            router_dict['status'] = self._update_router_db(
+                context, router_dict['id'],
+                {'status': constants.ROUTER_STATUS_ACTIVE})['status']
+            self._notify_ha_interfaces_updated(context, router_db.id,
+                                               schedule_routers=False)
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.delete_router(context, router_dict['id'])
+        router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id
         return router_dict
 
     def _update_router_db(self, context, router_id, data):
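The reordered create path makes the ALLOCATING window explicit: the HA router is born hidden, its network, ports and VR id are built, it is scheduled, and only then is the status flipped to ACTIVE, with the final notification told not to schedule again. A hedged, standalone sketch of that ordering, with neutron internals replaced by a dict-backed fake:

    def create_ha_router(store, router_id, build_steps):
        # store maps router_id -> status; build_steps are callables that
        # stand in for interface creation, VR-id allocation and scheduling
        store[router_id] = 'ALLOCATING'   # hidden from agents while building
        try:
            for step in build_steps:
                step()
            store[router_id] = 'ACTIVE'   # visible again; notify w/o rescheduling
        except Exception:
            del store[router_id]          # mirrors the delete_router cleanup
            raise
        return store[router_id]

    store = {}
    assert create_ha_router(store, 'r1', [lambda: None]) == 'ACTIVE'
    assert store == {'r1': 'ACTIVE'}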
@@ -500,19 +513,26 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
         if requested_ha_state is False:
             raise l3_ha.HAmodeUpdateOfDvrHaNotSupported()
 
-        with context.session.begin(subtransactions=True):
-            router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
-                context, router_id, data)
-
-            ha_not_changed = (requested_ha_state is None or
-                              requested_ha_state == original_ha_state)
-            if ha_not_changed:
-                return router_db
-
+        ha_changed = (requested_ha_state is not None and
+                      requested_ha_state != original_ha_state)
+        if ha_changed:
             if router_db.admin_state_up:
                 msg = _('Cannot change HA attribute of active routers. Please '
                         'set router admin_state_up to False prior to upgrade.')
                 raise n_exc.BadRequest(resource='router', msg=msg)
+            # set status to ALLOCATING so this router is no longer
+            # provided to agents while its interfaces are being re-configured.
+            # Keep in mind that if we want conversion to be hitless, this
+            # status cannot be used because agents treat hidden routers as
+            # deleted routers.
+            data['status'] = constants.ROUTER_STATUS_ALLOCATING
+
+        with context.session.begin(subtransactions=True):
+            router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
+                context, router_id, data)
+
+            if not ha_changed:
+                return router_db
 
             ha_network = self.get_ha_network(context,
                                              router_db.tenant_id)
@@ -532,10 +552,14 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             ha_network = self._create_ha_interfaces_and_ensure_network(
                 context, router_db)[1]
             self._set_vr_id(context, router_db, ha_network)
-            self._notify_ha_interfaces_updated(context, router_db.id)
         else:
             self._delete_ha_interfaces(context, router_db.id)
-            self._notify_ha_interfaces_updated(context, router_db.id)
+
+        self.schedule_router(context, router_id)
+        router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
+            context, router_id, {'status': constants.ROUTER_STATUS_ACTIVE})
+        self._notify_ha_interfaces_updated(context, router_db.id,
+                                           schedule_routers=False)
 
         return router_db
 
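HA migration follows the same pattern as creation: when the ha flag really changes, the router is parked in ALLOCATING inside the update transaction, the HA interfaces are rebuilt while no agent can see it, and a final _update_router_db restores ACTIVE before the non-scheduling notification. The sequence can be sketched standalone (names invented):

    def migrate_ha(store, router_id, rebuild):
        store[router_id] = 'ALLOCATING'  # agents stop receiving this router
        rebuild()                        # delete/create HA interfaces while hidden
        store[router_id] = 'ACTIVE'
        return store[router_id]

    store = {'r1': 'ACTIVE'}
    seen = []
    assert migrate_ha(store, 'r1', lambda: seen.append(store['r1'])) == 'ACTIVE'
    assert seen == ['ALLOCATING']  # the rebuild ran while the router was hidden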
@@ -666,9 +690,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             if interface:
                 self._populate_mtu_and_subnets_for_ports(context, [interface])
 
-        # we don't want to return HA routers without HA interfaces created yet
-        return [r for r in list(routers_dict.values())
-                if not r.get('ha') or r.get(constants.HA_INTERFACE_KEY)]
+        return list(routers_dict.values())
 
     @log_helpers.log_method_call
     def get_ha_sync_data_for_host(self, context, host, agent,
@@ -87,6 +87,8 @@ class L3Scheduler(object):
                          l3_db.Router.id ==
                          l3_agentschedulers_db.RouterL3AgentBinding.router_id)
         query = context.session.query(l3_db.Router.id).filter(no_agent_binding)
+        query = query.filter(l3_db.Router.status ==
+                             constants.ROUTER_STATUS_ACTIVE)
         unscheduled_router_ids = [router_id_[0] for router_id_ in query]
         if unscheduled_router_ids:
             return plugin.get_routers(
@@ -102,7 +104,9 @@ class L3Scheduler(object):
         :returns: the list of routers to be scheduled
         """
         if router_ids is not None:
-            routers = plugin.get_routers(context, filters={'id': router_ids})
+            filters = {'id': router_ids,
+                       'status': [constants.ROUTER_STATUS_ACTIVE]}
+            routers = plugin.get_routers(context, filters=filters)
             return self._filter_unscheduled_routers(context, plugin, routers)
         else:
             return self._get_unscheduled_routers(context, plugin)
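On the scheduler side, both entry points now consider only ACTIVE routers, so a router that is still ALLOCATING can neither be auto-scheduled nor picked up by explicit id. A standalone sketch of the added filter, using list comprehensions in place of the SQL and get_routers filters:

    ROUTER_STATUS_ACTIVE = 'ACTIVE'

    def routers_to_schedule(routers, router_ids=None):
        candidates = [r for r in routers
                      if r['status'] == ROUTER_STATUS_ACTIVE]
        if router_ids is not None:
            candidates = [r for r in candidates if r['id'] in router_ids]
        return candidates

    routers = [{'id': 'r1', 'status': 'ACTIVE'},
               {'id': 'r2', 'status': 'ALLOCATING'}]
    assert [r['id'] for r in routers_to_schedule(routers)] == ['r1']
    assert routers_to_schedule(routers, ['r2']) == []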
@@ -15,7 +15,6 @@
 import functools
 
 from oslo_utils import uuidutils
-import testtools
 
 from neutron.agent.l3 import agent as l3_agent
 from neutron.agent.l3 import namespaces
@@ -73,7 +72,6 @@ class TestHAL3Agent(base.BaseFullStackTestCase):
         return (
             agents['agents'][0]['ha_state'] != agents['agents'][1]['ha_state'])
 
-    @testtools.skip('bug/1550886')
     def test_ha_router(self):
         # TODO(amuller): Test external connectivity before and after a
         # failover, see: https://review.openstack.org/#/c/196393/
 
@@ -284,6 +284,7 @@ class L3AZSchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
         super(L3AZSchedulerBaseTest, self).setUp(plugin=core_plugin)
 
         self.l3_plugin = l3_router_plugin.L3RouterPlugin()
+        self.l3_plugin.router_scheduler = None
         self.adminContext = context.get_admin_context()
         self.adminContext.tenant_id = '_func_test_tenant_'
 
@@ -96,15 +96,6 @@ class L3HATestFramework(testlib_api.SqlTestCase):
         data['admin_state_up'] = admin_state
         return self.plugin._update_router_db(ctx, router_id, data)
 
-    def _bind_router(self, router_id):
-        with self.admin_ctx.session.begin(subtransactions=True):
-            agents_db = self.plugin.get_agents_db(self.admin_ctx)
-            self.plugin.router_scheduler._bind_ha_router_to_agents(
-                self.plugin,
-                self.admin_ctx,
-                router_id,
-                agents_db)
-
 
 class L3HATestCase(L3HATestFramework):
 
@@ -144,7 +135,6 @@ class L3HATestCase(L3HATestFramework):
 
     def test_get_ha_router_port_bindings(self):
         router = self._create_router()
-        self._bind_router(router['id'])
         bindings = self.plugin.get_ha_router_port_bindings(
             self.admin_ctx, [router['id']])
         binding_dicts = [{'router_id': binding['router_id'],
@@ -157,7 +147,6 @@ class L3HATestCase(L3HATestFramework):
 
     def test_get_l3_bindings_hosting_router_with_ha_states_ha_router(self):
         router = self._create_router()
-        self._bind_router(router['id'])
         self.plugin.update_routers_states(
             self.admin_ctx, {router['id']: 'active'}, self.agent1['host'])
         bindings = self.plugin.get_l3_bindings_hosting_router_with_ha_states(
@@ -167,8 +156,10 @@ class L3HATestCase(L3HATestFramework):
         self.assertIn((self.agent2['id'], 'standby'), agent_ids)
 
     def test_get_l3_bindings_hosting_router_with_ha_states_agent_none(self):
-        router = self._create_router()
-        # Do not bind router to leave agents as None
+        with mock.patch.object(self.plugin, 'schedule_router'):
+            # Do not bind router to leave agents as None
+            router = self._create_router()
 
         res = self.admin_ctx.session.query(
             l3_hamode_db.L3HARouterAgentPortBinding).filter(
             l3_hamode_db.L3HARouterAgentPortBinding.router_id == router['id']
@@ -194,7 +185,6 @@ class L3HATestCase(L3HATestFramework):
     def test_get_l3_bindings_hosting_router_with_ha_states_active_and_dead(
             self):
         router = self._create_router()
-        self._bind_router(router['id'])
         with mock.patch.object(agents_db.Agent, 'is_active',
                                new_callable=mock.PropertyMock,
                                return_value=False):
@@ -206,6 +196,67 @@ class L3HATestCase(L3HATestFramework):
         agent_ids = [(agent[0]['id'], agent[1]) for agent in bindings]
         self.assertIn((self.agent1['id'], 'standby'), agent_ids)
 
+    def test_router_created_in_active_state(self):
+        router = self._create_router()
+        self.assertEqual(constants.ROUTER_STATUS_ACTIVE, router['status'])
+
+    def test_router_update_stay_active(self):
+        router = self._create_router()
+        router['name'] = 'test_update'
+        router_updated = self.plugin._update_router_db(self.admin_ctx,
+                                                       router['id'], router)
+        self.assertEqual(constants.ROUTER_STATUS_ACTIVE,
+                         router_updated['status'])
+
+    def test_allocating_router_hidden_from_sync(self):
+        r1, r2 = self._create_router(), self._create_router()
+        r1['status'] = constants.ROUTER_STATUS_ALLOCATING
+        self.plugin._update_router_db(self.admin_ctx, r1['id'], r1)
+        # store shorter name for readability
+        get_method = self.plugin._get_active_l3_agent_routers_sync_data
+        # r1 should be hidden
+        expected = [self.plugin.get_router(self.admin_ctx, r2['id'])]
+        self.assertEqual(expected, get_method(self.admin_ctx, None, None,
+                                              [r1['id'], r2['id']]))
+        # but once it transitions back, all is well in the world again!
+        r1['status'] = constants.ROUTER_STATUS_ACTIVE
+        self.plugin._update_router_db(self.admin_ctx, r1['id'], r1)
+        expected.append(self.plugin.get_router(self.admin_ctx, r1['id']))
+        # just compare ids since python3 won't let us sort dicts
+        expected = sorted([r['id'] for r in expected])
+        result = sorted([r['id'] for r in get_method(
+            self.admin_ctx, None, None, [r1['id'], r2['id']])])
+        self.assertEqual(expected, result)
+
+    def test_router_ha_update_allocating_then_active(self):
+        router = self._create_router()
+        _orig = self.plugin._delete_ha_interfaces
+
+        def check_state(context, router_id):
+            self.assertEqual(
+                constants.ROUTER_STATUS_ALLOCATING,
+                self.plugin._get_router(context, router_id)['status'])
+            return _orig(context, router_id)
+        with mock.patch.object(self.plugin, '_delete_ha_interfaces',
+                               side_effect=check_state) as ha_mock:
+            router = self._migrate_router(router['id'], ha=False)
+            self.assertTrue(ha_mock.called)
+        self.assertEqual(constants.ROUTER_STATUS_ACTIVE,
+                         router['status'])
+
+    def test_router_created_allocating_state_during_interface_create(self):
+        _orig = self.plugin._create_ha_interfaces
+
+        def check_state(context, router_db, ha_network):
+            self.assertEqual(constants.ROUTER_STATUS_ALLOCATING,
+                             router_db.status)
+            return _orig(context, router_db, ha_network)
+        with mock.patch.object(self.plugin, '_create_ha_interfaces',
+                               side_effect=check_state) as ha_mock:
+            router = self._create_router()
+            self.assertTrue(ha_mock.called)
+        self.assertEqual(constants.ROUTER_STATUS_ACTIVE, router['status'])
+
     def test_ha_router_create(self):
         router = self._create_router()
         self.assertTrue(router['ha'])
@@ -330,7 +381,6 @@ class L3HATestCase(L3HATestFramework):
 
     def test_unbind_ha_router(self):
         router = self._create_router()
-        self._bind_router(router['id'])
 
         bound_agents = self.plugin.get_l3_agents_hosting_routers(
             self.admin_ctx, [router['id']])
@@ -365,7 +415,6 @@ class L3HATestCase(L3HATestFramework):
 
     def test_l3_agent_routers_query_interface(self):
         router = self._create_router()
-        self._bind_router(router['id'])
         routers = self.plugin.get_ha_sync_data_for_host(self.admin_ctx,
                                                         self.agent1['host'],
                                                         self.agent1)
@@ -396,7 +445,6 @@ class L3HATestCase(L3HATestFramework):
 
     def _deployed_router_change_ha_flag(self, to_ha):
         router1 = self._create_router(ha=not to_ha)
-        self._bind_router(router1['id'])
         routers = self.plugin.get_ha_sync_data_for_host(
             self.admin_ctx, self.agent1['host'], self.agent1)
         router = routers[0]
@@ -427,31 +475,6 @@ class L3HATestCase(L3HATestFramework):
         self._create_router()
         self.assertTrue(self.notif_m.called)
 
-    def test_allocating_router_hidden_from_sync(self):
-        self.plugin.supported_extension_aliases = [
-            constants.L3_HA_MODE_EXT_ALIAS]
-        # simulate a router that is being allocated during
-        # the agent's synchronization
-        r1, r2 = self._create_router(), self._create_router()
-        self.plugin._delete_ha_interfaces(self.admin_ctx, r1['id'])
-        # store shorter name for readability
-        get_method = self.plugin._get_active_l3_agent_routers_sync_data
-        # r1 should be hidden
-        self.assertEqual([r2['id']],
-                         [r['id'] for r in get_method(self.admin_ctx,
-                                                      None, self.agent1,
-                                                      [r1['id'], r2['id']])])
-        # but once it transitions back, all is well in the world again!
-        rdb = self.plugin._get_router(self.admin_ctx, r1['id'])
-        self.plugin._create_ha_interfaces(
-            self.admin_ctx, rdb, self.plugin.get_ha_network(
-                self.admin_ctx, rdb.tenant_id))
-        # just compare ids since python3 won't let us sort dicts
-        expected = sorted([r1['id'], r2['id']])
-        result = sorted([r['id'] for r in get_method(
-            self.admin_ctx, None, self.agent1, [r1['id'], r2['id']])])
-        self.assertEqual(expected, result)
-
     def test_update_router_to_ha_notifies_agent(self):
         router = self._create_router(ha=False)
         self.notif_m.reset_mock()
@@ -459,10 +482,8 @@ class L3HATestCase(L3HATestFramework):
         self.assertTrue(self.notif_m.called)
 
     def test_unique_vr_id_between_routers(self):
-        router1 = self._create_router()
-        router2 = self._create_router()
-        self._bind_router(router1['id'])
-        self._bind_router(router2['id'])
+        self._create_router()
+        self._create_router()
         routers = self.plugin.get_ha_sync_data_for_host(
             self.admin_ctx, self.agent1['host'], self.agent1)
         self.assertEqual(2, len(routers))
@@ -474,10 +495,8 @@ class L3HATestCase(L3HATestFramework):
 
     @mock.patch('neutron.db.l3_hamode_db.VR_ID_RANGE', new=set(range(1, 2)))
     def test_vr_id_unique_range_per_tenant(self):
-        router1 = self._create_router()
-        router2 = self._create_router(tenant_id=_uuid())
-        self._bind_router(router1['id'])
-        self._bind_router(router2['id'])
+        self._create_router()
+        self._create_router(tenant_id=_uuid())
         routers = self.plugin.get_ha_sync_data_for_host(
             self.admin_ctx, self.agent1['host'], self.agent1)
         self.assertEqual(2, len(routers))
@@ -527,10 +546,8 @@ class L3HATestCase(L3HATestFramework):
         self.assertEqual(allocs_before, allocs_after)
 
     def test_one_ha_router_one_not(self):
-        router1 = self._create_router(ha=False)
-        router2 = self._create_router()
-        self._bind_router(router1['id'])
-        self._bind_router(router2['id'])
+        self._create_router(ha=False)
+        self._create_router()
         routers = self.plugin.get_ha_sync_data_for_host(
             self.admin_ctx, self.agent1['host'], self.agent1)
 
@@ -695,7 +712,6 @@ class L3HATestCase(L3HATestFramework):
 
     def test_get_active_host_for_ha_router(self):
         router = self._create_router()
-        self._bind_router(router['id'])
         self.assertEqual(
             None,
             self.plugin.get_active_host_for_ha_router(
@@ -709,9 +725,7 @@ class L3HATestCase(L3HATestFramework):
 
     def test_update_routers_states(self):
         router1 = self._create_router()
-        self._bind_router(router1['id'])
         router2 = self._create_router()
-        self._bind_router(router2['id'])
 
         routers = self.plugin.get_ha_sync_data_for_host(
             self.admin_ctx, self.agent1['host'], self.agent1)
@@ -731,9 +745,7 @@ class L3HATestCase(L3HATestFramework):
 
     def test_set_router_states_handles_concurrently_deleted_router(self):
         router1 = self._create_router()
-        self._bind_router(router1['id'])
         router2 = self._create_router()
-        self._bind_router(router2['id'])
         bindings = self.plugin.get_ha_router_port_bindings(
             self.admin_ctx, [router1['id'], router2['id']])
         self.plugin.delete_router(self.admin_ctx, router1['id'])
@@ -746,7 +758,6 @@ class L3HATestCase(L3HATestFramework):
 
     def test_update_routers_states_port_not_found(self):
         router1 = self._create_router()
-        self._bind_router(router1['id'])
         port = {'id': 'foo', 'device_id': router1['id']}
         with mock.patch.object(self.core_plugin, 'get_ports',
                                return_value=[port]):
@@ -891,7 +902,6 @@ class L3HATestCase(L3HATestFramework):
 
     def test_update_port_status_port_bingding_deleted_concurrently(self):
         router1 = self._create_router()
-        self._bind_router(router1['id'])
         states = {router1['id']: 'active'}
         with mock.patch.object(self.plugin, 'get_ha_router_port_bindings'):
             (self.admin_ctx.session.query(
@@ -961,7 +971,6 @@ class L3HAModeDbTestCase(L3HATestFramework):
         interface_info = {'subnet_id': subnet['id']}
 
         router = self._create_router()
-        self._bind_router(router['id'])
         self.plugin.add_router_interface(self.admin_ctx,
                                          router['id'],
                                          interface_info)
@@ -983,7 +992,6 @@ class L3HAModeDbTestCase(L3HATestFramework):
         interface_info = {'subnet_id': subnet['id']}
 
         router = self._create_router()
-        self._bind_router(router['id'])
         self.plugin.add_router_interface(self.admin_ctx,
                                          router['id'],
                                          interface_info)
 
@@ -1479,11 +1479,11 @@ class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
         # Mock scheduling so that the test can control it explicitly
         mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
                           '_notify_ha_interfaces_updated').start()
 
-        router1 = self._create_ha_router()
-        router2 = self._create_ha_router()
-        router3 = self._create_ha_router(ha=False)
-        router4 = self._create_ha_router(ha=False)
+        with mock.patch.object(self.plugin, 'schedule_router'):
+            router1 = self._create_ha_router()
+            router2 = self._create_ha_router()
+            router3 = self._create_ha_router(ha=False)
+            router4 = self._create_ha_router(ha=False)
 
         # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
         # host 2, and agent 4 will host 3.
@@ -1907,12 +1907,11 @@ class L3AgentAZLeastRoutersSchedulerTestCase(L3HATestCaseMixin):
 
     def test_az_scheduler_ha_auto_schedule(self):
         cfg.CONF.set_override('max_l3_agents_per_router', 3)
-        r1 = self._create_ha_router(az_hints=['az1', 'az3'])
         self._set_l3_agent_admin_state(self.adminContext, self.agent2['id'],
                                        state=False)
         self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
                                        state=False)
-        self.plugin.schedule_router(self.adminContext, r1['id'])
+        r1 = self._create_ha_router(az_hints=['az1', 'az3'])
         agents = self.plugin.get_l3_agents_hosting_routers(
             self.adminContext, [r1['id']])
         self.assertEqual(2, len(agents))
 