Revert "Add ALLOCATING state to routers"

This reverts commit 9c3c19f07c.

Following the merge of Ie98d5e3760cdb17450aea546f4b61f5ba14baf1c, the
creation of new routers uses RouterL3AgentBinding and its new
binding_index attribute to ensure correctness of the resources. As such,
the ALLOCATING state (which was used to do just that) is no longer
needed and can be removed.

Closes-Bug: #1609738
Change-Id: Ib04e08df13ef4e6b94bd588854a5795163e2a617
This commit is contained in:
John Schwarz 2016-08-19 15:23:36 +01:00
parent 5f2a2b8095
commit 3e4c0ae223
8 changed files with 27 additions and 129 deletions

View File

@ -20,10 +20,6 @@ ROUTER_PORT_OWNERS = lib_constants.ROUTER_INTERFACE_OWNERS_SNAT + \
(lib_constants.DEVICE_OWNER_ROUTER_GW,)
ROUTER_STATUS_ACTIVE = 'ACTIVE'
# NOTE(kevinbenton): a BUILD status for routers could be added in the future
# for agents to indicate when they are wiring up the ports. The following is
# to indicate when the server is busy building sub-components of a router
ROUTER_STATUS_ALLOCATING = 'ALLOCATING'
DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port"

View File

@ -310,13 +310,11 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
router_ids):
if n_utils.is_extension_supported(self,
constants.L3_HA_MODE_EXT_ALIAS):
routers = self.get_ha_sync_data_for_host(context, host, agent,
router_ids=router_ids,
active=True)
else:
routers = self.get_sync_data(context, router_ids=router_ids,
active=True)
return self.filter_allocating_and_missing_routers(context, routers)
return self.get_ha_sync_data_for_host(context, host, agent,
router_ids=router_ids,
active=True)
return self.get_sync_data(context, router_ids=router_ids, active=True)
def list_router_ids_on_host(self, context, host, router_ids=None):
agent = self._get_agent_by_type_and_host(

View File

@ -195,30 +195,6 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
self._apply_dict_extend_functions(l3.ROUTERS, res, router)
return db_utils.resource_fields(res, fields)
def filter_allocating_and_missing_routers(self, context, routers):
    """Drop routers that must not be handed to the agent.

    A router whose status is ALLOCATING is still having its dependent
    sub-resources built by the server, so it is hidden from agents
    until it becomes consumable.  Routers that no longer exist in the
    database are dropped as well, which avoids giving the agent
    partially-populated data for a router deleted while sync_routers
    was running.
    """
    requested_ids = set(r['id'] for r in routers)
    # One query fetches every requested router that both still exists
    # and is not hidden in the ALLOCATING state.
    visible_query = (context.session.query(l3_models.Router.id).
                     filter(
                         l3_models.Router.id.in_(requested_ids),
                         l3_models.Router.status !=
                         n_const.ROUTER_STATUS_ALLOCATING))
    visible_ids = set(row.id for row in visible_query)
    hidden_ids = requested_ids - visible_ids
    if hidden_ids:
        LOG.debug("Removing routers that were either concurrently "
                  "deleted or are in the ALLOCATING state: %s",
                  hidden_ids)
    return [r for r in routers if r['id'] in visible_ids]
def _create_router_db(self, context, router, tenant_id):
"""Create the DB object."""
registry.notify(resources.ROUTER, events.BEFORE_CREATE,
@ -226,7 +202,6 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
with context.session.begin(subtransactions=True):
# pre-generate id so it will be available when
# configuring external gw port
status = router.get('status', n_const.ROUTER_STATUS_ACTIVE)
router.setdefault('id', uuidutils.generate_uuid())
router['tenant_id'] = tenant_id
router_db = l3_models.Router(
@ -234,7 +209,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
tenant_id=router['tenant_id'],
name=router['name'],
admin_state_up=router['admin_state_up'],
status=status,
status=n_const.ROUTER_STATUS_ACTIVE,
description=router.get('description'))
context.session.add(router_db)
registry.notify(resources.ROUTER, events.PRECOMMIT_CREATE,

View File

@ -364,11 +364,9 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
self._core_plugin.delete_port(admin_ctx, port_id,
l3_port_check=False)
def _notify_router_updated(self, context, router_id,
schedule_routers=True):
def _notify_router_updated(self, context, router_id):
self.l3_rpc_notifier.routers_updated(
context, [router_id], shuffle_agents=True,
schedule_routers=schedule_routers)
context, [router_id], shuffle_agents=True)
@classmethod
def _is_ha(cls, router):
@ -415,10 +413,6 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
# handle this HA router
self.get_number_of_agents_for_scheduling(context)
# we set the allocating status to hide it from the L3 agents
# until we have created all of the requisite interfaces/networks
router['router']['status'] = n_const.ROUTER_STATUS_ALLOCATING
router_dict = super(L3_HA_NAT_db_mixin,
self).create_router(context, router)
if is_ha:
@ -428,11 +422,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
self.schedule_router(context, router_dict['id'])
router_dict['ha_vr_id'] = router_db.extra_attributes.ha_vr_id
router_dict['status'] = self._update_router_db(
context, router_dict['id'],
{'status': n_const.ROUTER_STATUS_ACTIVE})['status']
self._notify_router_updated(context, router_db.id,
schedule_routers=False)
self._notify_router_updated(context, router_db.id)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_router(context, router_dict['id'])
@ -484,13 +474,6 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
# enough l3 agents to handle this router.
self.get_number_of_agents_for_scheduling(context)
# set status to ALLOCATING so this router is no longer
# provided to agents while its interfaces are being re-configured.
# Keep in mind that if we want conversion to be hitless, this
# status cannot be used because agents treat hidden routers as
# deleted routers.
data['status'] = n_const.ROUTER_STATUS_ALLOCATING
with context.session.begin(subtransactions=True):
router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
context, router_id, data)
@ -524,10 +507,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
new_owner=constants.DEVICE_OWNER_ROUTER_INTF)
self.schedule_router(context, router_id)
router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db(
context, router_id, {'status': n_const.ROUTER_STATUS_ACTIVE})
self._notify_router_updated(context, router_db.id,
schedule_routers=False)
self._notify_router_updated(context, router_db.id)
return router_db

View File

@ -27,7 +27,6 @@ import six
from sqlalchemy import sql
from neutron._i18n import _LE, _LW
from neutron.common import constants
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import l3_hamode_db
@ -89,8 +88,6 @@ class L3Scheduler(object):
rb_model.RouterL3AgentBinding.router_id)
query = context.session.query(
l3_models.Router.id).filter(no_agent_binding)
query = query.filter(l3_models.Router.status ==
constants.ROUTER_STATUS_ACTIVE)
unscheduled_router_ids = [router_id_[0] for router_id_ in query]
if unscheduled_router_ids:
return plugin.get_routers(
@ -106,8 +103,7 @@ class L3Scheduler(object):
:returns: the list of routers to be scheduled
"""
if router_ids is not None:
filters = {'id': router_ids,
'status': [constants.ROUTER_STATUS_ACTIVE]}
filters = {'id': router_ids}
routers = plugin.get_routers(context, filters=filters)
result = self._filter_unscheduled_routers(plugin, context, routers)
else:

View File

@ -393,8 +393,12 @@ class L3AZLeastRoutersSchedulerTestCase(L3AZSchedulerBaseTest):
expected_scheduled_agent_count=[1, 1, 0])),
]
def setUp(self):
    """Attach an AZ-aware least-routers scheduler to the L3 plugin."""
    super(L3AZLeastRoutersSchedulerTestCase, self).setUp()
    scheduler = l3_agent_scheduler.AZLeastRoutersScheduler()
    self.scheduler = scheduler
    self.l3_plugin.router_scheduler = scheduler
def test_schedule_router(self):
scheduler = l3_agent_scheduler.AZLeastRoutersScheduler()
ha = False
if self.max_l3_agents_per_router:
self.config(max_l3_agents_per_router=self.max_l3_agents_per_router)
@ -412,7 +416,8 @@ class L3AZLeastRoutersSchedulerTestCase(L3AZSchedulerBaseTest):
az_hints = ['az%s' % i for i in range(self.router_az_hints)]
router = self._create_router(az_hints, ha)
scheduler.schedule(self.l3_plugin, self.adminContext, router['id'])
self.scheduler.schedule(self.l3_plugin, self.adminContext,
router['id'])
# schedule returns only one agent. so get all agents scheduled.
scheduled_agents = self.l3_plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']])

View File

@ -100,6 +100,12 @@ class L3HATestFramework(testlib_api.SqlTestCase):
data['admin_state_up'] = admin_state
return self.plugin._update_router_db(ctx, router_id, data)
def _bind_router(self, router_id):
    """Schedule router_id onto an L3 agent via the plugin's scheduler."""
    scheduler = self.plugin.router_scheduler
    scheduler.schedule(self.plugin, self.admin_ctx, router_id)
class L3HATestCase(L3HATestFramework):
@ -199,67 +205,6 @@ class L3HATestCase(L3HATestFramework):
helpers.set_agent_admin_state(self.agent1['id'])
self._assert_ha_state_for_agent_is_standby(router, self.agent1)
# A freshly created router must immediately report the ACTIVE status.
def test_router_created_in_active_state(self):
router = self._create_router()
self.assertEqual(n_const.ROUTER_STATUS_ACTIVE, router['status'])
# Updating a router attribute (here: the name) must not move the
# router out of the ACTIVE status.
def test_router_update_stay_active(self):
router = self._create_router()
router['name'] = 'test_update'
router_updated = self.plugin._update_router_db(self.admin_ctx,
router['id'], router)
self.assertEqual(n_const.ROUTER_STATUS_ACTIVE,
router_updated['status'])
# A router forced into the ALLOCATING state must be excluded from the
# agent sync data, and must reappear once it returns to ACTIVE.
def test_allocating_router_hidden_from_sync(self):
r1, r2 = self._create_router(), self._create_router()
r1['status'] = n_const.ROUTER_STATUS_ALLOCATING
self.plugin._update_router_db(self.admin_ctx, r1['id'], r1)
# store shorter name for readability
get_method = self.plugin._get_active_l3_agent_routers_sync_data
# r1 should be hidden
expected = [self.plugin.get_router(self.admin_ctx, r2['id'])]
self.assertEqual(expected, get_method(self.admin_ctx, None, None,
[r1['id'], r2['id']]))
# but once it transitions back, all is well in the world again!
r1['status'] = n_const.ROUTER_STATUS_ACTIVE
self.plugin._update_router_db(self.admin_ctx, r1['id'], r1)
expected.append(self.plugin.get_router(self.admin_ctx, r1['id']))
# just compare ids since python3 won't let us sort dicts
expected = sorted([r['id'] for r in expected])
result = sorted([r['id'] for r in get_method(
self.admin_ctx, None, None, [r1['id'], r2['id']])])
self.assertEqual(expected, result)
# During an HA -> non-HA migration the router must be in ALLOCATING
# while _delete_ha_interfaces runs, and end up ACTIVE afterwards.
def test_router_ha_update_allocating_then_active(self):
router = self._create_router()
_orig = self.plugin._delete_ha_interfaces
# Wrapper asserts the mid-migration status, then delegates to the
# real implementation so the migration actually completes.
def check_state(context, router_id):
self.assertEqual(
n_const.ROUTER_STATUS_ALLOCATING,
self.plugin._get_router(context, router_id)['status'])
return _orig(context, router_id)
with mock.patch.object(self.plugin, '_delete_ha_interfaces',
side_effect=check_state) as ha_mock:
router = self._migrate_router(router['id'], ha=False)
self.assertTrue(ha_mock.called)
self.assertEqual(n_const.ROUTER_STATUS_ACTIVE,
router['status'])
# While the server builds the router's HA sub-resources (_ensure_vr_id)
# the router must be in ALLOCATING; the caller sees ACTIVE at the end.
def test_router_created_allocating_state_during_interface_create(self):
_orig = self.plugin._ensure_vr_id
# Wrapper asserts the in-progress status, then delegates to the
# real implementation.
def check_state(context, router, ha_network):
self.assertEqual(n_const.ROUTER_STATUS_ALLOCATING,
router.status)
return _orig(context, router, ha_network)
with mock.patch.object(self.plugin, '_ensure_vr_id',
side_effect=check_state) as vr_id_mock:
router = self._create_router()
self.assertTrue(vr_id_mock.called)
self.assertEqual(n_const.ROUTER_STATUS_ACTIVE, router['status'])
# The default router created by this framework must come up as HA.
def test_ha_router_create(self):
router = self._create_router()
self.assertTrue(router['ha'])

View File

@ -1470,6 +1470,7 @@ class L3HATestCaseMixin(testlib_api.SqlTestCase,
def test_create_ha_port_and_bind_catch_integrity_error(self):
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
agent = {'id': 'foo_agent'}
orig_fn = orm.Session.add
@ -1490,6 +1491,7 @@ class L3HATestCaseMixin(testlib_api.SqlTestCase,
def test_create_ha_port_and_bind_catch_router_not_found(self):
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
agent = {'id': 'foo_agent'}
with mock.patch.object(self.plugin.router_scheduler, 'bind_router'):
@ -2112,6 +2114,7 @@ class L3AgentAZLeastRoutersSchedulerTestCase(L3HATestCaseMixin):
self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
state=False)
r1 = self._create_ha_router(az_hints=['az1', 'az3'])
self.plugin.schedule_router(self.adminContext, r1['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']])
self.assertEqual(2, len(agents))