From 14ee23d520fa296a1d23f881262bfdfb3e78ffed Mon Sep 17 00:00:00 2001
From: Luis Tomas Bolivar
Date: Tue, 26 Nov 2019 14:09:32 +0100
Subject: [PATCH] Ensure pools population does not happen until pools are
 recovered

This patch ensures there is no pool repopulation triggered before the
pools are recovered upon a kuryr-controller restart

Closes-Bug: 1854064
Change-Id: I458e00fac26949a472aafd1985eb47ee979d681f
---
 .../controller/drivers/vif_pool.py           | 30 +++++++--
 .../unit/controller/drivers/test_vif_pool.py | 65 +++++++++++++++++++
 2 files changed, 88 insertions(+), 7 deletions(-)

diff --git a/kuryr_kubernetes/controller/drivers/vif_pool.py b/kuryr_kubernetes/controller/drivers/vif_pool.py
index 9f6a8c48f..6e0f2b2d0 100644
--- a/kuryr_kubernetes/controller/drivers/vif_pool.py
+++ b/kuryr_kubernetes/controller/drivers/vif_pool.py
@@ -160,6 +160,7 @@ class BaseVIFPool(base.VIFPoolDriver):
     def __init__(self):
         # Note(ltomasbo) Execute the port recycling periodic actions in a
         # background thread
+        self._recovered_pools = False
         eventlet.spawn(self._return_ports_to_pool)

     def set_vif_driver(self, driver):
@@ -216,6 +217,9 @@ class BaseVIFPool(base.VIFPoolDriver):
         # REVISIT(ltomasbo): Drop the subnets parameter and get the information
         # from the pool_key, which will be required when multi-network is
         # supported
+        if not self._recovered_pools:
+            LOG.info("Kuryr-controller not yet ready to populate pools.")
+            raise exceptions.ResourceNotReady(pod)
         now = time.time()
         last_update = 0
         pool_updates = self._last_update.get(pool_key)
@@ -350,10 +354,6 @@ class BaseVIFPool(base.VIFPoolDriver):
         self._existing_vifs = collections.defaultdict()
         self._recyclable_ports = collections.defaultdict()
         self._last_update = collections.defaultdict()
-        # NOTE(ltomasbo): Ensure previously created ports are recovered into
-        # their respective pools
-        self._cleanup_leftover_ports()
-        self._recover_precreated_ports()

     def _get_trunks_info(self):
         """Returns information about trunks and their subports.
@@ -594,6 +594,14 @@ class NeutronVIFPool(BaseVIFPool):
             except KeyError:
                 LOG.debug('Port already recycled: %s', port_id)

+    def sync_pools(self):
+        super(NeutronVIFPool, self).sync_pools()
+        # NOTE(ltomasbo): Ensure previously created ports are recovered into
+        # their respective pools
+        self._cleanup_leftover_ports()
+        self._recover_precreated_ports()
+        self._recovered_pools = True
+
     def _recover_precreated_ports(self):
         attrs = {'device_owner': kl_const.DEVICE_OWNER}
         tags = config.CONF.neutron_defaults.resource_tags
@@ -643,7 +651,7 @@ class NeutronVIFPool(BaseVIFPool):
         self._create_healthcheck_file()

     def delete_network_pools(self, net_id):
-        if not hasattr(self, '_available_ports_pools'):
+        if not self._recovered_pools:
            LOG.info("Kuryr-controller not yet ready to delete network "
                     "pools.")
            raise exceptions.ResourceNotReady(net_id)
@@ -878,6 +886,14 @@ class NestedVIFPool(BaseVIFPool):
         parent_port = neutron.show_port(port_id).get('port')
         return parent_port['fixed_ips'][0]['ip_address']

+    def sync_pools(self):
+        super(NestedVIFPool, self).sync_pools()
+        # NOTE(ltomasbo): Ensure previously created ports are recovered into
+        # their respective pools
+        self._recover_precreated_ports()
+        self._recovered_pools = True
+        eventlet.spawn(self._cleanup_leftover_ports)
+
     def _recover_precreated_ports(self):
         self._precreated_ports(action='recover')
         LOG.info("PORTS POOL: pools updated with pre-created ports")
@@ -974,7 +990,7 @@ class NestedVIFPool(BaseVIFPool):

     @lockutils.synchronized('return_to_pool_nested')
     def populate_pool(self, trunk_ip, project_id, subnets, security_groups):
-        if not hasattr(self, '_available_ports_pools'):
+        if not self._recovered_pools:
             LOG.info("Kuryr-controller not yet ready to populate pools.")
             raise exceptions.ResourceNotReady(trunk_ip)
         pool_key = self._get_pool_key(trunk_ip, project_id, None, subnets)
@@ -1022,7 +1038,7 @@ class NestedVIFPool(BaseVIFPool):
         self._remove_precreated_ports(trunk_ips)

     def delete_network_pools(self, net_id):
-        if not hasattr(self, '_available_ports_pools'):
+        if not self._recovered_pools:
            LOG.info("Kuryr-controller not yet ready to delete network "
                     "pools.")
            raise exceptions.ResourceNotReady(net_id)
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py
index eae16d82e..0bd057abb 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py
@@ -196,6 +196,7 @@ class BaseVIFPool(test_base.TestCase):
         m_driver._existing_vifs = {}
         m_driver._available_ports_pools = {}
         m_driver._last_update = {pool_key: {tuple(security_groups): 1}}
+        m_driver._recovered_pools = True

         oslo_cfg.CONF.set_override('ports_pool_min',
                                    5,
@@ -211,6 +212,28 @@ class BaseVIFPool(test_base.TestCase):
         m_driver._get_pool_size.assert_called_once()
         m_driver._drv_vif.request_vifs.assert_called_once()

+    @ddt.data((neutron_vif.NeutronPodVIFDriver),
+              (nested_vlan_vif.NestedVlanPodVIFDriver))
+    def test__populate_pool_not_ready(self, m_vif_driver):
+        cls = vif_pool.BaseVIFPool
+        m_driver = mock.MagicMock(spec=cls)
+
+        cls_vif_driver = m_vif_driver
+        vif_driver = mock.MagicMock(spec=cls_vif_driver)
+        m_driver._drv_vif = vif_driver
+
+        pod = mock.sentinel.pod
+        project_id = str(uuid.uuid4())
+        subnets = mock.sentinel.subnets
+        security_groups = 'test-sg'
+        pool_key = (mock.sentinel.host_addr, project_id)
+        m_driver._recovered_pools = False
+
+        self.assertRaises(exceptions.ResourceNotReady, cls._populate_pool,
+                          m_driver, pool_key, pod, subnets,
+                          tuple(security_groups))
+        m_driver._drv_vif.request_vifs.assert_not_called()
+
     @mock.patch('time.time', return_value=0)
     def test__populate_pool_no_update(self, m_time):
         cls = vif_pool.BaseVIFPool
@@ -226,6 +249,7 @@ class BaseVIFPool(test_base.TestCase):
                                    15,
                                    group='vif_pool')
         m_driver._last_update = {pool_key: {tuple(security_groups): 1}}
+        m_driver._recovered_pools = True

         cls._populate_pool(m_driver, pool_key, pod, subnets,
                            tuple(security_groups))
@@ -256,6 +280,7 @@ class BaseVIFPool(test_base.TestCase):
                                    group='vif_pool')
         m_driver._last_update = {pool_key: {tuple(security_groups): 1}}
         m_driver._get_pool_size.return_value = 10
+        m_driver._recovered_pools = True

         cls._populate_pool(m_driver, pool_key, pod, subnets,
                            tuple(security_groups))
@@ -933,6 +958,7 @@ class NeutronVIFPool(test_base.TestCase):
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
         m_driver._existing_vifs = {port_id: mock.sentinel.vif}
+        m_driver._recovered_pools = True

         m_driver._get_pool_key_net.return_value = net_id

@@ -942,6 +968,21 @@ class NeutronVIFPool(test_base.TestCase):
         m_driver._get_pool_key_net.assert_called_once()
         neutron.delete_port.assert_called_once_with(port_id)

+    def test_delete_network_pools_not_ready(self):
+        cls = vif_pool.NeutronVIFPool
+        m_driver = mock.MagicMock(spec=cls)
+        neutron = self.useFixture(k_fix.MockNeutronClient()).client
+
+        net_id = mock.sentinel.net_id
+        m_driver._recovered_pools = False
+
+        self.assertRaises(exceptions.ResourceNotReady,
+                          cls.delete_network_pools, m_driver, net_id)
+
+        m_driver._trigger_return_to_pool.assert_not_called()
+        m_driver._get_pool_key_net.assert_not_called()
+        neutron.delete_port.assert_not_called()
+
     def test_delete_network_pools_missing_port_id(self):
         cls = vif_pool.NeutronVIFPool
         m_driver = mock.MagicMock(spec=cls)
@@ -954,6 +995,7 @@ class NeutronVIFPool(test_base.TestCase):
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
         m_driver._existing_vifs = {}
+        m_driver._recovered_pools = True

         neutron.delete_port.side_effect = n_exc.PortNotFoundClient
         m_driver._get_pool_key_net.return_value = net_id
@@ -1807,6 +1849,7 @@ class NestedVIFPool(test_base.TestCase):
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
         m_driver._existing_vifs = {port_id: vif}
+        m_driver._recovered_pools = True

         m_driver._get_trunk_id.return_value = trunk_id
         m_driver._get_pool_key_net.return_value = net_id
@@ -1822,6 +1865,26 @@ class NestedVIFPool(test_base.TestCase):
         m_driver._drv_vif._release_vlan_id.assert_called_once_with(vlan_id)
         neutron.delete_port.assert_called_once_with(port_id)

+    def test_delete_network_pools_not_ready(self):
+        cls = vif_pool.NestedVIFPool
+        m_driver = mock.MagicMock(spec=cls)
+        cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver
+        vif_driver = mock.MagicMock(spec=cls_vif_driver)
+        m_driver._drv_vif = vif_driver
+        neutron = self.useFixture(k_fix.MockNeutronClient()).client
+
+        net_id = mock.sentinel.net_id
+        m_driver._recovered_pools = False
+
+        self.assertRaises(exceptions.ResourceNotReady,
+                          cls.delete_network_pools, m_driver, net_id)
+
+        m_driver._trigger_return_to_pool.assert_not_called()
+        m_driver._get_pool_key_net.assert_not_called()
+        m_driver._get_trunk_id.assert_not_called()
+        m_driver._drv_vif._remove_subports.assert_not_called()
+        neutron.delete_port.assert_not_called()
+
     def test_delete_network_pools_exception(self):
         cls = vif_pool.NestedVIFPool
         m_driver = mock.MagicMock(spec=cls)
@@ -1841,6 +1904,7 @@ class NestedVIFPool(test_base.TestCase):
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
         m_driver._existing_vifs = {port_id: vif}
+        m_driver._recovered_pools = True

         m_driver._get_trunk_id.return_value = trunk_id
         m_driver._get_pool_key_net.return_value = net_id
@@ -1877,6 +1941,7 @@ class NestedVIFPool(test_base.TestCase):
         m_driver._available_ports_pools = {pool_key: {
             tuple(['security_group']): [port_id]}}
         m_driver._existing_vifs = {}
+        m_driver._recovered_pools = True

         m_driver._get_trunk_id.return_value = trunk_id
         m_driver._get_pool_key_net.return_value = net_id
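
Reviewer note (not part of the patch): the change replaces the old
hasattr(self, '_available_ports_pools') guard, which became true as soon as
sync_pools() started, with a _recovered_pools flag that is only set once port
recovery has actually finished. Below is a minimal standalone sketch of that
readiness-guard pattern, assuming simplified stand-in names (PoolDriverSketch,
ResourceNotReady, populate_pool) rather than the real Kuryr classes:

    # Minimal sketch of the readiness guard introduced by this patch:
    # pool operations raise a retryable error until recovery completes.


    class ResourceNotReady(Exception):
        """Stand-in for kuryr_kubernetes.exceptions.ResourceNotReady."""


    class PoolDriverSketch(object):
        def __init__(self):
            self._available_ports_pools = {}
            # Starts False; only sync_pools() flips it to True.
            self._recovered_pools = False

        def sync_pools(self):
            # Recover previously created ports into their pools first ...
            self._recover_precreated_ports()
            # ... and only then allow population/deletion to proceed.
            self._recovered_pools = True

        def _recover_precreated_ports(self):
            # Placeholder for the real recovery logic (Neutron port listing).
            self._available_ports_pools.setdefault('pool-a', []).append('port-1')

        def populate_pool(self, pool_key):
            if not self._recovered_pools:
                # Retryable: the caller re-queues the event instead of
                # repopulating pools on top of not-yet-recovered state.
                raise ResourceNotReady(pool_key)
            return self._available_ports_pools.setdefault(pool_key, [])


    if __name__ == '__main__':
        driver = PoolDriverSketch()
        try:
            driver.populate_pool('pool-a')
        except ResourceNotReady:
            print('not ready yet, event would be retried')
        driver.sync_pools()
        print(driver.populate_pool('pool-a'))  # ['port-1']

The tests added in the patch exercise exactly this behaviour: with
_recovered_pools = False the drivers raise ResourceNotReady and make no
Neutron calls; with it True the existing populate/delete paths run unchanged.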