Protection from subport with wrong ACTIVE status

Under high load spikes, Neutron may fail to properly update the
status of subports when they are detached from their trunks. This
leaves ports in ACTIVE status even though they are no longer
attached anywhere. Under such circumstances Kuryr was unable to
clean up the network associated with the deleted namespace, because
the port is skipped: it is neither DOWN nor part of a trunk.
This patch adds protection against this case.

Change-Id: I50e8a4117fc261ef67803dc114638acf378e2ba3
Luis Tomas Bolivar 2019-11-08 12:15:21 +01:00
parent 779ff98e57
commit 2c41671076
2 changed files with 62 additions and 2 deletions
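
The stale ports described in the commit message can be spotted
directly against the Neutron API. The following is a minimal sketch
for context only, not part of this patch: it assumes a
python-neutronclient Client instance named neutron, and the helper
name find_stale_subports is purely illustrative.

    # Hypothetical helper (not part of Kuryr): list subports that Neutron
    # still reports as ACTIVE although no trunk references them anymore.
    def find_stale_subports(neutron):
        # Subport IDs that are still attached to some trunk.
        attached = {sp['port_id']
                    for trunk in neutron.list_trunks()['trunks']
                    for sp in trunk['sub_ports']}
        # Ports tagged as trunk subports but missing from every trunk; these
        # are the ones the pool cleanup used to skip because they were
        # neither DOWN nor part of a trunk.
        return [port['id']
                for port in neutron.list_ports(
                    device_owner='trunk:subport')['ports']
                if port['id'] not in attached and port['status'] == 'ACTIVE']

The first hunk below applies the same check in NestedVIFPool
(exercised through _precreated_ports in the new test): any pooled
subport that is no longer listed under a trunk is deleted rather
than reused.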

@@ -905,6 +905,22 @@ class NestedVIFPool(BaseVIFPool):
         if not available_subports:
             return
 
+        # FIXME(ltomasbo): Workaround for ports already detached from trunks
+        # whose status is ACTIVE
+        trunks_subports = [subport_id['port_id']
+                           for p_port in parent_ports.values()
+                           for subport_id in p_port['subports']]
+        port_ids_to_delete = [p_id for p_id in available_subports.keys()
+                              if p_id not in trunks_subports]
+        for port_id in port_ids_to_delete:
+            LOG.debug("Deleting port with wrong status: %s", port_id)
+            try:
+                neutron.delete_port(port_id)
+            except n_exc.PortNotFoundClient:
+                LOG.debug('Port already deleted: %s', port_id)
+            except n_exc.NeutronClientException:
+                LOG.exception('Error removing the port %s', port_id)
+
         for trunk_id, parent_port in parent_ports.items():
             host_addr = parent_port.get('ip')
             if trunk_ips and host_addr not in trunk_ips:

@@ -1545,6 +1545,51 @@ class NestedVIFPool(test_base.TestCase):
                          {tuple(port['security_groups']): [port_id]})
         neutron.delete_port.assert_not_called()
 
+    @mock.patch('kuryr_kubernetes.os_vif_util.'
+                'neutron_to_osvif_vif_nested_vlan')
+    def test__precreated_ports_recover_plus_port_cleanup(self, m_to_osvif):
+        cls = vif_pool.NestedVIFPool
+        m_driver = mock.MagicMock(spec=cls)
+        neutron = self.useFixture(k_fix.MockNeutronClient()).client
+
+        m_driver._available_ports_pools = {}
+        m_driver._existing_vifs = {}
+
+        oslo_cfg.CONF.set_override('port_debug',
+                                   True,
+                                   group='kubernetes')
+
+        port_id = str(uuid.uuid4())
+        trunk_id = str(uuid.uuid4())
+        trunk_obj = self._get_trunk_obj(port_id=trunk_id, subport_id=port_id)
+        port = get_port_obj(port_id=port_id, device_owner='trunk:subport')
+        port_to_delete_id = str(uuid.uuid4())
+        port_to_delete = get_port_obj(port_id=port_to_delete_id,
+                                      device_owner='trunk:subport')
+        p_ports = self._get_parent_ports([trunk_obj])
+        a_subports = {port_id: port, port_to_delete_id: port_to_delete}
+        subnet_id = port['fixed_ips'][0]['subnet_id']
+        net_id = str(uuid.uuid4())
+        network = ovu.neutron_to_osvif_network({'id': net_id})
+        subnets = {subnet_id: {subnet_id: network}}
+
+        m_driver._get_trunks_info.return_value = (p_ports, a_subports,
+                                                  subnets)
+
+        vif = mock.sentinel.vif
+        m_to_osvif.return_value = vif
+
+        pool_key = (port['binding:host_id'], port['project_id'], net_id)
+        m_driver._get_pool_key.return_value = pool_key
+
+        cls._precreated_ports(m_driver, 'recover')
+
+        m_driver._get_trunks_info.assert_called_once()
+        self.assertEqual(m_driver._existing_vifs[port_id], vif)
+        self.assertEqual(m_driver._available_ports_pools[pool_key],
+                         {tuple(port['security_groups']): [port_id]})
+        neutron.delete_port.assert_called_with(port_to_delete_id)
+
     def test__precreated_ports_free(self):
         cls = vif_pool.NestedVIFPool
         m_driver = mock.MagicMock(spec=cls)
@@ -1731,13 +1776,12 @@ class NestedVIFPool(test_base.TestCase):
         port = get_port_obj(port_id=port_id, device_owner='trunk:subport')
         p_ports = {}
-        a_subports = {port_id: port}
+        a_subports = {}
         subnet_id = port['fixed_ips'][0]['subnet_id']
         subnet = mock.sentinel.subnet
         subnets = {subnet_id: {subnet_id: subnet}}
         m_driver._get_trunks_info.return_value = (p_ports, a_subports,
                                                   subnets)
         cls._precreated_ports(m_driver, m_action)
         m_driver._get_trunks_info.assert_called_once()
         self.assertEqual(m_driver._existing_vifs, {})