Merge "Add events for Pod object."

Zuul 2021-12-16 00:54:22 +00:00 committed by Gerrit Code Review
commit b36d09284c
8 changed files with 209 additions and 89 deletions
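The change threads a single pattern through the CNI plugin, the VIF pool driver and the controller handlers: whenever Kuryr hits a notable step or failure while wiring a Pod, it attaches a Kubernetes Event to that Pod via the K8s client's add_event(). A minimal sketch of the pattern; the helper function is illustrative (not part of the change), and the assumption that omitting the event type yields a normal event is inferred from the hunks below, where only failure paths pass 'Warning' explicitly:

    from kuryr_kubernetes import clients


    def emit_pod_event(pod, reason, message, warning=False):
        # Illustrative helper only; the real change calls add_event() inline
        # at each interesting point in the CNI plugin, pool driver and handlers.
        k8s = clients.get_kubernetes_client()
        if warning:
            # Failure paths in the hunks below pass 'Warning' explicitly.
            k8s.add_event(pod, reason, message, 'Warning')
        else:
            # Omitting the type is assumed to produce a normal event.
            k8s.add_event(pod, reason, message)

Events attached to the Pod surface in `kubectl describe pod`, so users can see why networking for a Pod is stuck without reading kuryr-controller or kuryr-daemon logs.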


@@ -94,9 +94,20 @@ class K8sCNIRegistryPlugin(base_cni.CNIPlugin):
def wait_for_active(kp_name):
return self.registry[kp_name]['vifs']
data = {'metadata': {'name': params.args.K8S_POD_NAME,
'namespace': params.args.K8S_POD_NAMESPACE}}
pod = k_utils.get_referenced_object(data, 'Pod')
try:
self.k8s.add_event(pod, 'CNIWaitingForVIFs',
f' Waiting for Neutron ports of {kp_name} to '
f'become ACTIVE after binding.')
vifs = wait_for_active(kp_name)
except retrying.RetryError:
self.k8s.add_event(pod, 'CNITimedOutWaitingForVIFs',
f' Time out on waiting for Neutron ports of '
f'{kp_name} to become ACTIVE after binding.',
'Warning')
raise exceptions.CNINeutronPortActivationTimeout(
kp_name, self.registry[kp_name]['vifs'])
@@ -172,6 +183,13 @@ class K8sCNIRegistryPlugin(base_cni.CNIPlugin):
kp = d['kp']
vifs = d['vifs']
except KeyError:
data = {'metadata': {'name': params.args.K8S_POD_NAME,
'namespace': params.args.K8S_POD_NAMESPACE}}
pod = k_utils.get_referenced_object(data, 'Pod')
self.k8s.add_event(pod, 'CNITimeoutKuryrPortRegistry',
f'Timed out waiting for Neutron ports to be '
f'created for {kp_name}. Check '
f'kuryr-controller logs.', 'Warning')
raise exceptions.CNIKuryrPortTimeout(kp_name)
for ifname, vif in vifs.items():


@@ -228,6 +228,8 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
# REVISIT(ltomasbo): Drop the subnets parameter and get the information
# from the pool_key, which will be required when multi-network is
# supported
kubernetes = clients.get_kubernetes_client()
if not self._recovered_pools:
LOG.debug("Kuryr-controller not yet ready to populate pools.")
if raise_not_ready:
@@ -253,12 +255,20 @@ class BaseVIFPool(base.VIFPoolDriver, metaclass=abc.ABCMeta):
if pool_size < oslo_cfg.CONF.vif_pool.ports_pool_min:
num_ports = max(oslo_cfg.CONF.vif_pool.ports_pool_batch,
oslo_cfg.CONF.vif_pool.ports_pool_min - pool_size)
vifs = self._drv_vif.request_vifs(
pod=pod,
project_id=pool_key[1],
subnets=subnets,
security_groups=security_groups,
num_ports=num_ports)
try:
vifs = self._drv_vif.request_vifs(
pod=pod,
project_id=pool_key[1],
subnets=subnets,
security_groups=security_groups,
num_ports=num_ports)
except os_exc.SDKException as exc:
kubernetes.add_event(pod, 'FailToPopulateVIFPool',
f'There was an error during populating '
f'VIF pool for pod: {exc.message}',
type_='Warning')
raise
for vif in vifs:
self._existing_vifs[vif.id] = vif
self._available_ports_pools.setdefault(


@@ -22,14 +22,17 @@ from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import nested_vlan_vif
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes.controller.managers import prometheus_exporter as exp
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
KURYRPORT_URI = constants.K8S_API_CRD_NAMESPACES + '/{ns}/kuryrports/{crd}'
ACTIVE_TIMEOUT = nested_vlan_vif.ACTIVE_TIMEOUT
class KuryrPortHandler(k8s_base.ResourceEventHandler):
@@ -95,7 +98,17 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
self._drv_vif_pool.activate_vif(data['vif'], pod=pod,
retry_info=retry_info)
changed = True
except k_exc.ResourceNotReady:
if retry_info and retry_info.get('elapsed',
0) > ACTIVE_TIMEOUT:
self.k8s.add_event(pod, 'ActivatePortFailed',
'Activating Neutron port has '
'timed out', 'Warning')
except os_exc.ResourceNotFound:
self.k8s.add_event(pod, 'ActivatePortFailed',
'Activating Neutron port has '
'failed, possibly deleted',
'Warning')
LOG.debug("Port not found, possibly already deleted. "
"No need to activate it")
finally:
@@ -112,6 +125,9 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
self._drv_vif_pool.release_vif(pod, data['vif'],
project_id,
security_groups)
self.k8s.add_event(pod, 'UpdateKuryrPortCRDFailed',
f'Marking ports are ACTIVE in the '
f'KuryrPort failed: {ex}', 'Warning')
except k_exc.K8sClientException:
raise k_exc.ResourceNotReady(pod['metadata']['name'])
try:
@@ -154,6 +170,9 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
LOG.warning('Manually triggered KuryrPort %s removal. This '
'action should be avoided, since KuryrPort CRDs are '
'internal to Kuryr.', name)
self.k8s.add_event(pod, 'NoKuryrPort', 'KuryrPort was not found, '
'most probably it was manually removed.',
'Warning')
return
project_id = self._drv_project.get_project(pod)
@@ -172,6 +191,8 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
# rules would be created too.
LOG.debug("Skipping SG rules deletion associated to the pod %s",
pod)
self.k8s.add_event(pod, 'SkipingSGDeletion', 'Skipping SG rules '
'deletion')
crd_pod_selectors = []
try:
security_groups = self._drv_sg.get_security_groups(pod, project_id)
@@ -223,6 +244,11 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
"get a port as it will be deleted too. If the "
"default subnet driver is used, then you must "
"select an existing subnet to be used by Kuryr.")
self.k8s.add_event(pod, 'NoPodSubnetFound', 'Pod subnet not '
'found. Namespace for this pod was probably '
'deleted. For default subnet driver it must '
'be existing subnet configured for Kuryr',
'Warning')
return False
# Request the default interface of pod
@@ -265,6 +291,9 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
self._drv_vif_pool.release_vif(pod, data['vif'],
project_id,
security_groups)
self.k8s.add_event(pod, 'ExceptionOnKPUpdate', f'There was k8s '
f'client exception on updating corresponding '
f'KuryrPort CRD: {ex}', 'Warning')
return True
def _update_kuryrport_crd(self, kuryrport_crd, vifs):
@@ -305,5 +334,8 @@ class KuryrPortHandler(k8s_base.ResourceEventHandler):
return self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
f"/{namespace}/pods/{name}")
except k_exc.K8sResourceNotFound as ex:
self.k8s.add_event(kuryrport_crd,
f'Failed to get corresponding pod: {ex}',
'Warning')
LOG.exception("Failed to get pod: %s", ex)
raise
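The KuryrPortHandler changes lean on two pieces of context the hunks only hint at: retry_info, a dict the retry machinery passes in and whose 'elapsed' key the code reads (its unit is not shown in the hunk, but comparing it against ACTIVE_TIMEOUT re-used from nested_vlan_vif suggests seconds), and the pod the event is attached to. The guard added in the first hunk of this file boils down to the sketch below; the helper name and the default argument are assumptions:

    def _maybe_report_activation_timeout(k8s, pod, retry_info, active_timeout):
        # Sketch of the guard above: only emit the Warning once the handler
        # has been retrying activation for longer than ACTIVE_TIMEOUT.
        if retry_info and retry_info.get('elapsed', 0) > active_timeout:
            k8s.add_event(pod, 'ActivatePortFailed',
                          'Activating Neutron port has timed out', 'Warning')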


@@ -43,6 +43,10 @@ class VIFHandler(k8s_base.ResourceEventHandler):
OBJECT_KIND = constants.K8S_OBJ_POD
OBJECT_WATCH_PATH = "%s/%s" % (constants.K8S_API_BASE, "pods")
def __init__(self):
super(VIFHandler, self).__init__()
self.k8s = clients.get_kubernetes_client()
def on_present(self, pod, *args, **kwargs):
if utils.is_host_network(pod):
return
@@ -61,7 +65,6 @@ class VIFHandler(k8s_base.ResourceEventHandler):
# networking solutions/CNI drivers.
return
k8s = clients.get_kubernetes_client()
namespace = pod['metadata']['namespace']
kuryrnetwork_path = '{}/{}/kuryrnetworks/{}'.format(
constants.K8S_API_CRD_NAMESPACES, namespace,
@@ -74,8 +77,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
constants.K8S_API_NAMESPACES, namespace)
LOG.debug("Triggering Namespace Handling %s", namespace_path)
try:
k8s.annotate(namespace_path,
{'KuryrTrigger': str(uuid.uuid4())})
self.k8s.annotate(namespace_path,
{'KuryrTrigger': str(uuid.uuid4())})
except k_exc.K8sResourceNotFound:
LOG.warning('Ignoring Pod handling, no Namespace %s.',
namespace)
@@ -84,13 +87,16 @@ class VIFHandler(k8s_base.ResourceEventHandler):
# NOTE(gryf): Set the finalizer as soon, as we have pod created. On
# subsequent updates of the pod, add_finalizer will ignore this if
# finalizer exists.
# finalizer exist.
try:
if not k8s.add_finalizer(pod, constants.POD_FINALIZER):
if not self.k8s.add_finalizer(pod, constants.POD_FINALIZER):
# NOTE(gryf) It might happen that pod will be deleted even
# before we got here.
return
except k_exc.K8sClientException as ex:
self.k8s.add_event(pod, 'FailedToAddFinalizerToPod',
f'Adding finalizer to pod has failed: {ex}',
'Warning')
LOG.exception("Failed to add finalizer to pod object: %s", ex)
raise
@@ -108,20 +114,25 @@ class VIFHandler(k8s_base.ResourceEventHandler):
pod_name)
return
except k_exc.K8sClientException as ex:
self.k8s.add_event(pod, 'FailedToCreateKuryrPortCRD',
f'Creating corresponding KuryrPort CRD has '
f'failed: {ex}', 'Warning')
LOG.exception("Kubernetes Client Exception creating "
"KuryrPort CRD: %s", ex)
raise k_exc.ResourceNotReady(pod)
def on_finalize(self, pod, *args, **kwargs):
k8s = clients.get_kubernetes_client()
try:
kp = k8s.get(KURYRPORT_URI.format(ns=pod["metadata"]["namespace"],
crd=pod["metadata"]["name"]))
kp = self.k8s.get(KURYRPORT_URI.format(
ns=pod["metadata"]["namespace"], crd=pod["metadata"]["name"]))
except k_exc.K8sResourceNotFound:
try:
k8s.remove_finalizer(pod, constants.POD_FINALIZER)
self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)
except k_exc.K8sClientException as ex:
self.k8s.add_event(pod, 'FailedRemovingFinalizerFromPod',
f'Removing finalizer from pod has failed: '
f'{ex}', 'Warning')
LOG.exception('Failed to remove finalizer from pod: %s', ex)
raise
return
@@ -131,21 +142,27 @@ class VIFHandler(k8s_base.ResourceEventHandler):
# annotations, force an emission of event to trigger on_finalize
# method on the KuryrPort.
try:
k8s.annotate(utils.get_res_link(kp),
{'KuryrTrigger': str(uuid.uuid4())})
self.k8s.annotate(utils.get_res_link(kp),
{'KuryrTrigger': str(uuid.uuid4())})
except k_exc.K8sResourceNotFound:
k8s.remove_finalizer(pod, constants.POD_FINALIZER)
except k_exc.K8sClientException:
self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)
except k_exc.K8sClientException as ex:
self.k8s.add_event(pod, 'FailedRemovingPodFinalzier',
f'Failed removing finalizer from pod: {ex}',
'Warning')
raise k_exc.ResourceNotReady(pod['metadata']['name'])
else:
try:
k8s.delete(KURYRPORT_URI
.format(ns=pod["metadata"]["namespace"],
crd=pod["metadata"]["name"]))
self.k8s.delete(KURYRPORT_URI
.format(ns=pod["metadata"]["namespace"],
crd=pod["metadata"]["name"]))
except k_exc.K8sResourceNotFound:
k8s.remove_finalizer(pod, constants.POD_FINALIZER)
self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)
except k_exc.K8sClientException:
except k_exc.K8sClientException as ex:
self.k8s.add_event(pod, 'FailedRemovingKuryrPortCRD',
f'Failed removing corresponding KuryrPort '
f'CRD: {ex}', 'Warning')
LOG.exception("Could not remove KuryrPort CRD for pod %s.",
pod['metadata']['name'])
raise k_exc.ResourceNotReady(pod['metadata']['name'])
@@ -172,6 +189,11 @@ class VIFHandler(k8s_base.ResourceEventHandler):
if not vifs:
vifs = {}
owner_reference = {'apiVersion': pod['apiVersion'],
'kind': pod['kind'],
'name': pod['metadata']['name'],
'uid': pod['metadata']['uid']}
kuryr_port = {
'apiVersion': constants.K8S_API_CRD_VERSION,
'kind': constants.K8S_OBJ_KURYRPORT,
@@ -180,7 +202,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
'finalizers': [constants.KURYRPORT_FINALIZER],
'labels': {
constants.KURYRPORT_LABEL: pod['spec']['nodeName']
}
},
'ownerReferences': [owner_reference]
},
'spec': {
'podUid': pod['metadata']['uid'],
@@ -191,6 +214,5 @@ class VIFHandler(k8s_base.ResourceEventHandler):
}
}
k8s = clients.get_kubernetes_client()
k8s.post(KURYRPORT_URI.format(ns=pod["metadata"]["namespace"],
crd=''), kuryr_port)
self.k8s.post(KURYRPORT_URI.format(ns=pod["metadata"]["namespace"],
crd=''), kuryr_port)
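Besides the event calls, _add_kuryrport_crd() now records the Pod as the owner of the KuryrPort it creates. That lets helpers such as get_referenced_object() (added in utils.py below) resolve the Pod straight from the CRD without an extra API call, and it lets the Kubernetes garbage collector clean up a KuryrPort whose owning Pod is gone. A sketch of the resulting object, in which every concrete value (names, UID, node, the CRD group/version and the finalizer/label strings) is an illustrative assumption standing in for the constants and Pod fields used in the code:

    # Illustrative KuryrPort body as built above; all literal values are
    # assumptions, not values taken from the repository.
    kuryr_port = {
        'apiVersion': 'openstack.org/v1',     # constants.K8S_API_CRD_VERSION
        'kind': 'KuryrPort',                  # constants.K8S_OBJ_KURYRPORT
        'metadata': {
            'name': 'nginx-6c8b7',
            'namespace': 'default',
            'finalizers': ['kuryr.openstack.org/kuryrport-finalizer'],
            'labels': {'kuryr.openstack.org/nodeName': 'worker-0'},
            # New in this change: the Pod owns its KuryrPort.
            'ownerReferences': [{
                'apiVersion': 'v1',
                'kind': 'Pod',
                'name': 'nginx-6c8b7',
                'uid': 'c4f1a2b3-0000-4000-8000-000000000000',
            }],
        },
        'spec': {
            'podUid': 'c4f1a2b3-0000-4000-8000-000000000000',
            'podNodeName': 'worker-0',
            'vifs': {},
        },
    }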


@@ -454,6 +454,9 @@ class K8sClient(object):
if not self.are_events_enabled:
return {}
if not resource:
return {}
involved_object = {'apiVersion': resource['apiVersion'],
'kind': resource['kind'],
'name': resource['metadata']['name'],
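The K8sClient hunk above is truncated, but the pattern is visible: add_event() is a no-op when events are disabled or no resource could be resolved, and otherwise builds an involvedObject reference from the resource it was handed. A call such as add_event(pod, 'CNIWaitingForVIFs', 'Waiting ...') therefore ends up as a core/v1 Event pointing back at the Pod; the body below is an assumed approximation based on the standard Events API, since the remaining fields the client fills in are not shown in this hunk:

    # Assumed approximate shape of the posted Event; only the apiVersion,
    # kind and name of involvedObject are confirmed by the hunk above, the
    # rest is the standard core/v1 Event layout with illustrative values.
    event = {
        'apiVersion': 'v1',
        'kind': 'Event',
        'metadata': {'generateName': 'nginx-6c8b7.', 'namespace': 'default'},
        'type': 'Normal',                      # 'Warning' on failure paths
        'reason': 'CNIWaitingForVIFs',
        'message': 'Waiting for Neutron ports of default/nginx-6c8b7 to '
                   'become ACTIVE after binding.',
        'involvedObject': {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'name': 'nginx-6c8b7',
            'namespace': 'default',
            'uid': 'c4f1a2b3-0000-4000-8000-000000000000',
        },
    }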


@@ -140,10 +140,12 @@ class BaseVIFPool(test_base.TestCase):
security_groups)
self.assertIsNone(resp)
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('time.time', return_value=50)
@ddt.data((neutron_vif.NeutronPodVIFDriver),
(nested_vlan_vif.NestedVlanPodVIFDriver))
def test__populate_pool(self, m_vif_driver, m_time):
def test__populate_pool(self, m_vif_driver, m_time,
m_get_kubernetes_client):
cls = vif_pool.BaseVIFPool
m_driver = mock.MagicMock(spec=cls)
@@ -178,9 +180,11 @@ class BaseVIFPool(test_base.TestCase):
m_driver._get_pool_size.assert_called_once()
m_driver._drv_vif.request_vifs.assert_called_once()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@ddt.data((neutron_vif.NeutronPodVIFDriver),
(nested_vlan_vif.NestedVlanPodVIFDriver))
def test__populate_pool_not_ready(self, m_vif_driver):
def test__populate_pool_not_ready(self, m_vif_driver,
m_get_kubernetes_client):
cls = vif_pool.BaseVIFPool
m_driver = mock.MagicMock(spec=cls)
@@ -200,9 +204,11 @@ class BaseVIFPool(test_base.TestCase):
tuple(security_groups))
m_driver._drv_vif.request_vifs.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@ddt.data((neutron_vif.NeutronPodVIFDriver),
(nested_vlan_vif.NestedVlanPodVIFDriver))
def test__populate_pool_not_ready_dont_raise(self, m_vif_driver):
def test__populate_pool_not_ready_dont_raise(self, m_vif_driver,
m_get_kubernetes_client):
cls = vif_pool.BaseVIFPool
m_driver = mock.MagicMock(spec=cls)
@@ -221,8 +227,9 @@ class BaseVIFPool(test_base.TestCase):
tuple(security_groups), raise_not_ready=False)
m_driver._drv_vif.request_vifs.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('time.time', return_value=0)
def test__populate_pool_no_update(self, m_time):
def test__populate_pool_no_update(self, m_time, m_get_kubernetes_client):
cls = vif_pool.BaseVIFPool
m_driver = mock.MagicMock(spec=cls)
@@ -242,10 +249,12 @@ class BaseVIFPool(test_base.TestCase):
tuple(security_groups))
m_driver._get_pool_size.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('time.time', return_value=50)
@ddt.data((neutron_vif.NeutronPodVIFDriver),
(nested_vlan_vif.NestedVlanPodVIFDriver))
def test__populate_pool_large_pool(self, m_vif_driver, m_time):
def test__populate_pool_large_pool(self, m_vif_driver, m_time,
m_get_kubernetes_client):
cls = vif_pool.BaseVIFPool
m_driver = mock.MagicMock(spec=cls)


@@ -70,6 +70,7 @@ class TestVIFHandler(test_base.TestCase):
self._handler._drv_vif_pool = mock.MagicMock(
spec=drivers.VIFPoolDriver)
self._handler._drv_multi_vif = [self._multi_vif_drv]
self._handler.k8s = mock.Mock()
self._get_project = self._handler._drv_project.get_project
self._get_subnets = self._handler._drv_subnets.get_subnets
@@ -124,21 +125,20 @@ class TestVIFHandler(test_base.TestCase):
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource')
@mock.patch('kuryr_kubernetes.utils.is_pod_completed')
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.utils.is_host_network')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_not_scheduled(self, m_get_kuryrport, m_host_network,
m_get_k8s_client, m_is_pod_completed):
m_is_pod_completed, m_get_k8s_res):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
m_is_pod_completed.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
m_get_k8s_res.return_value = {}
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_called()
self._handler.k8s.add_finalizer.assert_called()
m_get_kuryrport.assert_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
@@ -180,120 +180,103 @@ class TestVIFHandler(test_base.TestCase):
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource')
@mock.patch('kuryr_kubernetes.utils.is_host_network')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_create(self, m_get_kuryrport, m_host_network,
m_get_k8s_client):
m_get_k8s_res):
m_get_kuryrport.return_value = None
m_host_network.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
m_get_k8s_res.return_value = {}
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
add_finalizer = self._handler.k8s.add_finalizer
add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER)
m_get_kuryrport.assert_called_once_with(self._pod)
self._handler._add_kuryrport_crd.assert_called_once_with(self._pod)
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource')
@mock.patch('kuryr_kubernetes.utils.is_host_network')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_update(self, m_get_kuryrport, m_host_network,
m_get_k8s_client):
m_get_k8s_res):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
m_get_k8s_res.return_value = {}
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
add_finalizer = self._handler.k8s.add_finalizer
add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER)
m_get_kuryrport.assert_called_once_with(self._pod)
self._handler._add_kuryrport_crd.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource')
@mock.patch('kuryr_kubernetes.utils.is_host_network')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_upgrade(self, m_get_kuryrport, m_host_network,
m_get_k8s_client):
m_get_k8s_res):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
m_get_k8s_res.return_value = {}
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
add_finalizer = self._handler.k8s.add_finalizer
add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER)
m_get_kuryrport.assert_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_k8s_resource')
@mock.patch('kuryr_kubernetes.utils.is_host_network')
def test_on_present_pod_finalizer_exception(self, m_host_network,
m_get_k8s_client):
m_get_k8s_res):
m_host_network.return_value = False
k8s = mock.MagicMock()
k8s.add_finalizer.side_effect = k_exc.K8sClientException
m_get_k8s_client.return_value = k8s
m_get_k8s_res.return_value = {}
self._handler.k8s.add_finalizer.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.K8sClientException,
h_vif.VIFHandler.on_present, self._handler,
self._pod)
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
add_finalizer = self._handler.k8s.add_finalizer
add_finalizer.assert_called_once_with(self._pod, k_const.POD_FINALIZER)
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_finalize_crd(self, m_get_kuryrport, m_get_k8s_client):
m_get_kuryrport.return_value = self._kp
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
def test_on_finalize_crd(self):
self._handler.k8s.get.return_value = self._kp
h_vif.VIFHandler.on_finalize(self._handler, self._pod)
k8s.delete.assert_called_once_with(
self._handler.k8s.delete.assert_called_once_with(
h_vif.KURYRPORT_URI.format(
ns=self._pod["metadata"]["namespace"],
crd=self._pod["metadata"]["name"]))
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_finalize_crd_exception(self, m_get_kuryrport,
m_get_k8s_client):
m_get_kuryrport.return_value = self._kp
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
k8s.delete.side_effect = k_exc.K8sClientException
def test_on_finalize_crd_exception(self):
self._handler.k8s.get.return_value = self._kp
self._handler.k8s.delete.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.ResourceNotReady, h_vif.VIFHandler
.on_finalize, self._handler, self._pod)
k8s.delete.assert_called_once_with(
self._handler.k8s.delete.assert_called_once_with(
h_vif.KURYRPORT_URI.format(
ns=self._pod["metadata"]["namespace"],
crd=self._pod["metadata"]["name"]))
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_finalize_crd_not_found(self, m_get_kuryrport,
m_get_k8s_client):
m_get_kuryrport.return_value = self._kp
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
k8s.delete.side_effect = k_exc.K8sResourceNotFound(self._pod)
def test_on_finalize_crd_not_found(self):
self._handler.k8s.get.return_value = self._kp
(self._handler.k8s.delete
.side_effect) = k_exc.K8sResourceNotFound(self._pod)
h_vif.VIFHandler.on_finalize(self._handler, self._pod)
k8s.delete.assert_called_once_with(
self._handler.k8s.delete.assert_called_once_with(
h_vif.KURYRPORT_URI.format(
ns=self._pod["metadata"]["namespace"],
crd=self._pod["metadata"]["name"]))
(k8s.remove_finalizer
(self._handler.k8s.remove_finalizer
.assert_called_once_with(self._pod, k_const.POD_FINALIZER))


@@ -91,6 +91,9 @@ RESOURCE_MAP = {'Endpoints': 'endpoints',
'Pod': 'pods',
'Service': 'services',
'Machine': 'machines'}
API_VER_MAP = {'NetworkPolicy': 'networking.k8s.io/v1',
'Pod': 'v1',
'Service': 'v1'}
API_RE = re.compile(r'v\d+')
@@ -659,3 +662,43 @@ def is_pod_completed(pod):
def is_host_network(pod):
return pod['spec'].get('hostNetwork', False)
def get_referenced_object(obj, kind):
"""Get referenced object.
Helper function for getting objects out of the CRDs like
KuryrLoadBalancer, KuryrNetworkPolicy or KuryrPort needed solely for
creating Event object, so there will be no exceptions raises from this
function.
"""
for ref in obj['metadata'].get('ownerReferences', []):
try:
return {'kind': kind,
'apiVersion': ref['apiVersion'],
'metadata': {'namespace': obj['metadata']['namespace'],
'name': ref['name'],
'uid': ref['uid']}}
except KeyError:
LOG.debug("Not all needed keys was found in ownerReferences "
"list: %s", ref)
# There was no ownerReferences field, let's query API
k8s = clients.get_kubernetes_client()
data = {'metadata': {'name': obj['metadata']['name']},
'kind': kind,
'apiVersion': API_VER_MAP[kind]}
if obj['metadata'].get('namespace'):
data['metadata']['namespace'] = obj['metadata']['namespace']
try:
url = get_res_link(data)
except KeyError:
LOG.debug("Not all needed data was found in provided object: %s",
data)
return
try:
return k8s.get(url)
except exceptions.K8sClientException:
LOG.debug('Error when fetching %s to add an event %s, ignoring',
kind, get_res_unique_name(obj))
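Combined with the ownerReferences now set on KuryrPort, get_referenced_object() gives both the controller and the CNI daemon a cheap way to find the Pod an event should be attached to: CRDs that carry ownerReferences get the reference synthesized locally, while a bare name/namespace dict (the CNI registry plugin case at the top of this change) falls back to a GET against the API, and any failure simply yields None because missing an event must never break the actual operation. A short usage sketch with illustrative names:

    from kuryr_kubernetes import clients
    from kuryr_kubernetes import utils

    # 1) A CRD carrying ownerReferences: the Pod reference is built locally,
    #    without an API round-trip.
    kuryr_port = {'metadata': {'name': 'nginx-6c8b7', 'namespace': 'default',
                               'ownerReferences': [{'apiVersion': 'v1',
                                                    'kind': 'Pod',
                                                    'name': 'nginx-6c8b7',
                                                    'uid': 'c4f1a2b3-0000'}]}}
    pod_ref = utils.get_referenced_object(kuryr_port, 'Pod')

    # 2) A bare name/namespace dict, as built by the CNI registry plugin: the
    #    helper GETs the Pod from the API and returns None on any failure.
    data = {'metadata': {'name': 'nginx-6c8b7', 'namespace': 'default'}}
    pod = utils.get_referenced_object(data, 'Pod')

    # Either result (or even None) is safe to hand to add_event(), which
    # returns early when it gets no resource.
    clients.get_kubernetes_client().add_event(
        pod, 'CNIWaitingForVIFs',
        'Waiting for Neutron ports to become ACTIVE after binding.')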