Merge "Removing the upgrade code from Kuryr-Kubernetes repo"

This commit is contained in:
Zuul 2021-02-13 10:09:12 +00:00 committed by Gerrit Code Review
commit a843bd4e07
9 changed files with 2 additions and 328 deletions

View File

@ -116,71 +116,12 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
else:
self._patch_knp_crd(policy, i_rules, e_rules, knp)
def get_from_old_crd(self, netpolicy):
    """Build a KuryrNetworkPolicy dict from a legacy KuryrNetPolicy CRD.

    Drops the legacy ``np-`` name prefix, converts the old-style security
    group rules and carries over the pod selector and policy types from
    the embedded ``networkpolicy_spec``.

    :param netpolicy: old KuryrNetPolicy object as a dict.
    :returns: equivalent KuryrNetworkPolicy object as a dict.
    """
    meta = netpolicy['metadata']
    np_namespace = meta['namespace']
    np_name = meta['name'][3:]  # Remove the 'np-' prefix.
    policy_link = (f'{constants.K8S_API_NETWORKING}/namespaces/'
                   f'{np_namespace}/networkpolicies/{np_name}')

    old_spec = netpolicy['spec']
    np_spec = old_spec['networkpolicy_spec']
    egress = [self._convert_old_sg_rule(rule)
              for rule in old_spec['egressSgRules']]
    ingress = [self._convert_old_sg_rule(rule)
               for rule in old_spec['ingressSgRules']]

    return {
        'apiVersion': constants.K8S_API_CRD_VERSION,
        'kind': constants.K8S_OBJ_KURYRNETWORKPOLICY,
        'metadata': {
            'namespace': np_namespace,
            'name': np_name,
            'annotations': {
                'networkPolicyLink': policy_link,
            },
            'finalizers': [constants.NETWORKPOLICY_FINALIZER],
        },
        'spec': {
            'podSelector': np_spec['podSelector'],
            'egressSgRules': egress,
            'ingressSgRules': ingress,
            'policyTypes': np_spec['policyTypes'],
        },
        'status': {
            'podSelector': old_spec['podSelector'],
            'securityGroupId': old_spec['securityGroupId'],
            # We'll just let KuryrNetworkPolicyHandler figure out if rules
            # are created on its own.
            'securityGroupRules': [],
        },
    }
def namespaced_pods(self, policy):
    """Return the list of pods in the namespace of *policy*."""
    namespace = policy['metadata']['namespace']
    path = '{}/namespaces/{}/pods'.format(constants.K8S_API_BASE, namespace)
    response = self.kubernetes.get(path)
    return response.get('items')
def _convert_old_sg_rule(self, rule):
del rule['security_group_rule']['id']
del rule['security_group_rule']['security_group_id']
result = {
'sgRule': rule['security_group_rule'],
}
if 'namespace' in rule:
result['namespace'] = rule['namespace']
if 'remote_ip_prefixes' in rule:
result['affectedPods'] = []
for ip, namespace in rule['remote_ip_prefixes']:
if not ip:
continue
result['affectedPods'].append({
'podIP': ip,
'podNamespace': namespace,
})
return result
def _get_security_group_rules_from_network_policy(self, policy):
"""Get security group rules required to represent an NP

View File

@ -51,34 +51,6 @@ class KuryrNetworkPolicyHandler(k8s_base.ResourceEventHandler):
self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
self._drv_lbaas = drivers.LBaaSDriver.get_instance()
self._convert_old_crds()
def _convert_old_crds(self):
    """Migrate all KuryrNetPolicy CRDs to KuryrNetworkPolicy ones.

    Every old CRD gets converted, posted as a new-style CRD and then
    deleted. A conflict on creation means the conversion already
    happened, so it is only logged and the old object is still removed.
    """
    try:
        netpolicies = self.k8s.get(constants.K8S_API_CRD_KURYRNETPOLICIES)
    except exceptions.K8sResourceNotFound:
        LOG.debug("%s resource not found.",
                  constants.K8S_API_CRD_KURYRNETPOLICIES)
        return
    except exceptions.K8sClientException:
        LOG.exception("Error when fetching old KuryrNetPolicy CRDs for "
                      "conversion.")
        return

    for old_policy in netpolicies.get('items', []):
        converted = self._drv_policy.get_from_old_crd(old_policy)
        namespace = old_policy['metadata']['namespace']
        url = (f"{constants.K8S_API_CRD_NAMESPACES}/"
               f"{namespace}/"
               f"kuryrnetworkpolicies")
        try:
            self.k8s.post(url, converted)
        except exceptions.K8sConflict:
            LOG.warning('KuryrNetworkPolicy %s already existed when '
                        'converting KuryrNetPolicy %s. Ignoring.',
                        utils.get_res_unique_name(converted),
                        utils.get_res_unique_name(old_policy))
        # The old CRD is deleted whether the new one was just created
        # or already existed.
        self.k8s.delete(utils.get_res_link(old_policy))
def _patch_kuryrnetworkpolicy_crd(self, knp, field, data,
action='replace'):
name = knp['metadata']['name']

View File

@ -15,7 +15,6 @@
from kuryr.lib._i18n import _
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
@ -23,7 +22,6 @@ from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
@ -261,8 +259,6 @@ class EndpointsHandler(k8s_base.ResourceEventHandler):
def on_present(self, endpoints):
ep_name = endpoints['metadata']['name']
ep_namespace = endpoints['metadata']['namespace']
if self._move_annotations_to_crd(endpoints):
return
k8s = clients.get_kubernetes_client()
loadbalancer_crd = k8s.get_loadbalancer_crd(endpoints)
@ -385,72 +381,4 @@ class EndpointsHandler(k8s_base.ResourceEventHandler):
loadbalancer_crd)
raise
def _move_annotations_to_crd(self, endpoints):
    """Support upgrade from annotations to KuryrLoadBalancer CRD.

    Reads the legacy LBaaS spec/state annotations from *endpoints*,
    recreates them as a KuryrLoadBalancer CRD and then removes the
    annotations from the Endpoints and Service objects.

    :param endpoints: Endpoints object as a dict.
    :returns: True when annotations were found (and migration was
              attempted), False when there is nothing to convert and the
              normal flow should continue.
    :raises k_exc.ResourceNotReady: on K8s client errors, so the event
            gets retried.
    """
    try:
        spec = (endpoints['metadata']['annotations']
                [k_const.K8S_ANNOTATION_LBAAS_SPEC])
    except KeyError:
        spec = None

    try:
        state = (endpoints['metadata']['annotations']
                 [k_const.K8S_ANNOTATION_LBAAS_STATE])
    except KeyError:
        state = None

    if not state and not spec:
        # No annotations, return
        return False

    # NOTE(review): the original `if state or spec:` guard here was
    # always true after the early return above, so it has been removed.
    if state:
        _dict = jsonutils.loads(state)
        # This is strongly using the fact that annotation's o.vo
        # and CRD has the same structure.
        state = obj_lbaas.flatten_object(_dict)

    # Endpoints should always have the spec in the annotation
    spec_dict = jsonutils.loads(spec)
    spec = obj_lbaas.flatten_object(spec_dict)

    if state and state['service_pub_ip_info'] is None:
        del state['service_pub_ip_info']

    for spec_port in spec['ports']:
        if not spec_port.get('name'):
            del spec_port['name']

    if not spec['lb_ip']:
        del spec['lb_ip']

    try:
        self._create_crd_spec(endpoints, spec, state)
    except k_exc.ResourceNotReady:
        LOG.info('KuryrLoadBalancer CRD %s already exists.',
                 utils.get_res_unique_name(endpoints))
    except k_exc.K8sClientException:
        raise k_exc.ResourceNotReady(endpoints)

    # In this step we only need to make sure all annotations are
    # removed. It may happen that the Endpoints only had spec set,
    # in which case we just remove it and let the normal flow handle
    # creation of the LB.
    k8s = clients.get_kubernetes_client()
    service_link = utils.get_service_link(endpoints)
    to_remove = [
        (utils.get_res_link(endpoints),
         k_const.K8S_ANNOTATION_LBAAS_SPEC),
        (service_link,
         k_const.K8S_ANNOTATION_LBAAS_SPEC),
    ]
    if state:
        to_remove.append((utils.get_res_link(endpoints),
                          k_const.K8S_ANNOTATION_LBAAS_STATE))

    for path, name in to_remove:
        try:
            k8s.remove_annotations(path, name)
        except k_exc.K8sClientException:
            LOG.warning('Error removing %s annotation from %s', name,
                        path)

    return True

View File

@ -32,44 +32,6 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
def __init__(self):
super(NamespaceHandler, self).__init__()
self._drv_project = drivers.NamespaceProjectDriver.get_instance()
self._upgrade_crds()
def _upgrade_crds(self):
    """Clean up leftover KuryrNet CRDs and their namespace annotations.

    For each old KuryrNet CRD: if a matching namespace exists and is
    annotated with the net CRD annotation, the annotation is removed;
    the CRD itself is always deleted afterwards.

    :raises exceptions.K8sClientException: when namespace/CRD listing
            fails for reasons other than the resources being absent.
    """
    k8s = clients.get_kubernetes_client()
    try:
        net_crds = k8s.get(constants.K8S_API_CRD_KURYRNETS)
        namespaces = k8s.get(constants.K8S_API_NAMESPACES)
    except exceptions.K8sResourceNotFound:
        # Nothing to upgrade.
        return
    except exceptions.K8sClientException:
        # Fixed typo in the log message: 'retriving' -> 'retrieving'.
        LOG.warning("Error retrieving namespace information")
        raise

    # Old KuryrNet CRDs are named 'ns-<namespace name>'.
    ns_dict = {'ns-' + ns['metadata']['name']: ns
               for ns in namespaces.get('items')}

    for net_crd in net_crds.get('items'):
        try:
            ns = ns_dict[net_crd['metadata']['name']]
        except KeyError:
            # Note(ltomasbo): The CRD does not have an associated
            # namespace. It must be deleted
            LOG.debug('No namespace associated, deleting kuryrnet crd: '
                      '%s', net_crd)
        else:
            try:
                ns_net_annotations = ns['metadata']['annotations'][
                    constants.K8S_ANNOTATION_NET_CRD]
            except KeyError:
                LOG.debug('Namespace associated is not annotated: %s', ns)
            else:
                # Fixed broken placeholder: '%' -> '%s' (the bare '%'
                # made the lazy log formatting fail).
                LOG.debug('Removing annotation: %s', ns_net_annotations)
                k8s.remove_annotations(utils.get_res_link(ns),
                                       constants.K8S_ANNOTATION_NET_CRD)
        try:
            k8s.delete(utils.get_res_link(net_crd))
        except exceptions.K8sResourceNotFound:
            LOG.debug('Kuryrnet object already deleted: %s', net_crd)
def on_present(self, namespace):
ns_labels = namespace['metadata'].get('labels', {})

View File

@ -13,9 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from os_vif import objects
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
@ -88,9 +86,6 @@ class VIFHandler(k8s_base.ResourceEventHandler):
LOG.exception("Failed to add finalizer to pod object: %s", ex)
raise
if self._move_annotations_to_crd(pod):
return
kp = driver_utils.get_kuryrport(pod)
if self._is_pod_completed(pod):
if kp:
@ -210,37 +205,3 @@ class VIFHandler(k8s_base.ResourceEventHandler):
k8s = clients.get_kubernetes_client()
k8s.post(KURYRPORT_URI.format(ns=pod["metadata"]["namespace"],
crd=''), kuryr_port)
def _move_annotations_to_crd(self, pod):
    """Support upgrade from annotations to KuryrPort CRD.

    Recreates a KuryrPort CRD from the legacy VIF annotation on *pod*
    and removes the annotation afterwards.

    :param pod: Pod object as a dict.
    :returns: False when there is no annotation to convert, True when
              the CRD was created (or the namespace is terminating).
    :raises k_exc.ResourceNotReady: on K8s client errors so the event is
            retried.
    """
    try:
        annotation = (pod['metadata']['annotations']
                      [constants.K8S_ANNOTATION_VIF])
    except KeyError:
        # Nothing to upgrade for this pod.
        return False

    primitive = jsonutils.loads(annotation)
    pod_state = objects.base.VersionedObject.obj_from_primitive(primitive)

    vifs = {}
    for ifname, vif in pod_state.vifs.items():
        vifs[ifname] = {
            'default': pod_state.default_vif == vif,
            'vif': objects.base.VersionedObject.obj_to_primitive(vif),
        }

    try:
        self._add_kuryrport_crd(pod, vifs)
    except k_exc.K8sNamespaceTerminating:
        # The underlying namespace is being terminated, we can
        # ignore this and let `on_finalize` handle this now. Still
        # returning True to make sure `on_present` won't continue.
        return True
    except k_exc.K8sClientException as ex:
        LOG.exception("Kubernetes Client Exception recreating "
                      "KuryrPort CRD from annotation: %s", ex)
        raise k_exc.ResourceNotReady(pod)

    k8s = clients.get_kubernetes_client()
    k8s.remove_annotations(utils.get_res_link(pod),
                           constants.K8S_ANNOTATION_VIF)
    return True

View File

@ -242,13 +242,6 @@ class TestNetworkPolicyDriver(test_base.TestCase):
self.assertEqual([], resp)
self.kubernetes.get.assert_called_once()
def test_get_from_old_crd(self):
    """Conversion must carry over spec, status and identifying metadata."""
    converted = self._driver.get_from_old_crd(self.old_crd)

    self.assertEqual(self.crd['spec'], converted['spec'])
    self.assertEqual(self.crd['status'], converted['status'])
    for key in ('name', 'namespace'):
        self.assertEqual(self.crd['metadata'][key],
                         converted['metadata'][key])
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_services')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_resource_details')

View File

@ -95,21 +95,3 @@ class TestPolicyHandler(test_base.TestCase):
self.assertEqual(self.k8s, self.handler.k8s)
self.assertEqual(self.os_net, self.handler.os_net)
self.assertEqual(self.lbaas_driver, self.handler._drv_lbaas)
def test_convert(self):
    """_convert_old_crds() posts the new CRD and deletes the old one."""
    self_link = ('/apis/openstack.org/v1/namespaces/ns/'
                 'kuryrnetpolicies/old-knp')
    old_crd = {
        'apiVersion': 'openstack.org/v1',
        'kind': 'KuryrNetPolicy',
        'metadata': {
            'namespace': 'ns',
            'name': 'old-knp'
        }
    }
    self.k8s.get.return_value = {'items': [old_crd]}
    self.np_driver.get_from_old_crd.return_value = mock.sentinel.new_crd

    self.handler._convert_old_crds()

    self.k8s.post.assert_called_once_with(mock.ANY, mock.sentinel.new_crd)
    self.k8s.delete.assert_called_once_with(self_link)

View File

@ -63,15 +63,13 @@ class TestNamespaceHandler(test_base.TestCase):
}
return crd
@mock.patch.object(namespace.NamespaceHandler, '_upgrade_crds')
@mock.patch.object(drivers.NamespaceProjectDriver, 'get_instance')
def test_init(self, m_get_project_driver, m_upgrade_crds):
def test_init(self, m_get_project_driver):
project_driver = mock.sentinel.project_driver
m_get_project_driver.return_value = project_driver
handler = namespace.NamespaceHandler()
self.assertEqual(project_driver, handler._drv_project)
m_upgrade_crds.assert_called_once()
def test_on_present(self):
self._get_kns_crd.return_value = None

View File

@ -16,7 +16,6 @@
from unittest import mock
from os_vif import objects as os_obj
from oslo_serialization import jsonutils
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drivers
@ -79,7 +78,6 @@ class TestVIFHandler(test_base.TestCase):
self._request_vif = self._handler._drv_vif_pool.request_vif
self._release_vif = self._handler._drv_vif_pool.release_vif
self._activate_vif = self._handler._drv_vif_pool.activate_vif
self._matc = self._handler._move_annotations_to_crd
self._is_pod_scheduled = self._handler._is_pod_scheduled
self._is_pod_completed = self._handler._is_pod_completed
self._request_additional_vifs = \
@ -128,14 +126,12 @@ class TestVIFHandler(test_base.TestCase):
m_get_k8s_client):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = True
self._matc.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_not_called()
self._matc.assert_not_called()
m_get_kuryrport.assert_not_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
@ -149,46 +145,23 @@ class TestVIFHandler(test_base.TestCase):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
self._is_pod_scheduled.return_value = False
self._matc.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_not_called()
self._matc.assert_not_called()
m_get_kuryrport.assert_not_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_on_completed_with_annotation(self, m_get_kuryrport,
m_get_k8s_client):
self._is_pod_completed.return_value = True
m_get_kuryrport.return_value = self._kp
self._matc.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
h_vif.VIFHandler.on_present(self._handler, self._pod)
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
self._matc.assert_called_once_with(self._pod)
self._handler.on_finalize.assert_called_once_with(self._pod)
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_kuryrport')
def test_on_present_on_completed_without_annotation(self, m_get_kuryrport,
m_get_k8s_client):
self._is_pod_completed.return_value = True
m_get_kuryrport.return_value = None
self._matc.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
@ -196,7 +169,6 @@ class TestVIFHandler(test_base.TestCase):
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
self._matc.assert_called_once_with(self._pod)
self._handler.on_finalize.assert_not_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
@ -209,7 +181,6 @@ class TestVIFHandler(test_base.TestCase):
m_get_k8s_client):
m_get_kuryrport.return_value = None
m_host_network.return_value = False
self._matc.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
@ -218,7 +189,6 @@ class TestVIFHandler(test_base.TestCase):
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
m_get_kuryrport.assert_called_once_with(self._pod)
self._matc.assert_called_once_with(self._pod)
self._handler._add_kuryrport_crd.assert_called_once_with(self._pod)
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
@ -228,7 +198,6 @@ class TestVIFHandler(test_base.TestCase):
m_get_k8s_client):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
self._matc.return_value = False
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
@ -236,7 +205,6 @@ class TestVIFHandler(test_base.TestCase):
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
self._matc.assert_called_once_with(self._pod)
m_get_kuryrport.assert_called_once_with(self._pod)
self._handler._add_kuryrport_crd.assert_not_called()
@ -247,7 +215,6 @@ class TestVIFHandler(test_base.TestCase):
m_get_k8s_client):
m_get_kuryrport.return_value = self._kp
m_host_network.return_value = False
self._matc.return_value = True
k8s = mock.MagicMock()
m_get_k8s_client.return_value = k8s
@ -255,8 +222,7 @@ class TestVIFHandler(test_base.TestCase):
k8s.add_finalizer.assert_called_once_with(self._pod,
k_const.POD_FINALIZER)
self._matc.assert_called_once_with(self._pod)
m_get_kuryrport.assert_not_called()
m_get_kuryrport.assert_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
@ -266,7 +232,6 @@ class TestVIFHandler(test_base.TestCase):
def test_on_present_pod_finalizer_exception(self, m_host_network,
m_get_k8s_client):
m_host_network.return_value = False
self._matc.return_value = True
k8s = mock.MagicMock()
k8s.add_finalizer.side_effect = k_exc.K8sClientException
m_get_k8s_client.return_value = k8s
@ -326,31 +291,3 @@ class TestVIFHandler(test_base.TestCase):
crd=self._pod["metadata"]["name"]))
(k8s.remove_finalizer
.assert_called_once_with(self._pod, k_const.POD_FINALIZER))
def test_move_annotations_to_crd_no_annotations(self):
    """Without a VIF annotation nothing is converted."""
    converted = h_vif.VIFHandler._move_annotations_to_crd(self._handler,
                                                          self._pod)

    self.assertFalse(converted)
@mock.patch('kuryr_kubernetes.clients.get_kubernetes_client')
def test_move_annotations_to_crd_with_annotations(self, m_get_k8s_client):
    """A VIF annotation becomes a KuryrPort CRD and then gets removed."""
    vif_obj = os_obj.vif.VIFOpenVSwitch()
    pod_state = vif.PodState(default_vif=vif_obj)
    self._pod['metadata']['annotations'] = {
        k_const.K8S_ANNOTATION_VIF:
            jsonutils.dumps(pod_state.obj_to_primitive())}
    expected_vifs = {
        'eth0': {'default': True, 'vif': vif_obj.obj_to_primitive()}}
    k8s = mock.MagicMock()
    m_get_k8s_client.return_value = k8s

    moved = h_vif.VIFHandler._move_annotations_to_crd(self._handler,
                                                      self._pod)

    self.assertTrue(moved)
    self._handler._add_kuryrport_crd.assert_called_once_with(self._pod,
                                                             expected_vifs)
    m_get_k8s_client.assert_called_once()
    k8s.remove_annotations.assert_called_once_with(
        f'/api/v1/namespaces/{self._pod["metadata"]["namespace"]}/'
        f'pods/{self._pod["metadata"]["name"]}',
        k_const.K8S_ANNOTATION_VIF)