Ensure existing pods use the right network policy

This patch set ensures that:
- A new network policy is applied to existing pods
- A modification of the network policy selector gets applied to the
associated pods
- Deleting a network policy updates the access policies on the
associated pods
- There is no race when deleting the network policy: the security
group is first removed from the ports and only then deleted as
part of the network policy deletion process

Partially Implements: blueprint k8s-network-policies

Change-Id: I25aa23b87947662333c021b9df3e83b9de2515e2
Luis Tomas Bolivar 2018-11-07 18:46:07 +01:00
parent 6dfd4067f5
commit d029ddb277
12 changed files with 482 additions and 95 deletions
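At a high level, the handler-side flow these changes introduce can be summarized with a short sketch. This is a simplified outline of the on_present logic in the hunks below, with driver attribute names shortened; it is an approximation, not the exact code:

# Simplified sketch of the on_present flow added by this patch set;
# drv_policy, drv_pod_sg and drv_vif_pool stand in for the real drivers.
def on_present(handler, policy, project_id):
    pods_to_update = []
    namespace = policy['metadata']['namespace']
    # First policy on the namespace: every pod there must move from the
    # default security groups to the policy-driven ones.
    if not handler.drv_policy.knps_on_namespace(namespace):
        pods_to_update.extend(handler.drv_policy.namespaced_pods(policy))
    # On a podSelector change this returns the pods matched by the
    # previous selector, so their access rules are revisited too.
    modified_pods = handler.drv_policy.ensure_network_policy(policy,
                                                             project_id)
    if modified_pods:
        pods_to_update.extend(modified_pods)
    # Plus the pods the policy currently selects.
    pods_to_update.extend(handler.drv_policy.affected_pods(policy))
    for pod in pods_to_update:
        pod_sgs = handler.drv_pod_sg.get_security_groups(pod, project_id)
        handler.drv_vif_pool.update_vif_sgs(pod, pod_sgs)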

View File

@@ -379,6 +379,20 @@ class PodVIFDriver(DriverBase):
"""
raise NotImplementedError()
@abc.abstractmethod
def update_vif_sgs(self, pod, security_groups):
"""Update VIF security groups.
Implementing drivers should update the pod's associated port with
the specified security groups.
:param pod: dict containing Kubernetes Pod object
:param security_groups: list containing security groups' IDs as
returned by
`PodSecurityGroupsDriver.get_security_groups`
"""
raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class MultiVIFDriver(DriverBase):
@@ -718,15 +732,62 @@ class NetworkPolicyDriver(DriverBase):
:param policy: dict containing Kubernetes NP object
:param project_id: openstack project_id
:returns: list of Pod objects affected by the network policy
creation or its podSelector modification
"""
raise NotImplementedError()
@abc.abstractmethod
def release_network_policy(self, policy, project_id):
def release_network_policy(self, kuryrnetpolicy):
"""Delete a network policy
:param kuryrnetpolicy: dict containing KuryrNetPolicy CRD object
"""
raise NotImplementedError()
@abc.abstractmethod
def affected_pods(self, policy, selector=None):
"""Return affected pods by the policy
This method returns the list of pod objects affected by the policy, or
by the selector if it is specified.
:param policy: dict containing Kubernetes NP object
:param project_id
:param selector: (optional) specifc pod selector
:returns: list of Pods objects affected by the policy or the selector
if it is pased
"""
raise NotImplementedError()
@abc.abstractmethod
def knps_on_namespace(self, namespace):
"""Check if there si kuryr network policy CRDs on the namespace
This method returns true if there are knps on the specified namespace
or false otherwise
:param namespace: namespace name where the knps CRDs should be
:returns: true if knps CRDs on the namespace, false otherwise
"""
raise NotImplementedError()
@abc.abstractmethod
def namespaced_pods(self, policy):
"""Return pods on the policy namespace
This method returns the pods on the network policy namespace
:param policy: dict containing Kubernetes NP object
:returns: list of Pod objects on the policy namespace
"""
raise NotImplementedError()
@abc.abstractmethod
def get_kuryrnetpolicy_crd(self, policy):
"""Return kuryrnetpolicy CRD object associated to the policy
:param policy: dict containing Kubernetes NP object
:returns: kuryrnetpolicy CRD object associated to the policy
"""
raise NotImplementedError()
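For illustration only, a minimal stub covering just the methods added to the interface here could look as follows; the class name and trivial bodies are hypothetical, and the real Neutron-backed implementation follows in the next file. Instantiating it would additionally require the pre-existing abstract methods:

class StubNetworkPolicyDriver(NetworkPolicyDriver):
    """Hypothetical stub showing only the newly added contract."""

    def release_network_policy(self, kuryrnetpolicy):
        pass  # nothing to clean up in a stub

    def affected_pods(self, policy, selector=None):
        return []  # no pods tracked

    def knps_on_namespace(self, namespace):
        return False  # no KuryrNetPolicy CRDs present

    def namespaced_pods(self, policy):
        return []  # no pods in the namespace

    def get_kuryrnetpolicy_crd(self, policy):
        return None  # no CRD stored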

View File

@@ -13,6 +13,8 @@
# limitations under the License.
from six.moves.urllib.parse import urlencode
from oslo_log import log as logging
from neutronclient.common import exceptions as n_exc
@@ -38,11 +40,22 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
Triggered by events from network policies, this method ensures that
security groups and security group rules are created or updated in
reaction to Kubernetes network policy events.
In addition, it returns the pods affected by the policy:
- Creation: pods on the namespace of the created policy
- Update: pods that need to be updated in case of PodSelector
modification, i.e., the pods that were affected by the previous
PodSelector
"""
LOG.debug("Creating network policy %s", policy['metadata']['name'])
if self._get_kuryrnetpolicy_crd(policy):
self.update_security_group_rules_from_network_policy(policy)
if self.get_kuryrnetpolicy_crd(policy):
previous_selector = (
self.update_security_group_rules_from_network_policy(policy))
if previous_selector:
return self.affected_pods(policy, previous_selector)
if previous_selector is None:
return self.namespaced_pods(policy)
else:
self.create_security_group_rules_from_network_policy(policy,
project_id)
@@ -53,7 +66,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
This method updates security group rules based on CRUD events received
from a configuration change or patch to an existing network policy
"""
crd = self._get_kuryrnetpolicy_crd(policy)
crd = self.get_kuryrnetpolicy_crd(policy)
crd_name = crd['metadata']['name']
LOG.debug("Already existing CRD %s", crd_name)
sg_id = crd['spec']['securityGroupId']
@@ -63,6 +76,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
existing_e_rules = crd['spec'].get('egressSgRules')
if existing_i_rules or existing_e_rules:
existing_sg_rules = existing_i_rules + existing_e_rules
existing_pod_selector = crd['spec'].get('podSelector')
# Parse network policy update and get new ruleset
i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
current_sg_rules = i_rules + e_rules
@@ -98,15 +112,24 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
if sg_rule == e_rule:
e_rule["security_group_rule"]["id"] = sgr_id
# Annotate kuryrnetpolicy CRD with current policy and ruleset
LOG.debug('Patching KuryrNetPolicy CRD %s', crd_name)
pod_selector = policy['spec'].get('podSelector')
LOG.debug('Patching KuryrNetPolicy CRD %s' % crd_name)
try:
self.kubernetes.patch('spec', crd['metadata']['selfLink'],
{'ingressSgRules': i_rules,
'egressSgRules': e_rules,
'podSelector': pod_selector,
'networkpolicy_spec': policy['spec']})
# TODO(ltomasbo): allow patching both spec and metadata in the
# same call
self.kubernetes.patch('metadata', crd['metadata']['selfLink'],
{'labels': pod_selector.get('matchLabels')})
except exceptions.K8sClientException:
LOG.exception('Error updating kuryrnetpolicy CRD %s', crd_name)
raise
if existing_pod_selector != pod_selector:
return existing_pod_selector
return False
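The return value above is effectively tri-state, and ensure_network_policy dispatches on it as shown earlier. A compact restatement of the contract (pods_to_refresh is a hypothetical helper name, not part of the patch):

def pods_to_refresh(driver, policy, previous_selector):
    # previous_selector is the return value of
    # update_security_group_rules_from_network_policy
    if previous_selector:
        # podSelector changed: revisit pods matched by the old selector
        return driver.affected_pods(policy, previous_selector)
    if previous_selector is None:
        # no selector stored before: fall back to the whole namespace
        return driver.namespaced_pods(policy)
    return []  # False: selector unchanged, nothing extra to refresh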
def create_security_group_rules_from_network_policy(self, policy,
project_id):
@@ -154,7 +177,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
self.neutron.delete_security_group(sg['security_group']['id'])
raise
try:
crd = self._get_kuryrnetpolicy_crd(policy)
crd = self.get_kuryrnetpolicy_crd(policy)
self.kubernetes.annotate(policy['metadata']['selfLink'],
{"kuryrnetpolicy_selfLink":
crd['metadata']['selfLink']})
@@ -260,18 +283,19 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
security_group_rule_id)
raise
def release_network_policy(self, policy, project_id):
netpolicy_crd = self._get_kuryrnetpolicy_crd(policy)
def release_network_policy(self, netpolicy_crd):
if netpolicy_crd is not None:
try:
sg_id = netpolicy_crd['spec']['securityGroupId']
self.neutron.delete_security_group(sg_id)
except n_exc.NotFound:
LOG.debug("Security Group not found: %s", sg_id)
raise
except n_exc.Conflict:
LOG.debug("Segurity Group already in use: %s", sg_id)
raise
LOG.debug("Security Group already in use: %s", sg_id)
# raising ResourceNotReady to retry this action in case ports
# associated with affected pods are not updated in time, i.e.,
# they are still using the security group to be removed
raise exceptions.ResourceNotReady(sg_id)
except n_exc.NeutronClientException:
LOG.exception("Error deleting security group %s.", sg_id)
raise
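Raising ResourceNotReady defers the deletion instead of failing it: the event is retried until the ports of the affected pods have dropped the security group, which the policy handler (later in this patch) arranges before calling release_network_policy. A rough, hypothetical approximation of that retry behaviour, with made-up attempt counts:

import time

def delete_with_retry(driver, netpolicy_crd, attempts=5, delay=1):
    # Hypothetical loop approximating the framework's retry semantics.
    for _ in range(attempts):
        try:
            driver.release_network_policy(netpolicy_crd)
            return True
        except Exception:  # stands in for exceptions.ResourceNotReady
            time.sleep(delay)  # ports may still reference the group
    return False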
@@ -279,7 +303,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
netpolicy_crd['metadata']['name'],
netpolicy_crd['metadata']['namespace'])
def _get_kuryrnetpolicy_crd(self, policy):
def get_kuryrnetpolicy_crd(self, policy):
netpolicy_crd_name = "np-" + policy['metadata']['name']
netpolicy_crd_namespace = policy['metadata']['namespace']
try:
@@ -294,6 +318,19 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
raise
return netpolicy_crd
def knps_on_namespace(self, namespace):
try:
netpolicy_crds = self.kubernetes.get(
'{}/{}/kuryrnetpolicies'.format(
constants.K8S_API_CRD_NAMESPACES,
namespace))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
if netpolicy_crds.get('items'):
return True
return False
def _add_kuryrnetpolicy_crd(self, policy, project_id, sg_id, i_rules,
e_rules):
networkpolicy_name = policy['metadata']['name']
@@ -318,6 +355,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
'securityGroupId': sg_id,
'ingressSgRules': i_rules,
'egressSgRules': e_rules,
'podSelector': pod_selector,
'networkpolicy_spec': policy['spec']
},
}
@@ -353,3 +391,32 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
LOG.exception("Kubernetes Client Exception deleting kuryrnetpolicy"
" CRD.")
raise
def affected_pods(self, policy, selector=None):
if selector:
pod_selector = selector
else:
pod_selector = policy['spec'].get('podSelector')
if pod_selector:
pod_label = pod_selector['matchLabels']
pod_namespace = policy['metadata']['namespace']
# Removing pod-template-hash as pods will not have it and
# otherwise there will be no match
pod_label.pop('pod-template-hash', None)
pod_label = urlencode(pod_label)
# NOTE(ltomasbo): K8s API does not accept &, so we need to AND
# the matchLabels with ',' or '%2C' instead
pod_label = pod_label.replace('&', ',')
pods = self.kubernetes.get(
'{}/namespaces/{}/pods?labelSelector={}'.format(
constants.K8S_API_BASE, pod_namespace, pod_label))
return pods.get('items')
else:
# NOTE(ltomasbo): It affects all the pods on the namespace
return self.namespaced_pods(policy)
def namespaced_pods(self, policy):
pod_namespace = policy['metadata']['namespace']
pods = self.kubernetes.get('{}/namespaces/{}/pods'.format(
constants.K8S_API_BASE, pod_namespace))
return pods.get('items')
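The labelSelector encoding used in affected_pods can be tried standalone; the labels and namespace below are made up:

from six.moves.urllib.parse import urlencode

labels = {'app': 'demo', 'tier': 'web'}
# urlencode joins pairs with '&' ('app=demo&tier=web'), but the K8s API
# ANDs matchLabels with ',', hence the separator rewrite:
selector = urlencode(labels).replace('&', ',')
print('/api/v1/namespaces/default/pods?labelSelector=' + selector)
# -> .../pods?labelSelector=app=demo,tier=web (pair order may vary)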

View File

@@ -60,21 +60,20 @@ class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
pod_labels = pod['metadata'].get('labels')
LOG.debug("Using labels %s", pod_labels)
knp_crds = _get_kuryrnetpolicy_crds(pod_labels,
namespace=pod_namespace)
for crd in knp_crds.get('items'):
LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
if pod_labels:
knp_crds = _get_kuryrnetpolicy_crds(pod_labels,
namespace=pod_namespace)
for crd in knp_crds.get('items'):
LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
knp_namespace_crds = _get_kuryrnetpolicy_crds(namespace=pod_namespace)
for crd in knp_namespace_crds.get('items'):
if not crd['metadata'].get('labels'):
LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
if not sg_list:
if not knp_namespace_crds.get('items') and not sg_list:
sg_list = config.CONF.neutron_defaults.pod_security_groups
if not sg_list:
raise cfg.RequiredOptError('pod_security_groups',
@@ -102,21 +101,20 @@ class NetworkPolicyServiceSecurityGroupsDriver(
svc_labels = service['metadata'].get('labels')
LOG.debug("Using labels %s", svc_labels)
knp_crds = _get_kuryrnetpolicy_crds(svc_labels,
namespace=svc_namespace)
for crd in knp_crds.get('items'):
LOG.debug("Appending %s" % str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
if svc_labels:
knp_crds = _get_kuryrnetpolicy_crds(svc_labels,
namespace=svc_namespace)
for crd in knp_crds.get('items'):
LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
knp_namespace_crds = _get_kuryrnetpolicy_crds(namespace=svc_namespace)
for crd in knp_namespace_crds.get('items'):
if not crd['metadata'].get('labels'):
LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
if not sg_list:
if not knp_namespace_crds.get('items') and not sg_list:
sg_list = config.CONF.neutron_defaults.pod_security_groups
if not sg_list:
raise cfg.RequiredOptError('pod_security_groups',

View File

@@ -92,6 +92,18 @@ class NeutronPodVIFDriver(base.PodVIFDriver):
vif.active = True
def update_vif_sgs(self, pod, security_groups):
neutron = clients.get_neutron_client()
pod_state = utils.get_pod_state(pod)
# NOTE(ltomasbo): It just updates the default_vif security group
port_id = pod_state.vifs[constants.DEFAULT_IFNAME].id
neutron.update_port(port_id,
{
"port": {
'security_groups': list(security_groups)
}
})
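For reference, the request body sent to Neutron above has the following shape; the UUIDs are placeholders. Note that update_port replaces the port's security group list wholesale rather than appending to it:

port_id = '11111111-2222-3333-4444-555555555555'  # default VIF port ID
sg_ids = ['aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee']  # from the SG driver
body = {'port': {'security_groups': sg_ids}}
# neutron.update_port(port_id, body) sets exactly this SG list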
def _get_port_request(self, pod, project_id, subnets, security_groups,
unbound=False):
port_req_body = {'project_id': project_id,

View File

@@ -13,8 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from kuryr_kubernetes import constants
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes import os_vif_util as ovu
from kuryr_kubernetes import utils
def get_network_id(subnets):
@@ -39,3 +43,14 @@ def get_device_id(pod):
def get_host_id(pod):
return pod['spec']['nodeName']
def get_pod_state(pod):
try:
annotations = pod['metadata']['annotations']
state_annotation = annotations[constants.K8S_ANNOTATION_VIF]
except KeyError:
return None
state_annotation = jsonutils.loads(state_annotation)
state = utils.extract_pod_annotation(state_annotation)
return state

View File

@@ -106,6 +106,9 @@ class NoopVIFPool(base.VIFPoolDriver):
def activate_vif(self, pod, vif):
self._drv_vif.activate_vif(pod, vif)
def update_vif_sgs(self, pod, sgs):
self._drv_vif.update_vif_sgs(pod, sgs)
def sync_pools(self):
pass
@@ -149,6 +152,9 @@ class BaseVIFPool(base.VIFPoolDriver):
def activate_vif(self, pod, vif):
self._drv_vif.activate_vif(pod, vif)
def update_vif_sgs(self, pod, sgs):
self._drv_vif.update_vif_sgs(pod, sgs)
def _get_pool_size(self, pool_key=None):
return len(self._available_ports_pools.get(pool_key, []))
@@ -834,6 +840,10 @@ class MultiVIFPool(base.VIFPoolDriver):
vif_drv_alias = self._get_vif_drv_alias(vif)
self._vif_drvs[vif_drv_alias].activate_vif(pod, vif)
def update_vif_sgs(self, pod, sgs):
pod_vif_type = self._get_pod_vif_type(pod)
self._vif_drvs[pod_vif_type].update_vif_sgs(pod, sgs)
def delete_network_pools(self, net_id):
for vif_drv in self._vif_drvs.values():
if str(vif_drv) == 'NoopVIFPool':

View File

@@ -50,16 +50,57 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
super(NetworkPolicyHandler, self).__init__()
self._drv_policy = drivers.NetworkPolicyDriver.get_instance()
self._drv_project = drivers.NetworkPolicyProjectDriver.get_instance()
self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
specific_driver='multi_pool')
self._drv_vif_pool.set_vif_driver()
self._drv_pod_sg = drivers.PodSecurityGroupsDriver.get_instance()
self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
def on_present(self, policy):
LOG.debug("Created or updated: %s", policy)
project_id = self._drv_project.get_project(policy)
self._drv_policy.ensure_network_policy(policy, project_id)
pods_to_update = []
knps_on_namespace = self._drv_policy.knps_on_namespace(
policy['metadata']['namespace'])
if not knps_on_namespace:
namespace_pods = self._drv_policy.namespaced_pods(policy)
pods_to_update.extend(namespace_pods)
modified_pods = self._drv_policy.ensure_network_policy(policy,
project_id)
if modified_pods:
pods_to_update.extend(modified_pods)
matched_pods = self._drv_policy.affected_pods(policy)
pods_to_update.extend(matched_pods)
for pod in pods_to_update:
pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
def on_deleted(self, policy):
LOG.debug("Deleted network policy: %s", policy)
project_id = self._drv_project.get_project(policy)
self._drv_policy.release_network_policy(policy, project_id)
pods_to_update = self._drv_policy.affected_pods(policy)
netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
crd_sg = netpolicy_crd['spec'].get('securityGroupId')
for pod in pods_to_update:
pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
if crd_sg in pod_sgs:
pod_sgs.remove(crd_sg)
self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
self._drv_policy.release_network_policy(netpolicy_crd)
# re-apply original security groups for the namespace
knps_on_namespace = self._drv_policy.knps_on_namespace(
policy['metadata']['namespace'])
if not knps_on_namespace:
namespace_pods = self._drv_policy.namespaced_pods(policy)
for pod in namespace_pods:
pod_sgs = self._drv_pod_sg.get_security_groups(pod,
project_id)
self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
@MEMOIZE
def is_ready(self, quota):

View File

@@ -21,6 +21,7 @@ from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import objects
@@ -80,7 +81,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
# where certain pods/namespaces/nodes can be managed by other
# networking solutions/CNI drivers.
return
state = self._get_pod_state(pod)
state = driver_utils.get_pod_state(pod)
LOG.debug("Got VIFs from annotation: %r", state)
if not state:
project_id = self._drv_project.get_project(pod)
@@ -139,7 +141,8 @@ class VIFHandler(k8s_base.ResourceEventHandler):
# released.
security_groups = []
state = self._get_pod_state(pod)
state = driver_utils.get_pod_state(pod)
LOG.debug("Got VIFs from annotation: %r", state)
if state:
for ifname, vif in state.vifs.items():
self._drv_vif_pool.release_vif(pod, vif, project_id,
@@ -186,15 +189,3 @@ class VIFHandler(k8s_base.ResourceEventHandler):
k8s.annotate(pod['metadata']['selfLink'],
{constants.K8S_ANNOTATION_VIF: annotation},
resource_version=pod['metadata']['resourceVersion'])
def _get_pod_state(self, pod):
# TODO(ivc): same as '_set_vif'
try:
annotations = pod['metadata']['annotations']
state_annotation = annotations[constants.K8S_ANNOTATION_VIF]
except KeyError:
return None
state_annotation = jsonutils.loads(state_annotation)
state = utils.extract_pod_annotation(state_annotation)
LOG.debug("Got VIFs from annotation: %r", state)
return state

View File

@@ -90,7 +90,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
self._driver = network_policy.NetworkPolicyDriver()
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_kuryrnetpolicy_crd', return_value=False)
'get_kuryrnetpolicy_crd', return_value=False)
@mock.patch.object(network_policy.NetworkPolicyDriver,
'create_security_group_rules_from_network_policy')
@mock.patch.object(network_policy.NetworkPolicyDriver,
@@ -102,22 +102,47 @@ class TestNetworkPolicyDriver(test_base.TestCase):
m_create.assert_called_once_with(self._policy, self._project_id)
m_update.assert_not_called()
@mock.patch.object(network_policy.NetworkPolicyDriver, 'affected_pods')
@mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_kuryrnetpolicy_crd', return_value=True)
'get_kuryrnetpolicy_crd', return_value=True)
@mock.patch.object(network_policy.NetworkPolicyDriver,
'create_security_group_rules_from_network_policy')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'update_security_group_rules_from_network_policy')
def test_ensure_network_policy_with_existing_crd(self, m_update, m_create,
m_get_crd):
def test_ensure_network_policy_with_existing_crd(
self, m_update, m_create, m_get_crd, m_namespaced, m_affected):
previous_selector = mock.sentinel.previous_selector
m_update.return_value = previous_selector
self._driver.ensure_network_policy(self._policy, self._project_id)
m_get_crd.assert_called_once_with(self._policy)
m_create.assert_not_called()
m_update.assert_called_once_with(self._policy)
m_affected.assert_called_once_with(self._policy, previous_selector)
m_namespaced.assert_not_called()
@mock.patch.object(network_policy.NetworkPolicyDriver, 'affected_pods')
@mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'get_kuryrnetpolicy_crd', return_value=True)
@mock.patch.object(network_policy.NetworkPolicyDriver,
'create_security_group_rules_from_network_policy')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'update_security_group_rules_from_network_policy')
def test_ensure_network_policy_with_existing_crd_no_selector(
self, m_update, m_create, m_get_crd, m_namespaced, m_affected):
m_update.return_value = None
self._driver.ensure_network_policy(self._policy, self._project_id)
m_get_crd.assert_called_once_with(self._policy)
m_create.assert_not_called()
m_update.assert_called_once_with(self._policy)
m_affected.assert_not_called()
m_namespaced.assert_called_once_with(self._policy)
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_kuryrnetpolicy_crd')
'get_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_add_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
@@ -136,7 +161,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
m_add_crd.assert_called_once()
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_kuryrnetpolicy_crd')
'get_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_add_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
@@ -156,7 +181,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
m_add_crd.assert_called_once()
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_kuryrnetpolicy_crd')
'get_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_add_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
@@ -186,21 +211,23 @@ class TestNetworkPolicyDriver(test_base.TestCase):
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_create_security_group_rule')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_kuryrnetpolicy_crd')
'get_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'parse_network_policy_rules')
def test_update_security_group_rules(self, m_parse, m_get_crd,
m_create_sgr):
policy = self._policy.copy()
policy['spec']['podSelector'] = {'matchLabels': {'test': 'test'}}
m_get_crd.return_value = self._crd
m_parse.return_value = (self._i_rules, self._e_rules)
self._driver.update_security_group_rules_from_network_policy(
self._policy)
m_parse.assert_called_with(self._policy, self._sg_id)
policy)
m_parse.assert_called_with(policy, self._sg_id)
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_create_security_group_rule')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'_get_kuryrnetpolicy_crd')
'get_kuryrnetpolicy_crd')
@mock.patch.object(network_policy.NetworkPolicyDriver,
'parse_network_policy_rules')
def test_update_security_group_rules_with_k8s_exc(self, m_parse, m_get_crd,
@@ -237,3 +264,36 @@ class TestNetworkPolicyDriver(test_base.TestCase):
self._policy['spec'] = {}
self._driver.parse_network_policy_rules(self._policy, self._sg_id)
m_create.assert_not_called()
def test_knps_on_namespace(self):
self.kubernetes.get.return_value = {'items': ['not-empty']}
namespace = 'test1'
resp = self._driver.knps_on_namespace(namespace)
self.assertTrue(resp)
def test_knps_on_namespace_empty(self):
self.kubernetes.get.return_value = {'items': []}
namespace = 'test1'
resp = self._driver.knps_on_namespace(namespace)
self.assertFalse(resp)
@mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
def test_affected_pods(self, m_namespaced):
self._driver.affected_pods(self._policy)
m_namespaced.assert_called_once_with(self._policy)
self.kubernetes.assert_not_called()
@mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
def test_affected_pods_with_podselector(self, m_namespaced):
self.kubernetes.get.return_value = {'items': []}
selector = {'matchLabels': {'test': 'test'}}
self._driver.affected_pods(self._policy, selector)
m_namespaced.assert_not_called()
def test_namespaced_pods(self):
self.kubernetes.get.return_value = {'items': []}
resp = self._driver.namespaced_pods(self._policy)
self.assertEqual([], resp)

View File

@@ -99,16 +99,21 @@ class TestNetworkPolicySecurityGroupsDriver(test_base.TestCase):
def test_get_security_groups(self, m_get_crds):
m_get_crds.return_value = self._crds
self._driver.get_security_groups(self._pod, self._project_id)
m_get_crds.assert_called_with(namespace=self._namespace)
calls = [mock.call(self._pod['metadata']['labels'],
namespace=self._namespace),
mock.call(namespace=self._namespace)]
m_get_crds.assert_has_calls(calls)
@mock.patch.object(network_policy_security_groups,
'_get_kuryrnetpolicy_crds')
def test_get_security_groups_with_label(self, m_get_crds):
def test_get_security_groups_without_label(self, m_get_crds):
pod = self._pod.copy()
del pod['metadata']['labels']
labels = {'run': 'demo'}
self._crds['metadata']['labels'] = labels
self._crds['items'][0]['metadata']['labels'] = labels
m_get_crds.return_value = self._crds
self._driver.get_security_groups(self._pod, self._project_id)
m_get_crds.assert_called()
self._driver.get_security_groups(pod, self._project_id)
m_get_crds.assert_called_once_with(namespace=self._namespace)
@mock.patch.object(network_policy_security_groups,
'_get_kuryrnetpolicy_crds')
@@ -117,3 +122,7 @@ class TestNetworkPolicySecurityGroupsDriver(test_base.TestCase):
self.assertRaises(cfg.RequiredOptError,
self._driver.get_security_groups, self._pod,
self._project_id)
calls = [mock.call(self._pod['metadata']['labels'],
namespace=self._namespace),
mock.call(namespace=self._namespace)]
m_get_crds.assert_has_calls(calls)

View File

@@ -28,6 +28,7 @@ class TestPolicyHandler(test_base.TestCase):
self._policy_name = 'np-test'
self._policy_uid = mock.sentinel.policy_uid
self._policy_link = mock.sentinel.policy_link
self._pod_sg = mock.sentinel.pod_sg
self._policy = {
u'apiVersion': u'networking.k8s.io/v1',
@@ -57,32 +58,139 @@ class TestPolicyHandler(test_base.TestCase):
spec=drivers.NetworkPolicyProjectDriver)
self._handler._drv_policy = mock.MagicMock(
spec=drivers.NetworkPolicyDriver)
self._handler._drv_pod_sg = mock.Mock(
spec=drivers.PodSecurityGroupsDriver)
self._handler._drv_svc_sg = mock.Mock(
spec=drivers.ServiceSecurityGroupsDriver)
self._handler._drv_vif_pool = mock.MagicMock(
spec=drivers.VIFPoolDriver)
self._get_project = self._handler._drv_project.get_project
self._get_project.return_value = self._project_id
self._get_security_groups = (
self._handler._drv_pod_sg.get_security_groups)
self._set_vifs_driver = self._handler._drv_vif_pool.set_vif_driver
self._set_vifs_driver.return_value = mock.Mock(
spec=drivers.PodVIFDriver)
self._update_vif_sgs = self._handler._drv_vif_pool.update_vif_sgs
self._update_vif_sgs.return_value = None
def _get_knp_obj(self):
knp_obj = {
'apiVersion': 'openstack.org/v1',
'kind': 'KuryrNetPolicy',
'metadata': {
'name': 'np-test-network-policy',
'namespace': 'test-1'
},
'spec': {
'securityGroupId': 'c1ac16f5-e198-4628-9d84-253c6001be8e',
'securityGroupName': 'sg-test-network-policy'
}}
return knp_obj
@mock.patch.object(drivers.ServiceSecurityGroupsDriver, 'get_instance')
@mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
@mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
@mock.patch.object(drivers.NetworkPolicyDriver, 'get_instance')
@mock.patch.object(drivers.NetworkPolicyProjectDriver, 'get_instance')
def test_init(self, m_get_project_driver, m_get_policy_driver):
def test_init(self, m_get_project_driver, m_get_policy_driver,
m_get_vif_driver, m_get_pod_sg_driver, m_get_svc_sg_driver):
handler = policy.NetworkPolicyHandler()
m_get_project_driver.assert_called_once()
m_get_policy_driver.assert_called_once()
m_get_vif_driver.assert_called_once()
m_get_pod_sg_driver.assert_called_once()
m_get_svc_sg_driver.assert_called_once()
self.assertEqual(m_get_project_driver.return_value,
handler._drv_project)
self.assertEqual(m_get_policy_driver.return_value, handler._drv_policy)
def test_on_present(self):
policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
modified_pod = mock.sentinel.modified_pod
match_pod = mock.sentinel.match_pod
knp_on_ns = self._handler._drv_policy.knps_on_namespace
knp_on_ns.return_value = True
namespaced_pods = self._handler._drv_policy.namespaced_pods
ensure_nw_policy = self._handler._drv_policy.ensure_network_policy
ensure_nw_policy.return_value = [modified_pod]
affected_pods = self._handler._drv_policy.affected_pods
affected_pods.return_value = [match_pod]
sg1 = [mock.sentinel.sg1]
sg2 = [mock.sentinel.sg2]
self._get_security_groups.side_effect = [sg1, sg2]
policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
namespaced_pods.assert_not_called()
ensure_nw_policy.assert_called_once_with(self._policy,
self._project_id)
affected_pods.assert_called_once_with(self._policy)
calls = [mock.call(modified_pod, self._project_id),
mock.call(match_pod, self._project_id)]
self._get_security_groups.assert_has_calls(calls)
calls = [mock.call(modified_pod, sg1), mock.call(match_pod, sg2)]
self._update_vif_sgs.assert_has_calls(calls)
def test_on_present_without_knps_on_namespace(self):
modified_pod = mock.sentinel.modified_pod
match_pod = mock.sentinel.match_pod
namespace_pod = mock.sentinel.namespace_pod
knp_on_ns = self._handler._drv_policy.knps_on_namespace
knp_on_ns.return_value = False
namespaced_pods = self._handler._drv_policy.namespaced_pods
namespaced_pods.return_value = [namespace_pod]
ensure_nw_policy = self._handler._drv_policy.ensure_network_policy
ensure_nw_policy.return_value = [modified_pod]
affected_pods = self._handler._drv_policy.affected_pods
affected_pods.return_value = [match_pod]
sg1 = [mock.sentinel.sg1]
sg2 = [mock.sentinel.sg2]
sg3 = [mock.sentinel.sg3]
self._get_security_groups.side_effect = [sg1, sg2, sg3]
policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
namespaced_pods.assert_called_once_with(self._policy)
ensure_nw_policy.assert_called_once_with(self._policy,
self._project_id)
affected_pods.assert_called_once_with(self._policy)
calls = [mock.call(namespace_pod, self._project_id),
mock.call(modified_pod, self._project_id),
mock.call(match_pod, self._project_id)]
self._get_security_groups.assert_has_calls(calls)
calls = [mock.call(namespace_pod, sg1),
mock.call(modified_pod, sg2),
mock.call(match_pod, sg3)]
self._update_vif_sgs.assert_has_calls(calls)
def test_on_deleted(self):
policy.NetworkPolicyHandler.on_deleted(self._handler, self._policy)
namespace_pod = mock.sentinel.namespace_pod
match_pod = mock.sentinel.match_pod
affected_pods = self._handler._drv_policy.affected_pods
affected_pods.return_value = [match_pod]
get_knp_crd = self._handler._drv_policy.get_kuryrnetpolicy_crd
knp_obj = self._get_knp_obj()
get_knp_crd.return_value = knp_obj
sg1 = [mock.sentinel.sg1]
sg2 = [mock.sentinel.sg2]
self._get_security_groups.side_effect = [sg1, sg2]
release_nw_policy = self._handler._drv_policy.release_network_policy
release_nw_policy.assert_called_once_with(self._policy,
self._project_id)
policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
knp_on_ns = self._handler._drv_policy.knps_on_namespace
knp_on_ns.return_value = False
ns_pods = self._handler._drv_policy.namespaced_pods
ns_pods.return_value = [namespace_pod]
policy.NetworkPolicyHandler.on_deleted(self._handler, self._policy)
release_nw_policy.assert_called_once_with(knp_obj)
calls = [mock.call(match_pod, self._project_id),
mock.call(namespace_pod, self._project_id)]
self._get_security_groups.assert_has_calls(calls)
calls = [mock.call(match_pod, sg1), mock.call(namespace_pod, sg2)]
self._update_vif_sgs.assert_has_calls(calls)

View File

@@ -66,7 +66,6 @@ class TestVIFHandler(test_base.TestCase):
self._request_vif = self._handler._drv_vif_pool.request_vif
self._release_vif = self._handler._drv_vif_pool.release_vif
self._activate_vif = self._handler._drv_vif_pool.activate_vif
self._get_pod_state = self._handler._get_pod_state
self._set_pod_state = self._handler._set_pod_state
self._is_host_network = self._handler._is_host_network
self._is_pending_node = self._handler._is_pending_node
@@ -75,7 +74,6 @@ class TestVIFHandler(test_base.TestCase):
self._request_vif.return_value = self._vif
self._request_additional_vifs.return_value = self._additioan_vifs
self._get_pod_state.return_value = self._state
self._is_host_network.return_value = False
self._is_pending_node.return_value = True
self._get_project.return_value = self._project_id
@@ -142,54 +140,63 @@ class TestVIFHandler(test_base.TestCase):
self.assertFalse(h_vif.VIFHandler._is_pending_node({'spec': {},
'status': {}}))
def test_on_present(self):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_present(self, m_get_pod_state):
m_get_pod_state.return_value = self._state
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._get_pod_state.assert_called_once_with(self._pod)
m_get_pod_state.assert_called_once_with(self._pod)
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
self._set_pod_state.assert_not_called()
def test_on_present_host_network(self):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_present_host_network(self, m_get_pod_state):
m_get_pod_state.return_value = self._state
self._is_host_network.return_value = True
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._get_pod_state.assert_not_called()
m_get_pod_state.assert_not_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
self._set_pod_state.assert_not_called()
def test_on_present_not_pending(self):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_present_not_pending(self, m_get_pod_state):
m_get_pod_state.return_value = self._state
self._is_pending_node.return_value = False
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._get_pod_state.assert_not_called()
m_get_pod_state.assert_not_called()
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
self._activate_vif.assert_not_called()
self._set_pod_state.assert_not_called()
def test_on_present_activate(self):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_present_activate(self, m_get_pod_state):
m_get_pod_state.return_value = self._state
self._vif.active = False
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._get_pod_state.assert_called_once_with(self._pod)
m_get_pod_state.assert_called_once_with(self._pod)
self._activate_vif.assert_called_once_with(self._pod, self._vif)
self._set_pod_state.assert_called_once_with(self._pod, self._state)
self._request_vif.assert_not_called()
self._request_additional_vifs.assert_not_called()
def test_on_present_create(self):
self._get_pod_state.return_value = None
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_present_create(self, m_get_pod_state):
m_get_pod_state.return_value = None
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._get_pod_state.assert_called_once_with(self._pod)
m_get_pod_state.assert_called_once_with(self._pod)
self._request_vif.assert_called_once_with(
self._pod, self._project_id, self._subnets, self._security_groups)
self._request_additional_vifs.assert_called_once_with(
@@ -197,15 +204,16 @@ class TestVIFHandler(test_base.TestCase):
self._set_pod_state.assert_called_once_with(self._pod, self._state)
self._activate_vif.assert_not_called()
def test_on_present_create_with_additional_vifs(self):
self._get_pod_state.return_value = None
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_present_create_with_additional_vifs(self, m_get_pod_state):
m_get_pod_state.return_value = None
additional_vif = os_obj.vif.VIFBase()
self._state.additional_vifs = {'eth1': additional_vif}
self._request_additional_vifs.return_value = [additional_vif]
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._get_pod_state.assert_called_once_with(self._pod)
m_get_pod_state.assert_called_once_with(self._pod)
self._request_vif.assert_called_once_with(
self._pod, self._project_id, self._subnets, self._security_groups)
self._request_additional_vifs.assert_called_once_with(
@@ -213,13 +221,14 @@ class TestVIFHandler(test_base.TestCase):
self._set_pod_state.assert_called_once_with(self._pod, self._state)
self._activate_vif.assert_not_called()
def test_on_present_rollback(self):
self._get_pod_state.return_value = None
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_present_rollback(self, m_get_pod_state):
m_get_pod_state.return_value = None
self._set_pod_state.side_effect = k_exc.K8sClientException
h_vif.VIFHandler.on_present(self._handler, self._pod)
self._get_pod_state.assert_called_once_with(self._pod)
m_get_pod_state.assert_called_once_with(self._pod)
self._request_vif.assert_called_once_with(
self._pod, self._project_id, self._subnets, self._security_groups)
self._request_additional_vifs.assert_called_once_with(
@@ -230,18 +239,21 @@ class TestVIFHandler(test_base.TestCase):
self._security_groups)
self._activate_vif.assert_not_called()
def test_on_deleted(self):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_deleted(self, m_get_pod_state):
m_get_pod_state.return_value = self._state
h_vif.VIFHandler.on_deleted(self._handler, self._pod)
self._get_pod_state.assert_called_once_with(self._pod)
m_get_pod_state.assert_called_once_with(self._pod)
self._release_vif.assert_called_once_with(self._pod, self._vif,
self._project_id,
self._security_groups)
def test_on_deleted_with_additional_vifs(self):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_deleted_with_additional_vifs(self, m_get_pod_state):
additional_vif = os_obj.vif.VIFBase()
self._state.additional_vifs = {'eth1': additional_vif}
self._get_pod_state.return_value = self._state
m_get_pod_state.return_value = self._state
h_vif.VIFHandler.on_deleted(self._handler, self._pod)
@@ -252,18 +264,21 @@ class TestVIFHandler(test_base.TestCase):
self._project_id,
self._security_groups)
def test_on_deleted_host_network(self):
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_deleted_host_network(self, m_get_pod_state):
m_get_pod_state.return_value = self._state
self._is_host_network.return_value = True
h_vif.VIFHandler.on_deleted(self._handler, self._pod)
self._get_pod_state.assert_not_called()
m_get_pod_state.assert_not_called()
self._release_vif.assert_not_called()
def test_on_deleted_no_annotation(self):
self._get_pod_state.return_value = None
@mock.patch('kuryr_kubernetes.controller.drivers.utils.get_pod_state')
def test_on_deleted_no_annotation(self, m_get_pod_state):
m_get_pod_state.return_value = None
h_vif.VIFHandler.on_deleted(self._handler, self._pod)
self._get_pod_state.assert_called_once_with(self._pod)
m_get_pod_state.assert_called_once_with(self._pod)
self._release_vif.assert_not_called()