Merge "Remove namespace isolation support"

commit b0779c76e1
Authored by Zuul on 2020-02-05 19:27:42 +00:00, committed by Gerrit Code Review
14 changed files with 11 additions and 740 deletions


@@ -109,8 +109,8 @@
vars:
devstack_localrc:
KURYR_SUBNET_DRIVER: namespace
KURYR_SG_DRIVER: namespace
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace
KURYR_ENABLED_HANDLERS: vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy
KURYR_SG_DRIVER: policy
KURYR_USE_PORT_POOLS: true
KURYR_POD_VIF_DRIVER: neutron-vif
KURYR_VIF_POOL_DRIVER: neutron
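With the namespace SG driver gone, the gate job above switches to the policy driver and enables the network-policy handlers. For reference, a local.conf sketch mirroring those job variables (names taken from the job definition above; values are illustrative, not the exact gate configuration):

KURYR_SUBNET_DRIVER=namespace
KURYR_SG_DRIVER=policy
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace,pod_label,policy,kuryrnetpolicy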


@@ -43,10 +43,6 @@ function ovs_bind_for_kubelet() {
# Need to enable Amphorae subnet access to the kubelet iface for API
# access
openstack port set "$port_id" --security-group service_pod_access
if [[ "$KURYR_SG_DRIVER" == "namespace" ]]; then
openstack port set "$port_id" --security-group allow_from_namespace
openstack port set "$port_id" --security-group allow_from_default
fi
ifname="kubelet${port_id}"
ifname="${ifname:0:14}"


@@ -388,47 +388,7 @@ function configure_neutron_defaults {
iniset "$KURYR_CONFIG" namespace_subnet pod_subnet_pool "$subnetpool_id"
iniset "$KURYR_CONFIG" namespace_subnet pod_router "$router_id"
fi
if [ "$KURYR_SG_DRIVER" == "namespace" ]; then
local allow_namespace_sg_id
local allow_default_sg_id
allow_namespace_sg_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
security group create --project "$project_id" \
allow_from_namespace -f value -c id)
allow_default_sg_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
security group create --project "$project_id" \
allow_from_default -f value -c id)
for prot in icmp tcp udp ;
do
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
security group rule create --project "$project_id" \
--description "allow traffic from default namespace" \
--remote-group "$allow_namespace_sg_id" --ethertype IPv4 --protocol "$prot" \
"$allow_default_sg_id"
if [ "$prot" != "icmp" ] ; then
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
security group rule create --project "$project_id" \
--description "allow traffic from namespaces at default namespace" \
--remote-group "$allow_default_sg_id" --ethertype IPv4 --protocol "$prot" \
"$allow_namespace_sg_id"
fi
done
# NOTE(ltomasbo): Some tempest tests use FIPs and depend on icmp
# traffic being allowed to the pods. To enable these tests we permit
# icmp traffic from everywhere on the default namespace. Note that tcp
# traffic will still be dropped; only icmp is permitted.
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
security group rule create --project "$project_id" \
--description "allow imcp traffic from everywhere to default namespace" \
--ethertype IPv4 --protocol icmp "$allow_namespace_sg_id"
iniset "$KURYR_CONFIG" namespace_sg sg_allow_from_namespaces "$allow_namespace_sg_id"
iniset "$KURYR_CONFIG" namespace_sg sg_allow_from_default "$allow_default_sg_id"
elif [[ "$KURYR_SG_DRIVER" == "policy" ]]; then
if [[ "$KURYR_SG_DRIVER" == "policy" ]]; then
# NOTE(dulek): Using DevStack's default SG is not enough to match
# the NP specification. We need to open ingress from everywhere, so we
# create an allow-all group.
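The commands that create that allow-all group sit outside this hunk. As a hedged sketch only, following the same CLI pattern as the removed namespace code above (the group name and exact rule set are assumptions, not the plugin's actual code):

# Sketch: open ingress for common protocols on an assumed allow_all group.
allow_all_sg_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
    security group create --project "$project_id" allow_all -f value -c id)
for prot in icmp tcp udp ; do
    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
        security group rule create --project "$project_id" \
        --ethertype IPv4 --protocol "$prot" "$allow_all_sg_id"
done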
@@ -449,7 +409,7 @@ function configure_neutron_defaults {
fi
iniset "$KURYR_CONFIG" neutron_defaults pod_security_groups "$sg_ids"
if [[ "$KURYR_SG_DRIVER" == "namespace" || "$KURYR_SG_DRIVER" == "policy" ]]; then
if [[ "$KURYR_SG_DRIVER" == "policy" ]]; then
# NOTE(ltomasbo): As more security groups and rules are created, the
# quotas for them need to be increased.
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
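The quota command itself is truncated by this hunk. As a hedged sketch, raising the security group and rule quotas with the openstack CLI could look like the following (the exact resources and values used by the plugin are not shown here):

# Illustrative values only; the plugin's actual numbers are outside this hunk.
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
    quota set --secgroups 100 --secgroup-rules 300 "$project_id"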
@@ -946,9 +906,6 @@ function update_tempest_conf_file {
if [[ "$KURYR_K8S_CONTAINERIZED_DEPLOYMENT" == "True" ]]; then
iniset $TEMPEST_CONFIG kuryr_kubernetes containerized True
fi
if [[ "$KURYR_SG_DRIVER" == "namespace" ]] && [[ "$KURYR_SUBNET_DRIVER" == "namespace" ]]; then
iniset $TEMPEST_CONFIG kuryr_kubernetes namespace_enabled True
fi
if [[ "$KURYR_SUBNET_DRIVER" == "namespace" ]]; then
iniset $TEMPEST_CONFIG kuryr_kubernetes subnet_per_namespace True
fi


@@ -32,17 +32,6 @@ the next steps are needed:
[kubernetes]
pod_subnets_driver = namespace
In addition, to ensure that pods and services in a given namespace
cannot reach (or be reached by) those in another namespace, except for
pods in the default namespace, which can reach (and be reached by) pods in
any other namespace, the following security group driver needs to be set too:
.. code-block:: ini
[kubernetes]
pod_security_groups_driver = namespace
service_security_groups_driver = namespace
#. Select (and create if needed) the subnet pool from which the new subnets
will get their CIDRs (e.g., the default on a devstack deployment is
shared-default-subnetpool-v4):
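The corresponding configuration block falls outside this hunk; a hedged sketch using the ``namespace_subnet`` option names referenced by the devstack plugin above (``SUBNET_POOL_ID`` is a placeholder for the chosen pool's ID):

.. code-block:: ini

   [namespace_subnet]
   pod_subnet_pool = SUBNET_POOL_ID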
@@ -64,16 +53,6 @@ the next steps are needed:
requirements between pod, service and public subnets, as in the case for
the default subnet driver.
#. Select (and create if needed) the security groups to be attached to the
pods in the default namespace and to those in the other namespaces, enabling
cross access between them:
.. code-block:: ini
[namespace_sg]
sg_allow_from_namespaces = SG_ID_1 # Makes SG_ID_1 allow traffic from the sg sg_allow_from_default
sg_allow_from_default = SG_ID_2 # Makes SG_ID_2 allow traffic from the sg sg_allow_from_namespaces
Note that you need to restart the kuryr controller after applying the
steps detailed above. For non-containerized devstack deployments:
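The exact command is outside this hunk; assuming the default ``devstack@kuryr-kubernetes`` systemd unit, it is typically:

.. code-block:: console

   $ sudo systemctl restart devstack@kuryr-kubernetes.service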
@@ -94,7 +73,6 @@ to add the namespace handler and state the namespace subnet driver with:
.. code-block:: console
KURYR_SUBNET_DRIVER=namespace
KURYR_SG_DRIVER=namespace
KURYR_ENABLED_HANDLERS=vif,lb,lbaasspec,namespace
.. note::
@@ -173,10 +151,6 @@ Testing the network per namespace functionality
test-1-pod$ curl 10.0.0.141
demo-5995548848-lmmjc: HELLO! I AM ALIVE!!!
$ kubectl exec -n test2 -it demo-5135352253-dfghd /bin/sh
test-2-pod$ curl 10.0.0.141
## No response
#. And finally, to remove the namespace and all its resources, including
the OpenStack networks, the kuryrnet CRD, services, and pods, you just need to run:
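The command itself is outside this hunk; assuming the test namespace from the steps above is named ``test1``, it would simply be:

.. code-block:: console

   $ kubectl delete namespace test1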


@@ -34,8 +34,6 @@ spec:
type: boolean
routerId:
type: string
sgId:
type: string
subnetCIDR:
type: string
subnetId:


@@ -238,26 +238,6 @@ class PodSecurityGroupsDriver(DriverBase):
"""
raise NotImplementedError()
def create_namespace_sg(self, namespace, project_id, crd_spec):
"""Create security group resources for a namespace.
:param namespace: string with the namespace name
:param project_id: OpenStack project ID
:param crd_spec: dict with the keys and values for the CRD spec, such
as subnetId or subnetCIDR
:return: dict with the keys and values for the CRD spec, such as sgId.
If no security group needs to be created for the namespace, it
should return an empty dict
"""
raise NotImplementedError()
def delete_sg(self, sg_id):
"""Delete security group associated to a namespace.
:param sg_id: OpenStack security group ID
"""
raise NotImplementedError()
def create_sg_rules(self, pod):
"""Create security group rules for a pod.


@@ -38,15 +38,6 @@ class DefaultPodSecurityGroupsDriver(base.PodSecurityGroupsDriver):
return sg_list[:]
def create_namespace_sg(self, namespace, project_id, crd_spec):
LOG.debug("Security group driver does not create SGs for the "
"namespaces.")
return {}
def delete_sg(self, sg_id):
LOG.debug("Security group driver does not implement deleting "
"SGs.")
def create_sg_rules(self, pod):
LOG.debug("Security group driver does not create SG rules for "
"the pods.")


@@ -365,110 +365,6 @@ class LBaaSv2Driver(base.LBaaSDriver):
not rule.get('remote_ip_prefix') and
'network-policy' not in rule.get('description'))
def _remove_default_octavia_rules(self, sg_id, listener):
os_net = clients.get_network_client()
for remaining in self._provisioning_timer(
_ACTIVATION_TIMEOUT, _LB_STS_POLL_SLOW_INTERVAL):
listener_rules = os_net.security_group_rules(
security_group_id=sg_id,
protocol=listener.protocol,
port_range_min=listener.port,
port_range_max=listener.port,
direction='ingress')
for rule in listener_rules:
if not (rule.remote_group_id or rule.remote_ip_prefix):
# remove default sg rules
os_net.delete_security_group_rule(rule.id)
return
def _extend_lb_security_group_rules(self, loadbalancer, listener):
os_net = clients.get_network_client()
if CONF.octavia_defaults.sg_mode == 'create':
sg_id = self._find_listeners_sg(loadbalancer)
# if an SG for the loadbalancer has not been created, create one
if not sg_id:
sg = os_net.create_security_group(
name=loadbalancer.name, project_id=loadbalancer.project_id)
c_utils.tag_neutron_resources([sg])
loadbalancer.security_groups.append(sg.id)
vip_port = self._get_vip_port(loadbalancer)
os_net.update_port(
vip_port.id,
security_groups=loadbalancer.security_groups)
else:
sg_id = self._get_vip_port(loadbalancer).security_group_ids[0]
# wait until octavia adds default sg rules
self._remove_default_octavia_rules(sg_id, listener)
for sg in loadbalancer.security_groups:
if sg != sg_id:
try:
os_net.create_security_group_rule(
direction='ingress',
port_range_min=listener.port,
port_range_max=listener.port,
protocol=listener.protocol,
security_group_id=sg_id,
remote_group_id=sg,
description=listener.name)
except os_exc.ConflictException:
pass
except os_exc.SDKException:
LOG.exception('Failed when creating security group '
'rule for listener %s.', listener.name)
# ensure routes have access to the services
service_subnet_cidr = utils.get_subnet_cidr(loadbalancer.subnet_id)
try:
# add access from service subnet
os_net.create_security_group_rule(
direction='ingress',
port_range_min=listener.port,
port_range_max=listener.port,
protocol=listener.protocol,
security_group_id=sg_id,
remote_ip_prefix=service_subnet_cidr,
description=listener.name)
# add access from worker node VM subnet for non-native route
# support
worker_subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
if worker_subnet_id:
try:
worker_subnet_cidr = utils.get_subnet_cidr(
worker_subnet_id)
os_net.create_security_group_rule(
direction='ingress',
port_range_min=listener.port,
port_range_max=listener.port,
protocol=listener.protocol,
security_group_id=sg_id,
remote_ip_prefix=worker_subnet_cidr,
description=listener.name)
except os_exc.ResourceNotFound:
LOG.exception('Failed when creating security group rule '
'due to nonexistent worker_subnet_id: %s',
worker_subnet_id)
except os_exc.ConflictException:
pass
except os_exc.SDKException:
LOG.exception('Failed when creating security group rule to '
'enable routes for listener %s.', listener.name)
def _ensure_security_group_rules(self, loadbalancer, listener,
service_type):
namespace_isolation = (
'namespace' in CONF.kubernetes.enabled_handlers and
CONF.kubernetes.service_security_groups_driver == 'namespace')
create_sg = CONF.octavia_defaults.sg_mode == 'create'
if create_sg:
self._create_lb_security_group_rule(loadbalancer, listener)
if (namespace_isolation and service_type == 'ClusterIP' and
CONF.octavia_defaults.enforce_sg_rules):
self._extend_lb_security_group_rules(loadbalancer, listener)
def ensure_listener(self, loadbalancer, protocol, port,
service_type='ClusterIP'):
name = "%s:%s:%s" % (loadbalancer.name, protocol, port)
@@ -486,7 +382,8 @@ class LBaaSv2Driver(base.LBaaSDriver):
"protocol %(prot)s is not supported", {'prot': protocol})
return None
self._ensure_security_group_rules(loadbalancer, result, service_type)
if CONF.octavia_defaults.sg_mode == 'create':
self._create_lb_security_group_rule(loadbalancer, result)
return result


@@ -1,170 +0,0 @@
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr.lib._i18n import _
from openstack import exceptions as os_exc
from oslo_config import cfg
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes.controller.drivers import utils
from kuryr_kubernetes import exceptions
LOG = logging.getLogger(__name__)
namespace_sg_driver_opts = [
cfg.StrOpt('sg_allow_from_namespaces',
help=_("Default security group to allow traffic from the "
"namespaces into the default namespace.")),
cfg.StrOpt('sg_allow_from_default',
help=_("Default security group to allow traffic from the "
"default namespaces into the other namespaces."))
]
cfg.CONF.register_opts(namespace_sg_driver_opts, "namespace_sg")
DEFAULT_NAMESPACE = 'default'
def _get_net_crd(namespace):
kubernetes = clients.get_kubernetes_client()
try:
ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE,
namespace))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise exceptions.ResourceNotReady(namespace)
try:
annotations = ns['metadata']['annotations']
net_crd_name = annotations[constants.K8S_ANNOTATION_NET_CRD]
except KeyError:
LOG.debug("Namespace missing CRD annotations for selecting the "
"corresponding security group. Action will be retried.")
raise exceptions.ResourceNotReady(namespace)
try:
net_crd = kubernetes.get('%s/kuryrnets/%s' % (constants.K8S_API_CRD,
net_crd_name))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
return net_crd
class NamespacePodSecurityGroupsDriver(base.PodSecurityGroupsDriver):
"""Provides security groups for Pod based on a configuration option."""
def get_security_groups(self, pod, project_id):
namespace = pod['metadata']['namespace']
net_crd = _get_net_crd(namespace)
sg_list = [str(net_crd['spec']['sgId'])]
extra_sgs = self._get_extra_sg(namespace)
for sg in extra_sgs:
sg_list.append(str(sg))
sg_list.extend(config.CONF.neutron_defaults.pod_security_groups)
return sg_list[:]
def _get_extra_sg(self, namespace):
# Differentiates between default namespace and the rest
if namespace == DEFAULT_NAMESPACE:
return [cfg.CONF.namespace_sg.sg_allow_from_namespaces]
else:
return [cfg.CONF.namespace_sg.sg_allow_from_default]
def create_namespace_sg(self, namespace, project_id, crd_spec):
os_net = clients.get_network_client()
sg_name = "ns/" + namespace + "-sg"
# create the associated SG for the namespace
try:
# default namespace is different from the rest
# Default allows traffic from everywhere
# The rest can be accessed from the default one
sg = os_net.create_security_group(name=sg_name,
project_id=project_id)
utils.tag_neutron_resources([sg])
os_net.create_security_group_rule(
direction="ingress",
remote_ip_prefix=crd_spec['subnetCIDR'],
security_group_id=sg.id)
except os_exc.SDKException:
LOG.exception("Error creating security group for the namespace "
"%s", namespace)
raise
return {'sgId': sg.id}
def delete_sg(self, sg_id):
os_net = clients.get_network_client()
try:
os_net.delete_security_group(sg_id)
except os_exc.SDKException:
LOG.exception("Error deleting security group %s.", sg_id)
raise
def delete_namespace_sg_rules(self, namespace):
LOG.debug("Security group driver does not create SG rules for "
"namespace.")
def create_namespace_sg_rules(self, namespace):
LOG.debug("Security group driver does not create SG rules for "
"namespace.")
def update_namespace_sg_rules(self, namespace):
LOG.debug("Security group driver does not create SG rules for "
"namespace.")
def create_sg_rules(self, pod):
LOG.debug("Security group driver does not create SG rules for "
"the pods.")
def delete_sg_rules(self, pod):
LOG.debug("Security group driver does not delete SG rules for "
"the pods.")
def update_sg_rules(self, pod):
LOG.debug("Security group driver does not update SG rules for "
"the pods.")
class NamespaceServiceSecurityGroupsDriver(base.ServiceSecurityGroupsDriver):
"""Provides security groups for Service based on a configuration option."""
def get_security_groups(self, service, project_id):
namespace = service['metadata']['namespace']
net_crd = _get_net_crd(namespace)
sg_list = []
sg_list.append(str(net_crd['spec']['sgId']))
extra_sgs = self._get_extra_sg(namespace)
for sg in extra_sgs:
sg_list.append(str(sg))
return sg_list[:]
def _get_extra_sg(self, namespace):
# Differentiates between default namespace and the rest
if namespace == DEFAULT_NAMESPACE:
return [cfg.CONF.namespace_sg.sg_allow_from_default]
else:
return [cfg.CONF.namespace_sg.sg_allow_from_namespaces]


@@ -574,15 +574,6 @@ class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
crd_selectors.extend(self.create_namespace_sg_rules(namespace))
return crd_selectors
def create_namespace_sg(self, namespace, project_id, crd_spec):
LOG.debug("Security group driver does not create SGs for the "
"namespaces.")
return {}
def delete_sg(self, sg_id):
LOG.debug("Security group driver does not implement deleting "
"SGs.")
class NetworkPolicyServiceSecurityGroupsDriver(
base.ServiceSecurityGroupsDriver):


@@ -111,20 +111,6 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
LOG.debug("Creating network resources for namespace: %s", ns_name)
net_crd_spec = self._drv_subnets.create_namespace_network(ns_name,
project_id)
try:
net_crd_sg = self._drv_sg.create_namespace_sg(ns_name, project_id,
net_crd_spec)
except os_exc.SDKException:
LOG.exception("Error creating security group for the namespace. "
"Rolling back created network resources.")
self._drv_subnets.rollback_network_resources(net_crd_spec, ns_name)
raise
if net_crd_sg:
net_crd_spec.update(net_crd_sg)
else:
LOG.debug("No SG created for the namespace. Namespace isolation "
"will not be enforced.")
# create CRD resource for the network
try:
net_crd = self._add_kuryrnet_crd(ns_name, net_crd_spec)
@@ -135,8 +121,6 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
LOG.exception("Kuryrnet CRD creation failed. Rolling back "
"resources created for the namespace.")
self._drv_subnets.rollback_network_resources(net_crd_spec, ns_name)
if net_crd_sg.get('sgId'):
self._drv_sg.delete_sg(net_crd_sg['sgId'])
try:
self._del_kuryrnet_crd(net_crd_name)
except exceptions.K8sClientException:
@@ -174,12 +158,6 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
# associated to the namespace/subnet, ensuring next retry will be
# successful
raise
sg_id = net_crd['spec'].get('sgId')
if sg_id:
self._drv_sg.delete_sg(sg_id)
else:
LOG.debug("There is no security group associated with the "
"namespace to be deleted")
self._del_kuryrnet_crd(net_crd_name)
crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)


@@ -1,275 +0,0 @@
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import namespace_security_groups
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
import munch
from openstack import exceptions as os_exc
def get_pod_obj():
return {
'status': {
'qosClass': 'BestEffort',
'hostIP': '192.168.1.2',
},
'kind': 'Pod',
'spec': {
'schedulerName': 'default-scheduler',
'containers': [{
'name': 'busybox',
'image': 'busybox',
'resources': {}
}],
'nodeName': 'kuryr-devstack'
},
'metadata': {
'name': 'busybox-sleep1',
'namespace': 'default',
'resourceVersion': '53808',
'selfLink': '/api/v1/namespaces/default/pods/busybox-sleep1',
'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb',
'annotations': {
'openstack.org/kuryr-vif': {}
}
}}
def get_namespace_obj():
return {
'metadata': {
'annotations': {
constants.K8S_ANNOTATION_NET_CRD: 'net_crd_url_sample'
}
}
}
def get_no_match_crd_namespace_obj():
return {
"kind": "Namespace",
"metadata": {
"annotations": {
"openstack.org/kuryr-namespace-label": '{"name": "dev"}',
"openstack.org/kuryr-net-crd": "ns-dev"
},
"labels": {"name": "prod"},
"name": "prod",
"selfLink": "/api/v1/namespaces/dev"}}
def get_match_crd_namespace_obj():
return {
"kind": "Namespace",
"metadata": {
"annotations": {
"openstack.org/kuryr-namespace-label": '{"name": "dev"}',
"openstack.org/kuryr-net-crd": "ns-dev"
},
"labels": {
"name": "dev"
},
"name": "dev",
"selfLink": "/api/v1/namespaces/dev"}}
def get_match_crd_pod_obj():
return {
'kind': 'Pod',
'metadata': {
'name': mock.sentinel.pod_name,
'namespace': 'dev',
'labels': {
'tier': 'backend'},
'annotations': {
'openstack.org/kuryr-pod-label': '{"tier": "backend"}'}},
'status': {'podIP': mock.sentinel.podIP}}
def get_sg_rule():
pod_ip = get_match_crd_pod_obj()['status'].get('podIP')
return {
"namespace": 'dev',
"security_group_rule": {
"description": "Kuryr-Kubernetes NetPolicy SG rule",
"direction": "ingress",
"ethertype": "IPv4",
"id": 'f15ff50a-e8a4-4872-81bf-a04cbb8cb388',
"port_range_max": 6379,
"port_range_min": 6379,
"protocol": "tcp",
"remote_ip_prefix": pod_ip,
"security_group_id": '36923e76-026c-422b-8dfd-7292e7c88228'}}
def get_matched_crd_obj():
return {
"kind": "KuryrNetPolicy",
"metadata": {"name": "np-test-network-policy",
"namespace": "default"},
"spec": {
"egressSgRules": [],
"ingressSgRules": [get_sg_rule()],
"networkpolicy_spec": {
"ingress": [
{"from": [
{"namespaceSelector": {
"matchLabels": {"name": "dev"}}}],
"ports": [
{"port": 6379,
"protocol": "TCP"}]}],
"podSelector": {"matchLabels": {"app": "demo"}},
"policyTypes": ["Ingress"]},
"podSelector": {"matchLabels": {"app": "demo"}},
"securityGroupId": '36923e76-026c-422b-8dfd-7292e7c88228'}}
def get_crd_obj_no_match():
return {
"kind": "KuryrNetPolicy",
"metadata": {"name": "np-test-network-policy",
"namespace": "default"},
"spec": {
"egressSgRules": [],
"ingressSgRules": [],
"networkpolicy_spec": {
"ingress": [
{"from": [
{"namespaceSelector": {
"matchLabels": {"name": "dev"}}}],
"ports": [
{"port": 6379,
"protocol": "TCP"}]}],
"podSelector": {"matchLabels": {"app": "demo"}},
"policyTypes": ["Ingress"]},
"podSelector": {"matchLabels": {"app": "demo"}},
"securityGroupId": '36923e76-026c-422b-8dfd-7292e7c88228'}}
def get_crd_obj_with_all_selectors():
return {
"kind": "KuryrNetPolicy",
"metadata": {"name": "np-test-network-policy",
"namespace": "default"},
"spec": {
"egressSgRules": [],
"ingressSgRules": [],
"networkpolicy_spec": {
"ingress": [
{"from": [
{"namespaceSelector": {
"matchLabels": {"name": "dev"}},
"podSelector": {
"matchLabels": {"tier": "backend"}}}],
"ports": [
{"port": 6379,
"protocol": "TCP"}]}],
"podSelector": {"matchLabels": {"app": "demo"}},
"policyTypes": ["Ingress"]},
"podSelector": {"matchLabels": {"app": "demo"}},
"securityGroupId": '36923e76-026c-422b-8dfd-7292e7c88228'}}
class TestNamespacePodSecurityGroupsDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.controller.drivers.'
'namespace_security_groups._get_net_crd')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_get_security_groups(self, m_cfg, m_get_crd):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
pod = get_pod_obj()
project_id = mock.sentinel.project_id
sg_list = [mock.sentinel.sg_id]
m_cfg.neutron_defaults.pod_security_groups = sg_list
sg_id = mock.sentinel.sg_id
extra_sg = mock.sentinel.extra_sg
net_crd = {
'spec': {
'sgId': sg_id
}
}
m_get_crd.return_value = net_crd
m_driver._get_extra_sg.return_value = [extra_sg]
ret = cls.get_security_groups(m_driver, pod, project_id)
expected_sg = [str(sg_id), str(extra_sg), sg_list[0]]
self.assertEqual(ret, expected_sg)
m_get_crd.assert_called_once_with(pod['metadata']['namespace'])
def test_create_namespace_sg(self):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
sg = munch.Munch({'id': mock.sentinel.sg})
subnet_cidr = mock.sentinel.subnet_cidr
crd_spec = {'subnetCIDR': subnet_cidr}
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.create_security_group.return_value = sg
create_sg_resp = cls.create_namespace_sg(m_driver, namespace,
project_id, crd_spec)
self.assertEqual(create_sg_resp, {'sgId': sg['id']})
os_net.create_security_group.assert_called_once()
os_net.create_security_group_rule.assert_called_once()
def test_create_namespace_sg_exception(self):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
subnet_cidr = mock.sentinel.subnet_cidr
crd_spec = {
'subnetCIDR': subnet_cidr
}
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.create_security_group.side_effect = os_exc.SDKException
self.assertRaises(os_exc.SDKException, cls.create_namespace_sg,
m_driver, namespace, project_id, crd_spec)
os_net.create_security_group.assert_called_once()
os_net.create_security_group_rule.assert_not_called()
def test_delete_sg(self):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
os_net = self.useFixture(k_fix.MockNetworkClient()).client
sg_id = mock.sentinel.sg_id
cls.delete_sg(m_driver, sg_id)
os_net.delete_security_group.assert_called_once_with(sg_id)
def test_delete_sg_exception(self):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
os_net = self.useFixture(k_fix.MockNetworkClient()).client
sg_id = mock.sentinel.sg_id
os_net.delete_security_group.side_effect = os_exc.SDKException
self.assertRaises(os_exc.SDKException, cls.delete_sg, m_driver, sg_id)
os_net.delete_security_group.assert_called_once_with(sg_id)


@@ -60,10 +60,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._handler._drv_subnets.create_namespace_network)
self._delete_namespace_subnet = (
self._handler._drv_subnets.delete_namespace_subnet)
self._create_namespace_sg = (
self._handler._drv_sg.create_namespace_sg)
self._delete_sg = (
self._handler._drv_sg.delete_sg)
self._delete_namespace_sg_rules = (
self._handler._drv_sg.delete_namespace_sg_rules)
self._cleanup_namespace_networks = (
@@ -92,7 +88,6 @@ class TestNamespaceHandler(test_base.TestCase):
'routerId': mock.sentinel.router_id,
'netId': mock.sentinel.net_id,
'subnetId': mock.sentinel.subnet_id,
'sgId': mock.sentinel.sg_id,
}
}
return crd
@@ -128,8 +123,7 @@ class TestNamespaceHandler(test_base.TestCase):
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
self._create_namespace_sg.return_value = {'test_sg': 'uuid'}
net_crd_spec = {'test_net': 'uuid', 'test_sg': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
namespace.NamespaceHandler.on_present(self._handler, self._namespace)
@@ -140,8 +134,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
@@ -173,36 +165,13 @@ class TestNamespaceHandler(test_base.TestCase):
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_not_called()
self._set_net_crd.assert_not_called()
def test_on_present_create_sg_exception(self):
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
self._create_namespace_sg.side_effect = (
n_exc.NeutronClientException)
self.assertRaises(n_exc.NeutronClientException,
namespace.NamespaceHandler.on_present,
self._handler, self._namespace)
self._get_net_crd_id.assert_called_once_with(self._namespace)
self._get_net_crd.assert_called_once_with(self._crd_id)
self._cleanup_namespace_networks.assert_called_once_with(
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id, {'test_net': 'uuid'})
self._set_net_crd.assert_not_called()
def test_on_present_add_kuryrnet_crd_exception(self):
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
self._create_namespace_sg.return_value = {'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid', 'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.ResourceNotReady,
@@ -215,8 +184,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_not_called()
@@ -228,8 +195,7 @@ class TestNamespaceHandler(test_base.TestCase):
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
self._create_namespace_sg.return_value = {'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid', 'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
self._set_net_crd.side_effect = k_exc.K8sClientException
@@ -245,8 +211,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
@@ -258,8 +222,7 @@ class TestNamespaceHandler(test_base.TestCase):
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
self._create_namespace_sg.return_value = {'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid', 'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
self._set_net_crd.side_effect = k_exc.K8sClientException
self._rollback_network_resources.side_effect = (
@@ -275,8 +238,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
@@ -288,8 +249,7 @@ class TestNamespaceHandler(test_base.TestCase):
self._get_net_crd_id.return_value = None
self._get_net_crd.return_value = None
self._create_namespace_network.return_value = {'test_net': 'uuid'}
self._create_namespace_sg.return_value = {'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid', 'sgId': 'uuid'}
net_crd_spec = {'test_net': 'uuid'}
self._add_kuryrnet_crd.return_value = net_crd
self._set_net_crd.side_effect = k_exc.K8sClientException
self._del_kuryrnet_crd.side_effect = k_exc.K8sClientException
@@ -304,8 +264,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._namespace_name)
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
@@ -328,7 +286,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._delete_network_pools.assert_called_once_with(
net_crd['spec']['netId'])
self._delete_namespace_subnet.assert_called_once_with(net_crd)
self._delete_sg.assert_called_once_with(net_crd['spec']['sgId'])
self._del_kuryrnet_crd.assert_called_once_with(self._crd_id)
def test_on_deleted_missing_crd_annotation(self):
@@ -340,7 +297,6 @@ class TestNamespaceHandler(test_base.TestCase):
self._get_net_crd.assert_not_called()
self._delete_network_pools.assert_not_called()
self._delete_namespace_subnet.assert_not_called()
self._delete_sg.assert_not_called()
self._del_kuryrnet_crd.assert_not_called()
def test_on_deleted_k8s_exception(self):


@@ -65,12 +65,10 @@ kuryr_kubernetes.controller.drivers.service_subnets =
kuryr_kubernetes.controller.drivers.pod_security_groups =
default = kuryr_kubernetes.controller.drivers.default_security_groups:DefaultPodSecurityGroupsDriver
namespace = kuryr_kubernetes.controller.drivers.namespace_security_groups:NamespacePodSecurityGroupsDriver
policy = kuryr_kubernetes.controller.drivers.network_policy_security_groups:NetworkPolicySecurityGroupsDriver
kuryr_kubernetes.controller.drivers.service_security_groups =
default = kuryr_kubernetes.controller.drivers.default_security_groups:DefaultServiceSecurityGroupsDriver
namespace = kuryr_kubernetes.controller.drivers.namespace_security_groups:NamespaceServiceSecurityGroupsDriver
policy = kuryr_kubernetes.controller.drivers.network_policy_security_groups:NetworkPolicyServiceSecurityGroupsDriver
kuryr_kubernetes.controller.drivers.network_policy =
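With the namespace entry points above removed, only the default and policy drivers remain selectable for these options. A kuryr.conf sketch using the option names from the documentation earlier in this change (values are illustrative):

[kubernetes]
pod_security_groups_driver = policy
service_security_groups_driver = policy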