Narrow connections opened by an empty namespaceSelector to the cluster only

For the simple case, where an operator wants to open connections to all
the namespaces in the cluster, i.e.:

kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: networkpolicy-example
spec:
  podSelector: {}
  policyTypes:
  - Egress
  - Ingress
  egress:
  - to:
    - namespaceSelector: {}

there was a false assumption that we need to open it without any
restriction, while in fact all we need to do is open egress traffic to
all the namespaces within the cluster, i.e. to the pod subnets and,
optionally, the service subnet.
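
As a rough sketch of the new behaviour (the subnet values and the
enforce_sg_rules flag below are invented for illustration, not taken from
this change), an empty namespaceSelector now expands into the cluster
CIDRs instead of an unrestricted allow-all rule:

# Illustrative sketch only; subnet values below are hypothetical
# stand-ins for the deployment configuration.
pod_subnet_pool_prefixes = ['10.0.0.0/16']  # prefixes of the pod subnet pool
service_subnet_cidr = '10.1.0.0/24'         # CIDR of the services subnet
enforce_sg_rules = True                     # octavia_defaults.enforce_sg_rules

allowed_cidrs = list(pod_subnet_pool_prefixes)
if enforce_sg_rules:
    allowed_cidrs.append(service_subnet_cidr)

# One egress security group rule is created per CIDR, instead of a single
# unrestricted allow-all rule.
print(allowed_cidrs)  # ['10.0.0.0/16', '10.1.0.0/24']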

Change-Id: Ibea039fa9c3b46b83e99237ce2ceb03f02d50727
Closes-Bug: 1915008
Roman Dobosz 2021-02-04 16:46:06 +01:00
parent 2611dc3b3a
commit 4ba363d6c8
3 changed files with 76 additions and 62 deletions

View File

@@ -247,7 +247,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
 
     def _parse_selectors(self, rule_block, rule_direction, policy_namespace):
         allowed_resources = []
-        allow_all = False
+        allowed_cidrs = None
         selectors = False
         for rule in rule_block.get(rule_direction, []):
             namespace_selector = rule.get('namespaceSelector')
@@ -259,8 +259,13 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                     allowed_resources.extend(self._get_pods(
                         pod_selector))
                 else:
-                    # allow from all
-                    allow_all = True
+                    # allow from all the cluster, which means pod subnets and
+                    # service subnet.
+                    allowed_cidrs = utils.get_subnetpool_cidrs(
+                        CONF.namespace_subnet.pod_subnet_pool)
+                    if CONF.octavia_defaults.enforce_sg_rules:
+                        allowed_cidrs.append(utils.get_subnet_cidr(
+                            CONF.neutron_defaults.service_subnet))
             elif namespace_selector:
                 selectors = True
                 if pod_selector:
@@ -288,10 +293,10 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                     pod_selector,
                     namespace=policy_namespace))
-        return allow_all, selectors, allowed_resources
+        return allowed_cidrs, selectors, allowed_resources
 
     def _create_sg_rules_with_container_ports(
-            self, container_ports, allow_all, resource, matched_pods,
+            self, container_ports, allowed_cidrs, resource, matched_pods,
             crd_rules, direction, port, pod_selector=None,
             policy_namespace=None):
         cidr, ns = self._get_resource_details(resource)
@@ -312,7 +317,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                     matched_pods[container_port].update(pod_info)
                 else:
                     matched_pods[container_port] = pod_info
-        if not allow_all and matched_pods and cidr:
+        if not allowed_cidrs and matched_pods and cidr:
             for container_port, pods in matched_pods.items():
                 sg_rule = driver_utils.create_security_group_rule_body(
                     direction, container_port,
@@ -328,7 +333,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
 
     def _create_sg_rule_body_on_text_port(self, direction, port,
                                           resources, crd_rules, pod_selector,
-                                          policy_namespace, allow_all=False):
+                                          policy_namespace,
+                                          allowed_cidrs=None):
         """Create SG rules when named port is used in the NP rule
 
         In case of ingress, the pods selected by NetworkPolicySpec's
@@ -347,8 +353,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         param crd_rules: list of parsed SG rules
         param pod_selector: dict with NetworkPolicySpec's podSelector
         param policy_namespace: string with policy namespace
-        param allow_all: True if should parse a allow from/to all rule,
-            False otherwise
+        param allowed_cidrs: None, or a list of cidrs, where/from the traffic
+            should be allowed.
         """
         matched_pods = {}
         if direction == "ingress":
@@ -358,7 +364,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                 container_ports = driver_utils.get_ports(selected_pod, port)
                 for resource in resources:
                     self._create_sg_rules_with_container_ports(
-                        container_ports, allow_all, resource, matched_pods,
+                        container_ports, allowed_cidrs, resource, matched_pods,
                         crd_rules, direction, port)
         elif direction == "egress":
             for resource in resources:
@@ -370,16 +376,16 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                     continue
                 container_ports = driver_utils.get_ports(resource, port)
                 self._create_sg_rules_with_container_ports(
-                    container_ports, allow_all, resource, matched_pods,
+                    container_ports, allowed_cidrs, resource, matched_pods,
                     crd_rules, direction, port, pod_selector,
                     policy_namespace)
 
-        if allow_all:
+        if allowed_cidrs:
             for container_port, pods in matched_pods.items():
-                for ethertype in (constants.IPv4, constants.IPv6):
+                for cidr in allowed_cidrs:
                     sg_rule = driver_utils.create_security_group_rule_body(
                         direction, container_port,
                         protocol=port.get('protocol'),
-                        ethertype=ethertype,
+                        cidr=cidr,
                         pods=pods)
                     crd_rules.append(sg_rule)
@@ -408,13 +414,20 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
 
     def _create_all_pods_sg_rules(self, port, direction,
                                   sg_rule_body_list, pod_selector,
-                                  policy_namespace):
+                                  policy_namespace, allowed_cidrs=None):
-        if type(port.get('port')) is not int:
+        if not isinstance(port.get('port'), int):
             all_pods = driver_utils.get_namespaced_pods().get('items')
             self._create_sg_rule_body_on_text_port(
                 direction, port, all_pods,
                 sg_rule_body_list, pod_selector, policy_namespace,
-                allow_all=True)
+                allowed_cidrs=allowed_cidrs)
+        elif allowed_cidrs:
+            for cidr in allowed_cidrs:
+                sg_rule = driver_utils.create_security_group_rule_body(
+                    direction, port.get('port'),
+                    protocol=port.get('protocol'),
+                    cidr=cidr)
+                sg_rule_body_list.append(sg_rule)
         else:
             for ethertype in (constants.IPv4, constants.IPv6):
                 sg_rule = (
@@ -445,9 +458,11 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         It accounts for special cases, such as:
         - PolicyTypes stating only Egress: ensuring ingress is not restricted
         - PolicyTypes not including Egress: ensuring egress is not restricted
-        - {} ingress/egress rules: applying default open for all
+        - {} ingress/egress rules: applying default open for all the cluster
         """
+        _create_sg_rule_body = driver_utils.create_security_group_rule_body
         rule_list = policy['spec'].get(direction)
         if not rule_list:
             policy_types = policy['spec'].get('policyTypes')
             if direction == 'ingress':
@@ -480,15 +495,16 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
             LOG.debug('Applying default all open policy from %s',
                       utils.get_res_link(policy))
             for ethertype in (constants.IPv4, constants.IPv6):
-                rule = driver_utils.create_security_group_rule_body(
-                    direction, ethertype=ethertype)
+                rule = _create_sg_rule_body(direction, ethertype=ethertype)
                 sg_rule_body_list.append(rule)
 
         for rule_block in rule_list:
             LOG.debug('Parsing %(dir)s Rule %(rule)s', {'dir': direction,
                                                         'rule': rule_block})
-            allow_all, selectors, allowed_resources = self._parse_selectors(
-                rule_block, rule_direction, policy_namespace)
+            (allowed_cidrs, selectors,
+             allowed_resources) = self._parse_selectors(rule_block,
+                                                        rule_direction,
+                                                        policy_namespace)
 
             ipblock_list = []
@@ -510,8 +526,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
             if 'ports' in rule_block:
                 for port in rule_block['ports']:
-                    if allowed_resources or allow_all or selectors:
-                        if type(port.get('port')) is not int:
+                    if allowed_resources or allowed_cidrs or selectors:
+                        if not isinstance(port.get('port'), int):
                             self._create_sg_rule_body_on_text_port(
                                 direction, port, allowed_resources,
                                 sg_rule_body_list, pod_selector,
@@ -520,15 +536,15 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                             self._create_sg_rule_on_number_port(
                                 allowed_resources, direction, port,
                                 sg_rule_body_list, policy_namespace)
-                        if allow_all:
+                        if allowed_cidrs:
                             self._create_all_pods_sg_rules(
                                 port, direction, sg_rule_body_list,
-                                pod_selector, policy_namespace)
+                                pod_selector, policy_namespace, allowed_cidrs)
                     else:
                         self._create_all_pods_sg_rules(
                             port, direction, sg_rule_body_list,
                             pod_selector, policy_namespace)
-            elif allowed_resources or allow_all or selectors:
+            elif allowed_resources or allowed_cidrs or selectors:
                 for resource in allowed_resources:
                     cidr, namespace = self._get_resource_details(resource)
                     # NOTE(maysams): Skipping resource that do not have
@@ -536,24 +552,16 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                     # will be triggered again after the resource is running.
                     if not cidr:
                         continue
-                    rule = driver_utils.create_security_group_rule_body(
-                        direction,
-                        port_range_min=1,
-                        port_range_max=65535,
-                        cidr=cidr,
-                        namespace=namespace)
+                    rule = _create_sg_rule_body(direction, cidr=cidr,
+                                                namespace=namespace)
                     sg_rule_body_list.append(rule)
                     if direction == 'egress':
                         self._create_svc_egress_sg_rule(
                             policy_namespace, sg_rule_body_list,
                             resource=resource)
-                if allow_all:
-                    for ethertype in (constants.IPv4, constants.IPv6):
-                        rule = driver_utils.create_security_group_rule_body(
-                            direction,
-                            port_range_min=1,
-                            port_range_max=65535,
-                            ethertype=ethertype)
+                if allowed_cidrs:
+                    for cidr in allowed_cidrs:
+                        rule = _create_sg_rule_body(direction, cidr=cidr)
                         sg_rule_body_list.append(rule)
             else:
                 LOG.debug('This network policy specifies no %(direction)s '

View File

@@ -341,12 +341,8 @@ class TestNetworkPolicyDriver(test_base.TestCase):
         self._driver._parse_network_policy_rules(policy)
         m_get_namespaces.assert_called()
         m_get_resource_details.assert_called()
-        calls = [mock.call('ingress', port_range_min=1,
-                           port_range_max=65535, cidr=subnet_cidr,
-                           namespace=namespace),
-                 mock.call('egress', port_range_min=1,
-                           port_range_max=65535, cidr=subnet_cidr,
-                           namespace=namespace)]
+        calls = [mock.call('ingress', cidr=subnet_cidr, namespace=namespace),
+                 mock.call('egress', cidr=subnet_cidr, namespace=namespace)]
         m_create.assert_has_calls(calls)
 
     @mock.patch.object(network_policy.NetworkPolicyDriver, 'namespaced_pods')
@@ -430,6 +426,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
         pod_selector = {}
         namespace = mock.sentinel.namespace
         direction = 'ingress'
+        cidrs = ['0.0.0.0/0']
 
         m_get_pods.return_value = {'items': [pod]}
         m_get_ports.return_value = container_ports
@@ -440,7 +437,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                                                        crd_rules,
                                                        pod_selector,
                                                        namespace,
-                                                       allow_all=True)
+                                                       allowed_cidrs=cidrs)
 
         m_get_pods.assert_called_with(pod_selector, namespace)
         m_get_ports.assert_called_with(pod, port)
@@ -468,6 +465,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
         pod_selector = {}
         namespace = mock.sentinel.namespace
         direction = 'ingress'
+        cidrs = ['0.0.0.0/0']
 
         self._driver._create_sg_rules_with_container_ports = _create_sgr_cont
         m_get_pods.return_value = {'items': [pod]}
@@ -479,17 +477,15 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                                                        crd_rules,
                                                        pod_selector,
                                                        namespace,
-                                                       allow_all=True)
+                                                       allowed_cidrs=cidrs)
 
         m_get_pods.assert_called_with(pod_selector, namespace)
         m_get_ports.assert_called_with(pod, port)
-        calls = [mock.call(direction, container_ports[0][1],
-                           protocol=port['protocol'], ethertype=e,
-                           pods='foo') for e in ('IPv4', 'IPv6')]
-        m_create_sgr.assert_has_calls(calls)
-        self.assertEqual(len(crd_rules), 2)
+        m_create_sgr.assert_called_once_with(direction, container_ports[0][1],
+                                             protocol=port['protocol'],
+                                             cidr=cidrs[0],
+                                             pods='foo')
 
     @mock.patch.object(network_policy.NetworkPolicyDriver,
                        '_create_sg_rules_with_container_ports')
@@ -533,6 +529,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
         pod_selector = {}
         namespace = mock.sentinel.namespace
         direction = 'egress'
+        cidrs = ['0.0.0.0/0']
 
         m_get_ports.return_value = container_ports
@@ -542,7 +539,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                                                        crd_rules,
                                                        pod_selector,
                                                        namespace,
-                                                       allow_all=True)
+                                                       allowed_cidrs=cidrs)
 
         m_get_ports.assert_called_with(resources[0], port)
         self.assertEqual(len(crd_rules), 0)
@@ -571,6 +568,7 @@ class TestNetworkPolicyDriver(test_base.TestCase):
         pod_selector = {}
         namespace = mock.sentinel.namespace
         direction = 'egress'
+        cidrs = ['0.0.0.0/0']
         self._driver._create_sg_rules_with_container_ports = _create_sgr_cont
         m_get_subnet_cidr.return_value = '10.0.0.128/26'
         m_create_sgr.side_effect = [mock.sentinel.sgr1, mock.sentinel.sgr2,
@@ -585,14 +583,12 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                                                        crd_rules,
                                                        pod_selector,
                                                        namespace,
-                                                       allow_all=True)
+                                                       allowed_cidrs=cidrs)
 
         m_get_ports.assert_called_with(resources[0], port)
-        calls = [mock.call(direction, container_ports[0][1],
-                           protocol=port['protocol'], ethertype=e,
-                           pods='foo') for e in ('IPv4', 'IPv6')]
-        m_create_sgr.assert_has_calls(calls)
-        self.assertEqual(len(crd_rules), 2)
+        m_create_sgr.assert_called_once_with(direction, container_ports[0][1],
+                                             protocol=port['protocol'],
+                                             cidr=cidrs[0], pods='foo')
 
     def test__create_all_pods_sg_rules(self):
         port = {'protocol': 'TCP', 'port': 22}

View File

@@ -329,16 +329,26 @@ def get_subnets_cidrs(subnet_ids):
 
 @MEMOIZE
-def get_subnetpool_version(subnetpool_id):
+def _get_subnetpool(subnetpool_id):
     os_net = clients.get_network_client()
     try:
         subnetpool_obj = os_net.get_subnet_pool(subnetpool_id)
     except os_exc.ResourceNotFound:
         LOG.exception("Subnetpool %s not found!", subnetpool_id)
         raise
+    return subnetpool_obj
+
+
+def get_subnetpool_version(subnetpool_id):
+    subnetpool_obj = _get_subnetpool(subnetpool_id)
     return subnetpool_obj.ip_version
+
+
+def get_subnetpool_cidrs(subnetpool_id):
+    subnetpool_obj = _get_subnetpool(subnetpool_id)
+    return subnetpool_obj.prefixes
 
 
 def extract_pod_annotation(annotation):
     obj = objects.base.VersionedObject.obj_from_primitive(annotation)
     # FIXME(dulek): This is code to maintain compatibility with Queens. We can
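
For context, a minimal usage sketch of the helpers refactored above, assuming
a running Kuryr controller with a configured Neutron client; the subnetpool ID
and the commented return values are invented for illustration:

# Hypothetical usage; the ID and the example return values are made up.
from kuryr_kubernetes import utils

pool_id = 'b46b7bcf-2e66-4395-9b19-9d4c2cab5ad5'    # e.g. the pod_subnet_pool ID

ip_version = utils.get_subnetpool_version(pool_id)  # e.g. 4
cidrs = utils.get_subnetpool_cidrs(pool_id)         # e.g. ['10.0.0.0/16']

Since both helpers go through the memoized _get_subnetpool() lookup, repeated
calls for the same pool should not trigger additional Neutron requests.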