[fedora-atomic][k8s][security] More strict security group rules

Security is always a key area we'd like to improve for Magnum. This patch
adds stricter security group rules for k8s clusters to limit access from
outside.

Task: 36258
Story: 2006391

Change-Id: I8e7eee920a2de2153954d7ae33565491f9286122
Feilong Wang 2019-08-12 12:07:47 +12:00
parent 6212fc974d
commit 0a595b05ab
9 changed files with 361 additions and 79 deletions

View File

@ -94,3 +94,21 @@ def get_fixed_network_name(context, network):
                           target='name', external=False)
    else:
        return network


def get_subnet_cidr(context, subnet):
    n_client = clients.OpenStackClients(context).neutron()
    source = "id" if uuidutils.is_uuid_like(subnet) else "name"
    filter = {source: subnet}
    subnets = n_client.list_subnets(**filter).get('subnets', [])
    if len(subnets) == 0:
        raise exception.ObjectNotFound(name=subnet)
    if len(subnets) > 1:
        raise exception.Conflict(
            "Multiple subnets exist with same name '%s'. Please use the "
            "subnet ID instead." % subnet
        )
    return subnets[0]['cidr']
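For reference, a minimal usage sketch of the new helper. This is not part of the patch: the context object and subnet name are placeholders, and only the exceptions already raised by get_subnet_cidr() above are handled.

# Sketch only: `ctx` is assumed to be an existing magnum RequestContext
# (e.g. the one the conductor already holds); "private-subnet" is a
# hypothetical subnet name -- a subnet UUID works as well.
from magnum.common import exception
from magnum.common import neutron


def resolve_subnet_cidr(ctx, subnet_ref):
    """Return the CIDR for subnet_ref, or None if it cannot be resolved."""
    try:
        return neutron.get_subnet_cidr(ctx, subnet_ref)  # e.g. "10.0.0.0/24"
    except exception.ObjectNotFound:
        return None  # no subnet matches the given name/UUID
    # exception.Conflict is intentionally not caught: an ambiguous name
    # should surface to the caller, who must retry with the subnet UUID.

# cidr = resolve_subnet_cidr(ctx, "private-subnet")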

View File

@ -16,6 +16,7 @@ from oslo_log import log as logging
from oslo_utils import strutils
from magnum.common import exception
from magnum.common import neutron
from magnum.common.x509 import operations as x509
from magnum.conductor.handlers.common import cert_manager
from magnum.drivers.heat import k8s_template_def
@ -119,6 +120,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
        extra_params['master_image'] = cluster_template.image_id
        extra_params['minion_image'] = cluster_template.image_id
        extra_params['master_lb_enabled'] = cluster_template.master_lb_enabled

        label_list = ['coredns_tag',
                      'kube_tag', 'container_infra_prefix',
@ -163,12 +165,21 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
        self._set_cert_manager_params(cluster, extra_params)
        self._get_keystone_auth_default_policy(extra_params)
        self._set_fixed_network_cidr(context, cluster,
                                     cluster_template, extra_params)

        return super(K8sFedoraTemplateDefinition,
                     self).get_params(context, cluster_template, cluster,
                                      extra_params=extra_params,
                                      **kwargs)

    def _set_fixed_network_cidr(self, context, cluster,
                                cluster_template, extra_params):
        subnet = cluster.fixed_subnet or cluster_template.fixed_subnet
        if subnet:
            extra_params['fixed_network_cidr'] = \
                neutron.get_subnet_cidr(context, subnet)

    def _set_cert_manager_params(self, cluster, extra_params):
        cert_manager_api = cluster.labels.get('cert_manager_api')
        if strutils.bool_from_string(cert_manager_api):
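The two values wired up above are ultimately handed to Heat as stack parameters. A minimal sketch of the new entries in extra_params (illustrative values only, not taken from a real cluster):

# Sketch only: placeholder values for illustration.
extra_params = {
    # Resolved from the cluster's (or template's) fixed subnet via
    # neutron.get_subnet_cidr(); the cluster Heat template uses it as the
    # remote_ip_prefix of the new private-network-scoped rules.
    'fixed_network_cidr': '10.0.0.0/24',
    # Mirrors the cluster template flag; the template's master_lb_enabled
    # condition uses it to decide whether to open the API port (6443) to
    # the private network.
    'master_lb_enabled': True,
}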

View File

@ -677,6 +677,16 @@ parameters:
    default:
      true

  master_lb_enabled:
    type: boolean
    description: >
      true if the master load balancer is enabled

conditions:

  master_lb_enabled: {equals: [{get_param: master_lb_enabled}, True]}

resources:

  ######################################################################
@ -718,87 +728,177 @@ resources:
  # sorts.
  #

  secgroup_kube_master:
  secgroup_kube_cluster:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
        - protocol: icmp
          #description: ICMP from PRIVATE_NET_CIDR, so that users can ping or trace routes to their master and worker nodes
          remote_ip_prefix: {get_param: fixed_network_cidr}
          remote_mode: 'remote_ip_prefix'
        - protocol: tcp
          port_range_min: 22
          port_range_max: 22
          #description: Pod network -> all TCP from CALICO_IPV4POOL_CIDR must be available to all of cluster
          remote_ip_prefix: {get_param: calico_ipv4pool}
          remote_mode: 'remote_ip_prefix'
        - protocol: udp
          #description: Pod network -> all UDP from CALICO_IPV4POOL_CIDR must be available to all of cluster
          remote_ip_prefix: {get_param: calico_ipv4pool}
          remote_mode: 'remote_ip_prefix'
        - protocol: tcp
          port_range_min: 7080
          port_range_max: 7080
          #description: Pod network -> all TCP from FLANNEL_NETWORK_CIDR must be available to all of cluster
          remote_ip_prefix: {get_param: flannel_network_cidr}
          remote_mode: 'remote_ip_prefix'
        - protocol: udp
          #description: Pod network -> all UDP from FLANNEL_NETWORK_CIDR must be available to all of cluster
          remote_ip_prefix: {get_param: flannel_network_cidr}
          remote_mode: 'remote_ip_prefix'
        - protocol: tcp
          port_range_min: 8080
          port_range_max: 8080
        - protocol: tcp
          port_range_min: 2379
          port_range_max: 2379
        - protocol: tcp
          port_range_min: 2380
          port_range_max: 2380
        - protocol: tcp
          port_range_min: 6443
          port_range_max: 6443
        - protocol: tcp
          port_range_min: 9100
          port_range_max: 9100
        - protocol: tcp
          port_range_min: 10250
          port_range_max: 10250
          #description: Cluster IP network -> all TCP from CLUSTER_IP_CIDR must be available to all of cluster
          remote_ip_prefix: {get_param: portal_network_cidr}
          remote_mode: 'remote_ip_prefix'
        - protocol: udp
          #description: Cluster IP network -> all UDP from CLUSTER_IP_CIDR must be available to all of cluster
          remote_ip_prefix: {get_param: portal_network_cidr}
          remote_mode: 'remote_ip_prefix'
        - protocol: tcp
          #description: Allow users to interact with NodePorts from their private network
          port_range_min: 30000
          port_range_max: 32767
          remote_ip_prefix: {get_param: fixed_network_cidr}
          remote_mode: 'remote_ip_prefix'
        - protocol: udp
          port_range_min: 8472
          port_range_max: 8472
          #description: Allow users to interact with NodePorts from their private network
          port_range_min: 30000
          port_range_max: 32767
          remote_ip_prefix: {get_param: fixed_network_cidr}
          remote_mode: 'remote_ip_prefix'
  secgroup_rule_kube_cluster_ssh:
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: tcp
      description: SSH from PRIVATE_NET_CIDR, so that users can SSH to their cluster nodes. In future we may want to remove this or limit it to workers only; users can add an explicit rule if wider access is required
      port_range_min: 22
      port_range_max: 22
      security_group: {get_resource: secgroup_kube_cluster}
      remote_group: {get_resource: secgroup_kube_cluster}

  secgroup_rule_kube_cluster_felix_healthz:
    type: OS::Neutron::SecurityGroupRule
    properties:
      description: Felix HealthZ -> remote group cluster wide - could be useful for users' specific monitoring
      protocol: tcp
      port_range_min: 9099
      port_range_max: 9099
      security_group: {get_resource: secgroup_kube_cluster}
      remote_group: {get_resource: secgroup_kube_cluster}

  secgroup_rule_kube_cluster_node_exporter:
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: tcp
      description: Prometheus node exporter in each node providing metrics -> remote group cluster wide; runs in privileged mode so it appears in the host namespace
      port_range_min: 9100
      port_range_max: 9100
      security_group: {get_resource: secgroup_kube_cluster}
      remote_group: {get_resource: secgroup_kube_cluster}

  secgroup_rule_kube_cluster_kubelet_api_healthz:
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: tcp
      description: Kubelet API (not HealthZ) -> remote group cluster wide - used for communication in the control plane
      port_range_min: 10250
      port_range_max: 10250
      security_group: {get_resource: secgroup_kube_cluster}
      remote_group: {get_resource: secgroup_kube_cluster}

  secgroup_rule_kube_cluster_kube_proxy_healthz:
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: tcp
      description: kube-proxy healthz -> remote group cluster wide - could be used for independent health checking
      port_range_min: 10256
      port_range_max: 10256
      security_group: {get_resource: secgroup_kube_cluster}
      remote_group: {get_resource: secgroup_kube_cluster}

  secgroup_rule_kube_cluster_calico_bird_bgp:
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: tcp
      description: Calico BIRD BGP mesh -> remote group cluster wide - communicates route updates for the POD CIDR
      port_range_min: 179
      port_range_max: 179
      security_group: {get_resource: secgroup_kube_cluster}
      remote_group: {get_resource: secgroup_kube_cluster}
  secgroup_kube_master:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
        - protocol: tcp
          #description: etcd replication needs to be visible to master nodes -> from group master-ID
          port_range_min: 2380
          port_range_max: 2380
          remote_mode: 'remote_group_id'
          remote_group_id: {get_resource: secgroup_kube_cluster}
        - protocol: tcp
          #description: etcd client access for all of the cluster -> from PRIVATE_NET_CIDR (currently Calico talks directly to etcd, but in the future it will talk to the KubeAPI instead; when that happens, this rule can be changed to the master group). PRIVATE_NET_CIDR is required so that kubelets can talk to etcd
          port_range_min: 2379
          port_range_max: 2379
          remote_ip_prefix: {get_param: fixed_network_cidr}
          remote_mode: 'remote_ip_prefix'
        - protocol: tcp
          #description: Scheduler HealthZ (deprecated but still active) -> remote group cluster wide. We are working under the assumption that the APIs bind to these ports to provide HealthZ information only, and NOT other direct operations (bypassing the Kube API)
          port_range_min: 10251
          port_range_max: 10251
          remote_mode: 'remote_group_id'
          remote_group_id: {get_resource: secgroup_kube_cluster}
        - protocol: tcp
          #description: Controller manager healthz from remote group cluster wide
          port_range_min: 10252
          port_range_max: 10252
          remote_mode: 'remote_group_id'
          remote_group_id: {get_resource: secgroup_kube_cluster}
        - protocol: tcp
          #description: OpenStack cloud controller from remote group cluster wide
          port_range_min: 10253
          port_range_max: 10253
          remote_mode: 'remote_group_id'
          remote_group_id: {get_resource: secgroup_kube_cluster}

  secgroup_rule_kube_master_api_access:
    condition: master_lb_enabled
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: tcp
      description: k8s public API from PRIVATE_NET_CIDR
      port_range_min: 6443
      port_range_max: 6443
      remote_ip_prefix: {get_param: fixed_network_cidr}
      remote_mode: 'remote_ip_prefix'
      security_group: {get_resource: secgroup_kube_master}
  secgroup_kube_minion:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
        - protocol: icmp
        # Default port range for external service ports.
        # In future, if the option `manage-security-groups` for ccm works
        # well, we could remove this rule here.
        # The PR in ccm is
        # https://github.com/kubernetes/cloud-provider-openstack/pull/491
        - protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - protocol: tcp
          port_range_min: 30000
          port_range_max: 32767
        # allow any traffic from master nodes
        - protocol: tcp
          port_range_min: 1
          port_range_max: 65535
          #description: For kube-proxy. Allow users to use the kubeproxy from -> remote group cluster wide. When users want to use kubeproxy, they connect to the master API port (6443) and the API proxies the connection to the appropriate node on port 8443 (internal to the cluster)
          port_range_min: 8443
          port_range_max: 8443
          remote_mode: 'remote_group_id'
          remote_group_id: {get_resource: secgroup_kube_master}
          remote_group_id: {get_resource: secgroup_kube_cluster}
        - protocol: tcp
          #description: All of cluster group TCP traffic to worker group
          remote_mode: 'remote_group_id'
          remote_group_id: {get_resource: secgroup_kube_cluster}
        - protocol: udp
          port_range_min: 1
          port_range_max: 65535
          #description: All of cluster group UDP traffic to worker group
          remote_mode: 'remote_group_id'
          remote_group_id: {get_resource: secgroup_kube_master}
          remote_group_id: {get_resource: secgroup_kube_cluster}

  # allow any traffic between worker nodes
  secgroup_rule_tcp_kube_minion:
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: tcp
      port_range_min: 1
      port_range_max: 65535
      security_group: {get_resource: secgroup_kube_minion}
      remote_group: {get_resource: secgroup_kube_minion}

  secgroup_rule_udp_kube_minion:
    type: OS::Neutron::SecurityGroupRule
    properties:
      protocol: udp
      port_range_min: 1
      port_range_max: 65535
      security_group: {get_resource: secgroup_kube_minion}
      remote_group: {get_resource: secgroup_kube_minion}

  ######################################################################
  #
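As a sanity check of the new layout (not part of the patch), one could list the rules Neutron ends up with for the cluster-wide group and confirm nothing is exposed to the world. This is a hedged sketch: it reuses the project's OpenStackClients/neutron client as in the helper above, but the substring match on the physical group name generated by Heat is an assumption.

# Sketch only: `ctx` is assumed to be an existing magnum RequestContext.
from magnum.common import clients


def audit_cluster_secgroup(ctx, name_hint="secgroup_kube_cluster"):
    """Best-effort check that no ingress rule of the cluster-wide group is
    open to 0.0.0.0/0 (every rule should be scoped to a remote group or to
    a CIDR passed in as a template parameter)."""
    n_client = clients.OpenStackClients(ctx).neutron()
    groups = n_client.list_security_groups().get('security_groups', [])
    for group in groups:
        # Heat embeds the resource name in the physical name it generates;
        # matching on a substring is an assumption, adjust as needed.
        if name_hint not in group['name']:
            continue
        rules = n_client.list_security_group_rules(
            security_group_id=group['id']).get('security_group_rules', [])
        for rule in rules:
            if rule['direction'] != 'ingress':
                continue
            if rule.get('remote_ip_prefix') in ('0.0.0.0/0', '::/0'):
                raise AssertionError("world-open rule found: %s" % rule)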
@ -910,6 +1010,7 @@ resources:
          influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled}
          verify_ca: {get_param: verify_ca}
          secgroup_kube_master_id: {get_resource: secgroup_kube_master}
          secgroup_kube_cluster_id: {get_resource: secgroup_kube_cluster}
          http_proxy: {get_param: http_proxy}
          https_proxy: {get_param: https_proxy}
          no_proxy: {get_param: no_proxy}
@ -1084,6 +1185,7 @@ resources:
          tls_disabled: {get_param: tls_disabled}
          verify_ca: {get_param: verify_ca}
          secgroup_kube_minion_id: {get_resource: secgroup_kube_minion}
          secgroup_kube_cluster_id: {get_resource: secgroup_kube_cluster}
          http_proxy: {get_param: http_proxy}
          https_proxy: {get_param: https_proxy}
          no_proxy: {get_param: no_proxy}

View File

@ -190,6 +190,10 @@ parameters:
    type: string
    description: ID of the security group for kubernetes master.

  secgroup_kube_cluster_id:
    type: string
    description: ID of the security group for kubernetes cluster wide.

  api_pool_id:
    type: string
    description: ID of the load balancer pool of k8s API server.
@ -708,6 +712,7 @@ resources:
      network: {get_param: fixed_network}
      security_groups:
        - {get_param: secgroup_kube_master_id}
        - {get_param: secgroup_kube_cluster_id}
      fixed_ips:
        - subnet: {get_param: fixed_subnet}
      allowed_address_pairs:

View File

@ -146,6 +146,10 @@ parameters:
    type: string
    description: ID of the security group for kubernetes minion.

  secgroup_kube_cluster_id:
    type: string
    description: ID of the security group for kubernetes cluster wide.

  volume_driver:
    type: string
    description: volume driver to use for container storage
@ -430,7 +434,8 @@ resources:
    properties:
      network: {get_param: fixed_network}
      security_groups:
        - get_param: secgroup_kube_minion_id
        - {get_param: secgroup_kube_minion_id}
        - {get_param: secgroup_kube_cluster_id}
      fixed_ips:
        - subnet: {get_param: fixed_subnet}
      allowed_address_pairs:

View File

@ -272,3 +272,76 @@ class NeutronTest(base.TestCase):
            self.context,
            another_fake_id
        )

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_subnet_cidr(self, mock_clients):
        fake_name = "fake_subnet"
        fake_id_1 = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        cidr = "10.0.0.0/24"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_subnets.return_value = {
            'subnets': [
                {
                    'id': fake_id_1,
                    'name': fake_name,
                    'cidr': cidr
                }
            ]
        }
        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertEqual(neutron.get_subnet_cidr(self.context, "fake_name"),
                         cidr)

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_subnet_cidr_duplicated(self, mock_clients):
        fake_name = "fake_subnet"
        fake_id_1 = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
        fake_id_2 = "26ab2cae-12df-4c0e-8e87-3d8a7eb62930"
        mock_nclient = mock.MagicMock()
        mock_nclient.list_subnets.return_value = {
            'subnets': [
                {
                    'id': fake_id_1,
                    'name': fake_name,
                    'cidr': '10.0.0.0/24'
                },
                {
                    'id': fake_id_2,
                    'name': fake_name,
                    'cidr': '10.0.10.0/24'
                }
            ]
        }
        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.Conflict,
            neutron.get_subnet_cidr,
            self.context,
            "fake_subnet"
        )

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_get_subnet_cidr_not_found(self, mock_clients):
        mock_nclient = mock.MagicMock()
        mock_nclient.list_subnets.return_value = {
            'subnets': []
        }
        osc = mock.MagicMock()
        mock_clients.return_value = osc
        osc.neutron.return_value = mock_nclient

        self.assertRaises(
            exception.ObjectNotFound,
            neutron.get_subnet_cidr,
            self.context,
            "fake_subnet"
        )

View File

@ -176,6 +176,7 @@ class TestClusterConductorWithK8s(base.TestCase):
self.mock_enable_octavia.return_value = False
self.addCleanup(octavia_patcher.stop)
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -190,11 +191,12 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
mock_get):
mock_get,
mock_get_subnet_cidr):
self._test_extract_template_definition(
mock_generate_csr_and_key, mock_sign_node_certificate,
mock_driver, mock_objects_cluster_template_get_by_uuid, mock_get,
mock_objects_nodegroup_list)
mock_objects_nodegroup_list, mock_get_subnet_cidr)
def _test_extract_template_definition(
self,
@ -204,6 +206,7 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_objects_cluster_template_get_by_uuid,
mock_get,
mock_objects_nodegroup_list,
mock_get_subnet_cidr,
missing_attr=None):
if missing_attr in self.cluster_template_dict:
self.cluster_template_dict[missing_attr] = None
@ -215,6 +218,8 @@ class TestClusterConductorWithK8s(base.TestCase):
self.master_ng_dict['node_count'] = None
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -349,7 +354,9 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': 'image_id',
'minion_image': 'image_id',
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'fixed_network_cidr': fixed_network_cidr,
'master_lb_enabled': cluster_template.master_lb_enabled,
}
if missing_attr is not None:
expected.pop(mapping[missing_attr], None)
@ -372,6 +379,7 @@ class TestClusterConductorWithK8s(base.TestCase):
],
env_files)
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -386,10 +394,13 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
mock_get):
mock_get,
mock_get_subnet_cidr):
self.cluster_template_dict['registry_enabled'] = True
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -489,7 +500,9 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': 'image_id',
'minion_image': 'image_id',
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'fixed_network_cidr': fixed_network_cidr,
'master_lb_enabled': cluster_template.master_lb_enabled,
}
self.assertEqual(expected, definition)
@ -503,6 +516,7 @@ class TestClusterConductorWithK8s(base.TestCase):
],
env_files)
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -517,7 +531,8 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
mock_get):
mock_get,
mock_get_subnet_cidr):
not_required = ['image_id', 'flavor_id', 'dns_nameserver',
'docker_volume_size', 'http_proxy',
@ -530,6 +545,8 @@ class TestClusterConductorWithK8s(base.TestCase):
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -611,7 +628,9 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': None,
'minion_image': None,
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'fixed_network_cidr': fixed_network_cidr,
'master_lb_enabled': cluster_template.master_lb_enabled,
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -834,6 +853,7 @@ class TestClusterConductorWithK8s(base.TestCase):
],
env_files)
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -848,7 +868,8 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
mock_get):
mock_get,
mock_get_subnet_cidr):
mock_driver.return_value = k8s_dr.Driver()
self._test_extract_template_definition(
mock_generate_csr_and_key,
@ -857,8 +878,10 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_objects_cluster_template_get_by_uuid,
mock_get,
mock_objects_nodegroup_list,
mock_get_subnet_cidr,
missing_attr='dns_nameserver')
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -873,7 +896,8 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
mock_get):
mock_get,
mock_get_subnet_cidr):
mock_driver.return_value = k8s_dr.Driver()
self._test_extract_template_definition(
mock_generate_csr_and_key,
@ -882,8 +906,10 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_objects_cluster_template_get_by_uuid,
mock_get,
mock_objects_nodegroup_list,
mock_get_subnet_cidr,
missing_attr='image_id')
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -898,7 +924,8 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
mock_get):
mock_get,
mock_get_subnet_cidr):
mock_driver.return_value = k8s_dr.Driver()
self._test_extract_template_definition(
mock_generate_csr_and_key,
@ -907,8 +934,10 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_objects_cluster_template_get_by_uuid,
mock_get,
mock_objects_nodegroup_list,
mock_get_subnet_cidr,
missing_attr='docker_storage_driver')
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -923,7 +952,8 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
mock_get):
mock_get,
mock_get_subnet_cidr):
mock_driver.return_value = k8s_dr.Driver()
self._test_extract_template_definition(
mock_generate_csr_and_key,
@ -932,8 +962,10 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_objects_cluster_template_get_by_uuid,
mock_get,
mock_objects_nodegroup_list,
mock_get_subnet_cidr,
missing_attr='apiserver_port')
@patch('magnum.common.neutron.get_subnet_cidr')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@patch('magnum.objects.NodeGroup.list')
@ -948,9 +980,12 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_driver,
mock_objects_nodegroup_list,
mock_objects_cluster_template_get_by_uuid,
reqget):
reqget,
mock_get_subnet_cidr):
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -1044,7 +1079,9 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': 'image_id',
'minion_image': 'image_id',
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'fixed_network_cidr': fixed_network_cidr,
'master_lb_enabled': cluster_template.master_lb_enabled,
}
self.assertEqual(expected, definition)
self.assertEqual(

View File

@ -388,6 +388,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
expected_scale_params = {'minions_to_remove': ['node1', 'node2']}
self.assertEqual(scale_params, expected_scale_params)
@mock.patch('magnum.common.neutron.get_subnet_cidr')
@mock.patch('magnum.common.neutron.get_fixed_network_name')
@mock.patch('magnum.common.keystone.is_octavia_enabled')
@mock.patch('magnum.common.clients.OpenStackClients')
@ -405,7 +406,10 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_get_output, mock_get_params,
mock_get_discovery_url, mock_osc_class,
mock_enable_octavia,
mock_get_fixed_network_name):
mock_get_fixed_network_name,
mock_get_subnet_cidr):
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -627,6 +631,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'kube_version': kube_tag,
'master_kube_tag': kube_tag,
'minion_kube_tag': kube_tag,
'fixed_network_cidr': fixed_network_cidr,
'master_lb_enabled': mock_cluster_template.master_lb_enabled,
}}
mock_get_params.assert_called_once_with(mock_context,
mock_cluster_template,
@ -653,6 +659,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_cluster_template.fixed_network
)
@mock.patch('magnum.common.neutron.get_subnet_cidr')
@mock.patch('magnum.common.neutron.get_external_network_id')
@mock.patch('magnum.common.keystone.is_octavia_enabled')
@mock.patch('magnum.common.clients.OpenStackClients')
@ -670,7 +677,10 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_get_discovery_url,
mock_osc_class,
mock_enable_octavia,
mock_get_external_network_id):
mock_get_external_network_id,
mock_get_subnet_cidr):
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -713,6 +723,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_cluster_template.external_network_id
)
@mock.patch('magnum.common.neutron.get_subnet_cidr')
@mock.patch('magnum.common.keystone.is_octavia_enabled')
@mock.patch('magnum.common.clients.OpenStackClients')
@mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def'
@ -728,7 +739,10 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_get_params,
mock_get_discovery_url,
mock_osc_class,
mock_enable_octavia):
mock_enable_octavia,
mock_get_subnet_cidr):
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -767,6 +781,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_cluster,
)
@mock.patch('magnum.common.neutron.get_subnet_cidr')
@mock.patch('magnum.common.keystone.is_octavia_enabled')
@mock.patch('magnum.common.clients.OpenStackClients')
@mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def'
@ -782,7 +797,10 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_get_params,
mock_get_discovery_url,
mock_osc_class,
mock_enable_octavia):
mock_enable_octavia,
mock_get_subnet_cidr):
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
@ -820,6 +838,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
actual_params.get("ingress_controller")
)
@mock.patch('magnum.common.neutron.get_subnet_cidr')
@mock.patch('magnum.common.keystone.is_octavia_enabled')
@mock.patch('magnum.common.clients.OpenStackClients')
@mock.patch('magnum.drivers.heat.template_def'
@ -835,10 +854,13 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
mock_sign_node_certificate,
mock_get_output, mock_get_params,
mock_get_discovery_url, mock_osc_class,
mock_enable_octavia):
mock_enable_octavia,
mock_get_subnet_cidr):
mock_generate_csr_and_key.return_value = {'csr': 'csr',
'private_key': 'private_key',
'public_key': 'public_key'}
fixed_network_cidr = '10.0.10.0/24'
mock_get_subnet_cidr.return_value = fixed_network_cidr
mock_sign_node_certificate.return_value = 'signed_cert'
mock_enable_octavia.return_value = False
mock_context = mock.MagicMock()
@ -1057,6 +1079,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'kube_version': kube_tag,
'master_kube_tag': kube_tag,
'minion_kube_tag': kube_tag,
'fixed_network_cidr': fixed_network_cidr,
'master_lb_enabled': mock_cluster_template.master_lb_enabled,
}}
mock_get_params.assert_called_once_with(mock_context,
mock_cluster_template,

View File

@ -0,0 +1,7 @@
---
security:
  - |
    Security is always a key area we'd like to improve for Magnum. This
    patch adds stricter security group rules for k8s clusters to limit
    access from outside. For example, cluster nodes can now only be
    reached over SSH from within the same private network.