Fix calico regression issue caused by default ipv4pool change
With I13aa0c58bf168bc069edf1d5c0187f89011fffdb, we failed to update the default value of pods_network_cidr. As a result, there is a mismatch between the calico_ipv4pool and the CIDR configured in Kubernetes (kube-proxy and kube-controller-manager). The mismatch can cause connection issues between pods/nodes. This patch fixes it. Task: 39153 Story: 2007426 Change-Id: Ic560322f5009f28e7e72704508705c1572a9262d
This commit is contained in:
parent
ce70da25ad
commit
529b036e78
|
@ -55,7 +55,7 @@ class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||
cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
|
||||
if cluster_template.network_driver == 'calico':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')
|
||||
cluster.labels.get('calico_ipv4pool', '10.100.0.0/16')
|
||||
|
||||
label_list = ['coredns_tag',
|
||||
'kube_tag', 'container_infra_prefix',
|
||||
|
|
|
@ -55,7 +55,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||
cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
|
||||
if cluster_template.network_driver == 'calico':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')
|
||||
cluster.labels.get('calico_ipv4pool', '10.100.0.0/16')
|
||||
|
||||
# check cloud provider and cinder options. If cinder is selected,
|
||||
# the cloud provider needs to be enabled.
|
||||
|
|
Loading…
Reference in New Issue