driver: refactor k8s templatedef
There is a lot of duplicated templatedef code living in two places. This change refactors it into the parent class to make maintenance much easier. Change-Id: Ie3e863440d73414224bb7dbb1ab05a5bcf0b74b1
This commit is contained in:
parent
74430a58e5
commit
e755a420f4
|
@ -26,106 +26,12 @@ CONF = cfg.CONF
|
|||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ServerAddressOutputMapping(template_def.NodeGroupOutputMapping):
    """Resolve a nodegroup's server addresses from the heat stack outputs.

    Starts out pointing at the public-address output key and switches to
    the private one when the cluster template has floating IPs disabled.
    """

    # Concrete subclasses supply the two heat output keys.
    public_ip_output_key = None
    private_ip_output_key = None

    def __init__(self, dummy_arg, nodegroup_attr=None, nodegroup_uuid=None):
        # dummy_arg is accepted (and ignored) purely for signature
        # compatibility with the generic output-mapping call sites.
        self.nodegroup_attr = nodegroup_attr
        self.nodegroup_uuid = nodegroup_uuid
        # Default to the public address output; set_output() may flip it.
        self.heat_output = self.public_ip_output_key
        self.is_stack_param = False

    def set_output(self, stack, cluster_template, cluster):
        wants_private = not cluster_template.floating_ip_enabled
        if wants_private:
            # No floating IPs attached: expose the private addresses.
            self.heat_output = self.private_ip_output_key

        LOG.debug("Using heat_output: %s", self.heat_output)
        super(ServerAddressOutputMapping,
              self).set_output(stack, cluster_template, cluster)
|
||||
|
||||
|
||||
class MasterAddressOutputMapping(ServerAddressOutputMapping):
    """Address mapping for the kube master nodegroup."""

    public_ip_output_key = 'kube_masters'
    private_ip_output_key = 'kube_masters_private'
|
||||
|
||||
|
||||
class NodeAddressOutputMapping(ServerAddressOutputMapping):
    """Address mapping for the kube minion (worker) nodegroup."""

    public_ip_output_key = 'kube_minions'
    private_ip_output_key = 'kube_minions_private'
|
||||
|
||||
|
||||
class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
||||
"""Kubernetes template for a CoreOS."""
|
||||
|
||||
def __init__(self):
    """Register the CoreOS docker-related heat parameters."""
    super(CoreOSK8sTemplateDefinition, self).__init__()
    # The volume size lives on the cluster; the storage driver lives
    # on the cluster template.
    self.add_parameter(
        'docker_volume_size', cluster_attr='docker_volume_size')
    self.add_parameter(
        'docker_storage_driver',
        cluster_template_attr='docker_storage_driver')
|
||||
|
||||
def add_nodegroup_params(self, cluster):
    """Add per-nodegroup heat parameters for the default nodegroups."""
    super(CoreOSK8sTemplateDefinition,
          self).add_nodegroup_params(cluster)
    worker_ng = cluster.default_ng_worker
    master_ng = cluster.default_ng_master
    ng_mapping = template_def.NodeGroupParameterMapping
    # (heat parameter, nodegroup attribute, owning nodegroup uuid)
    for param, attr, uuid in (
            ('number_of_minions', 'node_count', worker_ng.uuid),
            ('minion_flavor', 'flavor_id', worker_ng.uuid),
            ('master_flavor', 'flavor_id', master_ng.uuid)):
        self.add_parameter(param,
                           nodegroup_attr=attr,
                           nodegroup_uuid=uuid,
                           param_class=ng_mapping)
|
||||
|
||||
def update_outputs(self, stack, cluster_template, cluster):
    """Wire nodegroup-specific outputs, then delegate to the base class."""
    worker_ng = cluster.default_ng_worker
    master_ng = cluster.default_ng_master

    # Addresses go through the mapping classes above so the public or
    # private output key is chosen per cluster template.
    self.add_output(
        'kube_minions', nodegroup_attr='node_addresses',
        nodegroup_uuid=worker_ng.uuid,
        mapping_type=NodeAddressOutputMapping)
    self.add_output(
        'kube_masters', nodegroup_attr='node_addresses',
        nodegroup_uuid=master_ng.uuid,
        mapping_type=MasterAddressOutputMapping)
    # number_of_minions doubles as a stack parameter, so flag it.
    self.add_output(
        'number_of_minions', nodegroup_attr='node_count',
        nodegroup_uuid=worker_ng.uuid, is_stack_param=True)
    super(CoreOSK8sTemplateDefinition,
          self).update_outputs(stack, cluster_template, cluster)
|
||||
|
||||
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||
extra_params = kwargs.pop('extra_params', {})
|
||||
|
||||
extra_params['username'] = context.user_name
|
||||
osc = self.get_osc(context)
|
||||
extra_params['region_name'] = osc.cinder_region_name()
|
||||
|
||||
# set docker_volume_type
|
||||
# use the configuration default if None provided
|
||||
docker_volume_type = cluster.labels.get(
|
||||
'docker_volume_type', CONF.cinder.default_docker_volume_type)
|
||||
extra_params['docker_volume_type'] = docker_volume_type
|
||||
|
||||
extra_params['nodes_affinity_policy'] = \
|
||||
CONF.cluster.nodes_affinity_policy
|
||||
|
||||
if cluster_template.network_driver == 'flannel':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
|
||||
if cluster_template.network_driver == 'calico':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')
|
||||
|
||||
label_list = ['coredns_tag',
|
||||
'kube_tag', 'container_infra_prefix',
|
||||
'availability_zone',
|
||||
|
@ -137,14 +43,6 @@ class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||
if label_value:
|
||||
extra_params[label] = label_value
|
||||
|
||||
cert_manager_api = cluster.labels.get('cert_manager_api')
|
||||
if strutils.bool_from_string(cert_manager_api):
|
||||
extra_params['cert_manager_api'] = cert_manager_api
|
||||
ca_cert = cert_manager.get_cluster_ca_certificate(cluster)
|
||||
extra_params['ca_key'] = x509.decrypt_key(
|
||||
ca_cert.get_private_key(),
|
||||
ca_cert.get_private_key_passphrase()).replace("\n", "\\n")
|
||||
|
||||
plain_openstack_ca = utils.get_openstack_ca()
|
||||
encoded_openstack_ca = base64.b64encode(plain_openstack_ca.encode())
|
||||
extra_params['openstack_ca_coreos'] = encoded_openstack_ca.decode()
|
||||
|
@ -153,14 +51,3 @@ class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||
self).get_params(context, cluster_template, cluster,
|
||||
extra_params=extra_params,
|
||||
**kwargs)
|
||||
|
||||
def get_env_files(self, cluster_template, cluster):
    """Return the heat environment files for this cluster's options.

    Each shared template_def helper populates ``env_files`` in place;
    the original registration order is preserved.
    """
    env_files = []

    populate = (
        (template_def.add_priv_net_env_file, (cluster_template,)),
        (template_def.add_etcd_volume_env_file, (cluster_template,)),
        (template_def.add_volume_env_file, (cluster,)),
        (template_def.add_lb_env_file, (cluster_template,)),
        (template_def.add_fip_env_file, (cluster_template, cluster)),
    )
    for helper, extra in populate:
        helper(env_files, *extra)

    return env_files
|
||||
|
|
|
@ -29,84 +29,12 @@ CONF = cfg.CONF
|
|||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ServerAddressOutputMapping(template_def.NodeGroupOutputMapping):
    """Resolve a nodegroup's server addresses from the heat stack outputs.

    Defaults to the public-address output key and falls back to the
    private one when the cluster template has floating IPs disabled.
    """

    # Concrete subclasses supply the two heat output keys.
    public_ip_output_key = None
    private_ip_output_key = None

    def __init__(self, dummy_arg, nodegroup_attr=None, nodegroup_uuid=None):
        # dummy_arg is accepted (and ignored) purely for signature
        # compatibility with the generic output-mapping call sites.
        self.nodegroup_attr = nodegroup_attr
        self.nodegroup_uuid = nodegroup_uuid
        # Public output by default; set_output() may flip to private.
        self.heat_output = self.public_ip_output_key
        self.is_stack_param = False

    def set_output(self, stack, cluster_template, cluster):
        if not cluster_template.floating_ip_enabled:
            # No floating IPs attached: expose the private addresses.
            self.heat_output = self.private_ip_output_key

        LOG.debug("Using heat_output: %s", self.heat_output)
        super(ServerAddressOutputMapping,
              self).set_output(stack, cluster_template, cluster)
|
||||
|
||||
|
||||
class MasterAddressOutputMapping(ServerAddressOutputMapping):
    """Address mapping for the kube master nodegroup."""

    public_ip_output_key = 'kube_masters'
    private_ip_output_key = 'kube_masters_private'
|
||||
|
||||
|
||||
class NodeAddressOutputMapping(ServerAddressOutputMapping):
    """Address mapping for the kube minion (worker) nodegroup."""

    public_ip_output_key = 'kube_minions'
    private_ip_output_key = 'kube_minions_private'
|
||||
|
||||
|
||||
class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
||||
"""Kubernetes template for a Fedora."""
|
||||
|
||||
def __init__(self):
    """Register the Fedora docker-related heat parameters."""
    super(K8sFedoraTemplateDefinition, self).__init__()
    # The volume size lives on the cluster; the storage driver lives
    # on the cluster template.
    self.add_parameter(
        'docker_volume_size', cluster_attr='docker_volume_size')
    self.add_parameter(
        'docker_storage_driver',
        cluster_template_attr='docker_storage_driver')
|
||||
|
||||
def update_outputs(self, stack, cluster_template, cluster):
    """Wire nodegroup address outputs, then delegate to the base class."""
    worker_ng = cluster.default_ng_worker
    master_ng = cluster.default_ng_master

    # Addresses go through the mapping classes above so the public or
    # private output key is chosen per cluster template.
    self.add_output(
        'kube_minions', nodegroup_attr='node_addresses',
        nodegroup_uuid=worker_ng.uuid,
        mapping_type=NodeAddressOutputMapping)
    self.add_output(
        'kube_masters', nodegroup_attr='node_addresses',
        nodegroup_uuid=master_ng.uuid,
        mapping_type=MasterAddressOutputMapping)
    super(K8sFedoraTemplateDefinition,
          self).update_outputs(stack, cluster_template, cluster)
|
||||
|
||||
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||
extra_params = kwargs.pop('extra_params', {})
|
||||
|
||||
extra_params['username'] = context.user_name
|
||||
osc = self.get_osc(context)
|
||||
extra_params['region_name'] = osc.cinder_region_name()
|
||||
|
||||
# set docker_volume_type
|
||||
# use the configuration default if None provided
|
||||
docker_volume_type = cluster.labels.get(
|
||||
'docker_volume_type', CONF.cinder.default_docker_volume_type)
|
||||
extra_params['docker_volume_type'] = docker_volume_type
|
||||
|
||||
extra_params['nodes_affinity_policy'] = \
|
||||
CONF.cluster.nodes_affinity_policy
|
||||
|
||||
if cluster_template.network_driver == 'flannel':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
|
||||
if cluster_template.network_driver == 'calico':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')
|
||||
|
||||
# check cloud provider and cinder options. If cinder is selected,
|
||||
# the cloud provider needs to be enabled.
|
||||
cloud_provider_enabled = cluster.labels.get(
|
||||
|
@ -159,7 +87,6 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||
if not extra_params.get('max_node_count'):
|
||||
extra_params['max_node_count'] = cluster.node_count + 1
|
||||
|
||||
self._set_cert_manager_params(cluster, extra_params)
|
||||
self._get_keystone_auth_default_policy(extra_params)
|
||||
|
||||
return super(K8sFedoraTemplateDefinition,
|
||||
|
@ -167,22 +94,6 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||
extra_params=extra_params,
|
||||
**kwargs)
|
||||
|
||||
def _set_cert_manager_params(self, cluster, extra_params):
    """Expose cert_manager_api and the decrypted CA key when enabled."""
    cert_manager_api = cluster.labels.get('cert_manager_api')
    if not strutils.bool_from_string(cert_manager_api):
        # Label missing or falsy: nothing to add.
        return

    extra_params['cert_manager_api'] = cert_manager_api
    ca_cert = cert_manager.get_cluster_ca_certificate(cluster)
    passphrase = ca_cert.get_private_key_passphrase()
    if six.PY3 and isinstance(passphrase, six.text_type):
        # On py3, decrypt_key works on a bytes passphrase and its
        # result is decoded back to text here.
        ca_key = x509.decrypt_key(ca_cert.get_private_key(),
                                  passphrase.encode()).decode()
    else:
        ca_key = x509.decrypt_key(ca_cert.get_private_key(), passphrase)
    # Heat parameter values need literal \n sequences, not newlines.
    extra_params['ca_key'] = ca_key.replace("\n", "\\n")
|
||||
|
||||
def _get_keystone_auth_default_policy(self, extra_params):
|
||||
# NOTE(flwang): This purpose of this function is to make the default
|
||||
# policy more flexible for different cloud providers. Since the default
|
||||
|
@ -211,14 +122,3 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||
washed_policy = default_policy.replace('"', '\"') \
|
||||
.replace("$PROJECT_ID", extra_params["project_id"])
|
||||
extra_params["keystone_auth_default_policy"] = washed_policy
|
||||
|
||||
def get_env_files(self, cluster_template, cluster):
    """Return the heat environment files for this cluster's options.

    Each shared template_def helper populates ``env_files`` in place;
    the original registration order is preserved.
    """
    env_files = []

    populate = (
        (template_def.add_priv_net_env_file, (cluster_template,)),
        (template_def.add_etcd_volume_env_file, (cluster_template,)),
        (template_def.add_volume_env_file, (cluster,)),
        (template_def.add_lb_env_file, (cluster_template,)),
        (template_def.add_fip_env_file, (cluster_template, cluster)),
    )
    for helper, extra in populate:
        helper(env_files, *extra)

    return env_files
|
||||
|
|
|
@ -50,6 +50,36 @@ class K8sApiAddressOutputMapping(template_def.OutputMapping):
|
|||
setattr(cluster, self.cluster_attr, value)
|
||||
|
||||
|
||||
class ServerAddressOutputMapping(template_def.NodeGroupOutputMapping):
    """Resolve a nodegroup's server addresses from the heat stack outputs.

    Defaults to the public-address output key and falls back to the
    private one when the cluster template has floating IPs disabled.
    """

    # Concrete subclasses supply the two heat output keys.
    public_ip_output_key = None
    private_ip_output_key = None

    def __init__(self, dummy_arg, nodegroup_attr=None, nodegroup_uuid=None):
        # dummy_arg is accepted (and ignored) purely for signature
        # compatibility with the generic output-mapping call sites.
        self.nodegroup_attr = nodegroup_attr
        self.nodegroup_uuid = nodegroup_uuid
        # Public output by default; set_output() may flip to private.
        self.heat_output = self.public_ip_output_key
        self.is_stack_param = False

    def set_output(self, stack, cluster_template, cluster):
        if not cluster_template.floating_ip_enabled:
            # No floating IPs attached: expose the private addresses.
            self.heat_output = self.private_ip_output_key

        LOG.debug("Using heat_output: %s", self.heat_output)
        super(ServerAddressOutputMapping,
              self).set_output(stack, cluster_template, cluster)
|
||||
|
||||
|
||||
class MasterAddressOutputMapping(ServerAddressOutputMapping):
    """Address mapping for the kube master nodegroup."""

    public_ip_output_key = 'kube_masters'
    private_ip_output_key = 'kube_masters_private'
|
||||
|
||||
|
||||
class NodeAddressOutputMapping(ServerAddressOutputMapping):
    """Address mapping for the kube minion (worker) nodegroup."""

    public_ip_output_key = 'kube_minions'
    private_ip_output_key = 'kube_minions_private'
|
||||
|
||||
|
||||
class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
||||
"""Base Kubernetes template."""
|
||||
|
||||
|
@ -87,6 +117,11 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||
self.add_output('kube_masters_private',
|
||||
cluster_attr=None)
|
||||
|
||||
self.add_parameter('docker_volume_size',
|
||||
cluster_attr='docker_volume_size')
|
||||
self.add_parameter('docker_storage_driver',
|
||||
cluster_template_attr='docker_storage_driver')
|
||||
|
||||
def add_nodegroup_params(self, cluster):
|
||||
super(K8sTemplateDefinition,
|
||||
self).add_nodegroup_params(cluster)
|
||||
|
@ -106,7 +141,16 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||
param_class=template_def.NodeGroupParameterMapping)
|
||||
|
||||
def update_outputs(self, stack, cluster_template, cluster):
|
||||
master_ng = cluster.default_ng_master
|
||||
worker_ng = cluster.default_ng_worker
|
||||
self.add_output('kube_masters',
|
||||
nodegroup_attr='node_addresses',
|
||||
nodegroup_uuid=master_ng.uuid,
|
||||
mapping_type=MasterAddressOutputMapping)
|
||||
self.add_output('kube_minions',
|
||||
nodegroup_attr='node_addresses',
|
||||
nodegroup_uuid=worker_ng.uuid,
|
||||
mapping_type=NodeAddressOutputMapping)
|
||||
self.add_output('number_of_minions',
|
||||
nodegroup_attr='node_count',
|
||||
nodegroup_uuid=worker_ng.uuid,
|
||||
|
@ -118,6 +162,28 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||
extra_params = kwargs.pop('extra_params', {})
|
||||
|
||||
extra_params['username'] = context.user_name
|
||||
osc = self.get_osc(context)
|
||||
extra_params['region_name'] = osc.cinder_region_name()
|
||||
|
||||
# set docker_volume_type
|
||||
# use the configuration default if None provided
|
||||
docker_volume_type = cluster.labels.get(
|
||||
'docker_volume_type', CONF.cinder.default_docker_volume_type)
|
||||
extra_params['docker_volume_type'] = docker_volume_type
|
||||
|
||||
extra_params['nodes_affinity_policy'] = \
|
||||
CONF.cluster.nodes_affinity_policy
|
||||
|
||||
if cluster_template.network_driver == 'flannel':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('flannel_network_cidr', '10.100.0.0/16')
|
||||
if cluster_template.network_driver == 'calico':
|
||||
extra_params["pods_network_cidr"] = \
|
||||
cluster.labels.get('calico_ipv4pool', '192.168.0.0/16')
|
||||
|
||||
self._set_cert_manager_params(cluster, extra_params)
|
||||
|
||||
extra_params['discovery_url'] = \
|
||||
self.get_discovery_url(cluster, cluster_template=cluster_template)
|
||||
osc = self.get_osc(context)
|
||||
|
@ -190,6 +256,22 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||
extra_params=extra_params,
|
||||
**kwargs)
|
||||
|
||||
def _set_cert_manager_params(self, cluster, extra_params):
    """Expose cert_manager_api and the decrypted CA key when enabled."""
    cert_manager_api = cluster.labels.get('cert_manager_api')
    if not strutils.bool_from_string(cert_manager_api):
        # Label missing or falsy: nothing to add.
        return

    extra_params['cert_manager_api'] = cert_manager_api
    ca_cert = cert_manager.get_cluster_ca_certificate(cluster)
    passphrase = ca_cert.get_private_key_passphrase()
    if six.PY3 and isinstance(passphrase, six.text_type):
        # On py3, decrypt_key works on a bytes passphrase and its
        # result is decoded back to text here.
        ca_key = x509.decrypt_key(ca_cert.get_private_key(),
                                  passphrase.encode()).decode()
    else:
        ca_key = x509.decrypt_key(ca_cert.get_private_key(), passphrase)
    # Heat parameter values need literal \n sequences, not newlines.
    extra_params['ca_key'] = ca_key.replace("\n", "\\n")
|
||||
|
||||
def get_scale_params(self, context, cluster, scale_manager=None,
|
||||
nodes_to_remove=None):
|
||||
scale_params = dict()
|
||||
|
@ -200,3 +282,14 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||
scale_params['minions_to_remove'] = (
|
||||
scale_manager.get_removal_nodes(hosts))
|
||||
return scale_params
|
||||
|
||||
def get_env_files(self, cluster_template, cluster):
    """Return the heat environment files for this cluster's options.

    Each shared template_def helper populates ``env_files`` in place;
    the original registration order is preserved.
    """
    env_files = []

    populate = (
        (template_def.add_priv_net_env_file, (cluster_template,)),
        (template_def.add_etcd_volume_env_file, (cluster_template,)),
        (template_def.add_volume_env_file, (cluster,)),
        (template_def.add_lb_env_file, (cluster_template,)),
        (template_def.add_fip_env_file, (cluster_template, cluster)),
    )
    for helper, extra in populate:
        helper(env_files, *extra)

    return env_files
|
||||
|
|
Loading…
Reference in New Issue