ng-7: Adapt parameter and output mappings

With this change, parameter and output mappings can support multiple
stacks.

Change-Id: I45cf765977c7f5a92f28ae12c469b98435763163
Theodoros Tsioutsias 2019-06-24 13:45:27 +02:00
parent cbe05aa97d
commit d4a52719f1
18 changed files with 343 additions and 337 deletions
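
At a glance, the drivers stop wiring Heat parameters to the two default nodegroups by hand and instead describe them as per-role maps (get_nodegroup_param_maps) that the base template definition applies to whatever nodegroups it is handed; outputs are resolved the same way in update_outputs. Below is a minimal, runnable sketch of that pattern — the method names mirror the diff, but the toy classes are illustrative stand-ins, not Magnum's actual template definitions.

# Sketch only: per-nodegroup parameter mapping, with simplified stand-in classes.
from collections import namedtuple

NodeGroup = namedtuple('NodeGroup', 'uuid role node_count flavor_id image_id')


class ToyBaseTemplateDefinition(object):
    def __init__(self):
        # heat parameter name -> (nodegroup uuid, nodegroup attribute)
        self.param_mappings = {}

    def get_nodegroup_param_maps(self, master_params=None, worker_params=None):
        # The base class contributes the mappings every driver needs;
        # drivers extend the dicts before delegating here.
        master_params = dict(master_params or {},
                             number_of_masters='node_count', role='role')
        worker_params = dict(worker_params or {}, role='role')
        return master_params, worker_params

    def add_nodegroup_params(self, nodegroups):
        master_params, worker_params = self.get_nodegroup_param_maps()
        for ng in nodegroups:
            params = master_params if ng.role == 'master' else worker_params
            for heat_param, ng_attr in params.items():
                self.param_mappings[heat_param] = (ng.uuid, ng_attr)


class ToyK8sTemplateDefinition(ToyBaseTemplateDefinition):
    def get_nodegroup_param_maps(self, master_params=None, worker_params=None):
        master_params = dict(master_params or {},
                             master_flavor='flavor_id',
                             master_image='image_id')
        worker_params = dict(worker_params or {},
                             number_of_minions='node_count',
                             minion_flavor='flavor_id',
                             minion_image='image_id')
        return super(ToyK8sTemplateDefinition, self).get_nodegroup_param_maps(
            master_params=master_params, worker_params=worker_params)


definition = ToyK8sTemplateDefinition()
definition.add_nodegroup_params([
    NodeGroup('m-uuid', 'master', 1, 'm1.large', 'fedora-atomic'),
    NodeGroup('w-uuid', 'worker', 3, 'm1.small', 'fedora-atomic'),
])
print(definition.param_mappings['minion_image'])   # ('w-uuid', 'image_id')
print(definition.param_mappings['master_flavor'])  # ('m-uuid', 'flavor_id')

Because the mappings are keyed per nodegroup UUID, the same definition can be reused for additional nodegroups (and therefore additional Heat stacks) instead of assuming exactly one default master and one default worker.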

View File

@ -26,36 +26,6 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ServerAddressOutputMapping(template_def.NodeGroupOutputMapping):
public_ip_output_key = None
private_ip_output_key = None
def __init__(self, dummy_arg, nodegroup_attr=None, nodegroup_uuid=None):
self.nodegroup_attr = nodegroup_attr
self.nodegroup_uuid = nodegroup_uuid
self.heat_output = self.public_ip_output_key
self.is_stack_param = False
def set_output(self, stack, cluster_template, cluster):
if not cluster_template.floating_ip_enabled:
self.heat_output = self.private_ip_output_key
LOG.debug("Using heat_output: %s", self.heat_output)
super(ServerAddressOutputMapping,
self).set_output(stack, cluster_template, cluster)
class MasterAddressOutputMapping(ServerAddressOutputMapping):
public_ip_output_key = 'kube_masters'
private_ip_output_key = 'kube_masters_private'
class NodeAddressOutputMapping(ServerAddressOutputMapping):
public_ip_output_key = 'kube_minions'
private_ip_output_key = 'kube_minions_private'
class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
"""Kubernetes template for a CoreOS."""
@ -66,43 +36,6 @@ class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
self.add_parameter('docker_storage_driver',
cluster_template_attr='docker_storage_driver')
def add_nodegroup_params(self, cluster):
super(CoreOSK8sTemplateDefinition,
self).add_nodegroup_params(cluster)
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
self.add_parameter('number_of_minions',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('minion_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('master_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=master_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
def update_outputs(self, stack, cluster_template, cluster):
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
self.add_output('kube_minions',
nodegroup_attr='node_addresses',
nodegroup_uuid=worker_ng.uuid,
mapping_type=NodeAddressOutputMapping)
self.add_output('kube_masters',
nodegroup_attr='node_addresses',
nodegroup_uuid=master_ng.uuid,
mapping_type=MasterAddressOutputMapping)
self.add_output('number_of_minions',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
is_stack_param=True)
super(CoreOSK8sTemplateDefinition,
self).update_outputs(stack, cluster_template, cluster)
def get_params(self, context, cluster_template, cluster, **kwargs):
extra_params = kwargs.pop('extra_params', {})

View File

@ -30,36 +30,6 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ServerAddressOutputMapping(template_def.NodeGroupOutputMapping):
public_ip_output_key = None
private_ip_output_key = None
def __init__(self, dummy_arg, nodegroup_attr=None, nodegroup_uuid=None):
self.nodegroup_attr = nodegroup_attr
self.nodegroup_uuid = nodegroup_uuid
self.heat_output = self.public_ip_output_key
self.is_stack_param = False
def set_output(self, stack, cluster_template, cluster):
if not cluster_template.floating_ip_enabled:
self.heat_output = self.private_ip_output_key
LOG.debug("Using heat_output: %s", self.heat_output)
super(ServerAddressOutputMapping,
self).set_output(stack, cluster_template, cluster)
class MasterAddressOutputMapping(ServerAddressOutputMapping):
public_ip_output_key = 'kube_masters'
private_ip_output_key = 'kube_masters_private'
class NodeAddressOutputMapping(ServerAddressOutputMapping):
public_ip_output_key = 'kube_minions'
private_ip_output_key = 'kube_minions_private'
class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
"""Kubernetes template for a Fedora."""
@ -70,21 +40,6 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
self.add_parameter('docker_storage_driver',
cluster_template_attr='docker_storage_driver')
def update_outputs(self, stack, cluster_template, cluster):
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
self.add_output('kube_minions',
nodegroup_attr='node_addresses',
nodegroup_uuid=worker_ng.uuid,
mapping_type=NodeAddressOutputMapping)
self.add_output('kube_masters',
nodegroup_attr='node_addresses',
nodegroup_uuid=master_ng.uuid,
mapping_type=MasterAddressOutputMapping)
super(K8sFedoraTemplateDefinition,
self).update_outputs(stack, cluster_template, cluster)
def get_params(self, context, cluster_template, cluster, **kwargs):
extra_params = kwargs.pop('extra_params', {})
@ -121,9 +76,6 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
'to be true or unset.'))
extra_params['cloud_provider_enabled'] = cloud_provider_enabled
extra_params['master_image'] = cluster_template.image_id
extra_params['minion_image'] = cluster_template.image_id
label_list = ['coredns_tag',
'kube_tag', 'container_infra_prefix',
'availability_zone',

View File

@ -11,6 +11,7 @@
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from magnum.common import exception
from magnum.common import keystone
@ -20,6 +21,9 @@ from magnum.drivers.heat import template_def
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
"""kubernetes ports """
KUBE_SECURE_PORT = '6443'
KUBE_INSECURE_PORT = '8080'
@ -49,6 +53,36 @@ class K8sApiAddressOutputMapping(template_def.OutputMapping):
setattr(cluster, self.cluster_attr, value)
class ServerAddressOutputMapping(template_def.NodeGroupOutputMapping):
public_ip_output_key = None
private_ip_output_key = None
def __init__(self, dummy_arg, nodegroup_attr=None, nodegroup_uuid=None):
self.nodegroup_attr = nodegroup_attr
self.nodegroup_uuid = nodegroup_uuid
self.heat_output = self.public_ip_output_key
self.is_stack_param = False
def set_output(self, stack, cluster_template, cluster):
if not cluster_template.floating_ip_enabled:
self.heat_output = self.private_ip_output_key
LOG.debug("Using heat_output: %s", self.heat_output)
super(ServerAddressOutputMapping,
self).set_output(stack, cluster_template, cluster)
class MasterAddressOutputMapping(ServerAddressOutputMapping):
public_ip_output_key = 'kube_masters'
private_ip_output_key = 'kube_masters_private'
class NodeAddressOutputMapping(ServerAddressOutputMapping):
public_ip_output_key = 'kube_minions'
private_ip_output_key = 'kube_minions_private'
class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
"""Base Kubernetes template."""
@ -86,33 +120,45 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
self.add_output('kube_masters_private',
cluster_attr=None)
def add_nodegroup_params(self, cluster):
super(K8sTemplateDefinition,
self).add_nodegroup_params(cluster)
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
self.add_parameter('number_of_minions',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('minion_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('master_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=master_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
def get_nodegroup_param_maps(self, master_params=None, worker_params=None):
master_params = master_params or dict()
worker_params = worker_params or dict()
master_params.update({
'master_flavor': 'flavor_id',
'master_image': 'image_id',
})
worker_params.update({
'number_of_minions': 'node_count',
'minion_flavor': 'flavor_id',
'minion_image': 'image_id',
})
return super(
K8sTemplateDefinition, self).get_nodegroup_param_maps(
master_params=master_params, worker_params=worker_params)
def update_outputs(self, stack, cluster_template, cluster):
worker_ng = cluster.default_ng_worker
self.add_output('number_of_minions',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
is_stack_param=True,
mapping_type=template_def.NodeGroupOutputMapping)
def update_outputs(self, stack, cluster_template, cluster,
nodegroups=None):
nodegroups = nodegroups or [cluster.default_ng_worker,
cluster.default_ng_master]
for nodegroup in nodegroups:
if nodegroup.role == 'master':
self.add_output('kube_masters',
nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=MasterAddressOutputMapping)
else:
self.add_output('kube_minions',
nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=NodeAddressOutputMapping)
self.add_output(
'number_of_minions', nodegroup_attr='node_count',
nodegroup_uuid=nodegroup.uuid,
mapping_type=template_def.NodeGroupOutputMapping,
is_stack_param=True)
super(K8sTemplateDefinition,
self).update_outputs(stack, cluster_template, cluster)
self).update_outputs(stack, cluster_template, cluster,
nodegroups=nodegroups)
def get_params(self, context, cluster_template, cluster, **kwargs):
extra_params = kwargs.pop('extra_params', {})

View File

@ -78,43 +78,45 @@ class SwarmFedoraTemplateDefinition(template_def.BaseTemplateDefinition):
self.add_output('discovery_url',
cluster_attr='discovery_url')
def add_nodegroup_params(self, cluster):
super(SwarmFedoraTemplateDefinition,
self).add_nodegroup_params(cluster)
master_ng = cluster.default_ng_master
worker_ng = cluster.default_ng_worker
self.add_parameter('number_of_nodes',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('node_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('master_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=master_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
def get_nodegroup_param_maps(self, master_params=None, worker_params=None):
master_params = master_params or dict()
worker_params = worker_params or dict()
master_params.update({
'master_flavor': 'flavor_id',
'master_image': 'image_id'
})
worker_params.update({
'number_of_nodes': 'node_count',
'node_flavor': 'flavor_id',
'node_image': 'image_id'
})
return super(
SwarmFedoraTemplateDefinition, self).get_nodegroup_param_maps(
master_params=master_params, worker_params=worker_params)
def update_outputs(self, stack, cluster_template, cluster):
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
def update_outputs(self, stack, cluster_template, cluster,
nodegroups=None):
nodegroups = nodegroups or [cluster.default_ng_worker,
cluster.default_ng_master]
self.add_output('swarm_masters',
nodegroup_attr='node_addresses',
nodegroup_uuid=master_ng.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
self.add_output('swarm_nodes',
nodegroup_attr='node_addresses',
nodegroup_uuid=worker_ng.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
self.add_output('number_of_nodes',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
is_stack_param=True,
mapping_type=template_def.NodeGroupOutputMapping)
for nodegroup in nodegroups:
if nodegroup.role == 'master':
self.add_output(
'swarm_masters', nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
else:
self.add_output(
'swarm_nodes', nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
self.add_output(
'number_of_nodes', nodegroup_attr='node_count',
nodegroup_uuid=nodegroup.uuid, is_stack_param=True,
mapping_type=template_def.NodeGroupOutputMapping)
super(SwarmFedoraTemplateDefinition,
self).update_outputs(stack, cluster_template, cluster)
self).update_outputs(stack, cluster_template, cluster,
nodegroups=nodegroups)
def get_params(self, context, cluster_template, cluster, **kwargs):
extra_params = kwargs.pop('extra_params', {})

View File

@ -141,43 +141,44 @@ class SwarmModeTemplateDefinition(template_def.BaseTemplateDefinition):
extra_params=extra_params,
**kwargs)
def add_nodegroup_params(self, cluster):
super(SwarmModeTemplateDefinition,
self).add_nodegroup_params(cluster)
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
self.add_parameter('number_of_nodes',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('node_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('master_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=master_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
def get_nodegroup_param_maps(self, master_params=None, worker_params=None):
master_params = master_params or dict()
worker_params = worker_params or dict()
master_params.update({
'master_flavor': 'flavor_id',
'master_image': 'image_id'
})
worker_params.update({
'number_of_nodes': 'node_count',
'node_flavor': 'flavor_id',
'node_image': 'image_id'
})
return super(
SwarmModeTemplateDefinition, self).get_nodegroup_param_maps(
master_params=master_params, worker_params=worker_params)
def update_outputs(self, stack, cluster_template, cluster):
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
self.add_output('swarm_masters',
nodegroup_attr='node_addresses',
nodegroup_uuid=master_ng.uuid,
mapping_type=MasterAddressOutputMapping)
self.add_output('swarm_nodes',
nodegroup_attr='node_addresses',
nodegroup_uuid=worker_ng.uuid,
mapping_type=NodeAddressOutputMapping)
self.add_output('number_of_nodes',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
is_stack_param=True,
mapping_type=template_def.NodeGroupOutputMapping)
def update_outputs(self, stack, cluster_template, cluster,
nodegroups=None):
nodegroups = nodegroups or [cluster.default_ng_worker,
cluster.default_ng_master]
for nodegroup in nodegroups:
if nodegroup.role == 'master':
self.add_output('swarm_masters',
nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=MasterAddressOutputMapping)
else:
self.add_output('swarm_nodes',
nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=NodeAddressOutputMapping)
self.add_output(
'number_of_nodes', nodegroup_attr='node_count',
nodegroup_uuid=nodegroup.uuid, is_stack_param=True,
mapping_type=template_def.NodeGroupOutputMapping)
super(SwarmModeTemplateDefinition,
self).update_outputs(stack, cluster_template, cluster)
self).update_outputs(stack, cluster_template, cluster,
nodegroups=nodegroups)
def get_env_files(self, cluster_template, cluster):
env_files = []

View File

@ -92,7 +92,7 @@ class NodeGroupParameterMapping(ParameterMapping):
def get_value(self, cluster_template, cluster):
value = None
for ng in cluster.nodegroups:
if ng.uuid == self.nodegroup_uuid:
if ng.uuid == self.nodegroup_uuid and self.nodegroup_attr in ng:
value = getattr(ng, self.nodegroup_attr)
break
return value
@ -187,6 +187,7 @@ class TemplateDefinition(object):
def __init__(self):
self.param_mappings = list()
self.output_mappings = list()
self.nodegroup_output_mappings = list()
def add_parameter(self, *args, **kwargs):
param_class = kwargs.pop('param_class', ParameterMapping)
@ -196,7 +197,10 @@ class TemplateDefinition(object):
def add_output(self, *args, **kwargs):
mapping_type = kwargs.pop('mapping_type', OutputMapping)
output = mapping_type(*args, **kwargs)
self.output_mappings.append(output)
if kwargs.get('cluster_attr', None):
self.output_mappings.append(output)
else:
self.nodegroup_output_mappings.append(output)
def get_output(self, *args, **kwargs):
for output in self.output_mappings:
@ -295,12 +299,15 @@ class TemplateDefinition(object):
def resolve_ambiguous_values(self, context, heat_param, heat_value, value):
return str(value)
def add_nodegroup_params(self, cluster):
def add_nodegroup_params(self, cluster, nodegroups=None):
pass
def update_outputs(self, stack, cluster_template, cluster):
def update_outputs(self, stack, cluster_template, cluster,
nodegroups=None):
for output in self.output_mappings:
output.set_output(stack, cluster_template, cluster)
for output in self.nodegroup_output_mappings:
output.set_output(stack, cluster_template, cluster)
@abc.abstractproperty
def driver_module_path(self):
@ -323,8 +330,6 @@ class BaseTemplateDefinition(TemplateDefinition):
self.add_parameter('ssh_key_name',
cluster_attr='keypair')
self.add_parameter('server_image',
cluster_template_attr='image_id')
self.add_parameter('dns_nameserver',
cluster_template_attr='dns_nameserver')
self.add_parameter('http_proxy',
@ -350,8 +355,9 @@ class BaseTemplateDefinition(TemplateDefinition):
def get_params(self, context, cluster_template, cluster, **kwargs):
osc = self.get_osc(context)
nodegroups = kwargs.pop('nodegroups', None)
# Add all the params from the cluster's nodegroups
self.add_nodegroup_params(cluster)
self.add_nodegroup_params(cluster, nodegroups=nodegroups)
extra_params = kwargs.pop('extra_params', {})
extra_params['trustee_domain_id'] = osc.keystone().trustee_domain_id
@ -394,25 +400,46 @@ class BaseTemplateDefinition(TemplateDefinition):
return super(BaseTemplateDefinition, self).resolve_ambiguous_values(
context, heat_param, heat_value, value)
def add_nodegroup_params(self, cluster):
# Assuming that all the drivers that will not override
# this method do not support more than two nodegroups.
# Meaning that we have one master and one worker.
master_ng = cluster.default_ng_master
self.add_parameter('number_of_masters',
nodegroup_attr='node_count',
nodegroup_uuid=master_ng.uuid,
param_class=NodeGroupParameterMapping)
def add_nodegroup_params(self, cluster, nodegroups=None):
master_params, worker_params = self.get_nodegroup_param_maps()
nodegroups = nodegroups or [cluster.default_ng_worker,
cluster.default_ng_master]
for nodegroup in nodegroups:
params = worker_params
if nodegroup.role == 'master':
params = master_params
self._handle_nodegroup_param_map(nodegroup, params)
def update_outputs(self, stack, cluster_template, cluster):
def get_nodegroup_param_maps(self, master_params=None, worker_params=None):
master_params = master_params or dict()
worker_params = worker_params or dict()
master_params.update({
'number_of_masters': 'node_count',
'role': 'role'
})
worker_params.update({'role': 'role'})
return master_params, worker_params
def _handle_nodegroup_param_map(self, nodegroup, param_map):
for template_attr, nodegroup_attr in param_map.items():
self.add_parameter(template_attr, nodegroup_attr=nodegroup_attr,
nodegroup_uuid=nodegroup.uuid,
param_class=NodeGroupParameterMapping)
def update_outputs(self, stack, cluster_template, cluster,
nodegroups=None):
master_ng = cluster.default_ng_master
self.add_output('number_of_masters',
nodegroup_attr='node_count',
nodegroup_uuid=master_ng.uuid,
is_stack_param=True,
mapping_type=NodeGroupOutputMapping)
nodegroups = nodegroups or [cluster.default_ng_master]
for nodegroup in nodegroups:
if nodegroup.role == 'master':
self.add_output('number_of_masters',
nodegroup_attr='node_count',
nodegroup_uuid=master_ng.uuid,
is_stack_param=True,
mapping_type=NodeGroupOutputMapping)
super(BaseTemplateDefinition,
self).update_outputs(stack, cluster_template, cluster)
self).update_outputs(stack, cluster_template, cluster,
nodegroups=nodegroups)
def validate_discovery_url(self, discovery_url, expect_size):
url = str(discovery_url)
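
For outputs, the TemplateDefinition changes above keep two lists: add_output() routes mappings that carry a cluster_attr into output_mappings and everything else into nodegroup_output_mappings, and update_outputs() then resolves both. A rough sketch of just that routing, again with a simplified stand-in rather than the real mapping classes:

# Illustrative only: a stripped-down add_output() showing how outputs are
# split between cluster-level and per-nodegroup mapping lists.
class ToyTemplateDefinition(object):
    def __init__(self):
        self.output_mappings = []            # bound to a cluster attribute
        self.nodegroup_output_mappings = []  # bound to a specific nodegroup

    def add_output(self, heat_output, cluster_attr=None, **kwargs):
        mapping = dict(heat_output=heat_output,
                       cluster_attr=cluster_attr, **kwargs)
        if cluster_attr:
            self.output_mappings.append(mapping)
        else:
            self.nodegroup_output_mappings.append(mapping)


tdef = ToyTemplateDefinition()
tdef.add_output('api_address', cluster_attr='api_address')
tdef.add_output('number_of_minions', nodegroup_attr='node_count',
                nodegroup_uuid='w-uuid', is_stack_param=True)
print(len(tdef.output_mappings))            # 1
print(len(tdef.nodegroup_output_mappings))  # 1

In update_outputs(), each driver then registers one address/count output per nodegroup (kube_masters/kube_minions, swarm_masters/swarm_nodes, mesos_master/mesos_slaves) keyed by the nodegroup's UUID, rather than only for the two default nodegroups.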

View File

@ -31,7 +31,11 @@ parameters:
description: uuid/name of an existing subnet to use to provision machines
default: ""
server_image:
master_image:
type: string
description: glance image used to boot the server
minion_image:
type: string
description: glance image used to boot the server
@ -642,7 +646,7 @@ resources:
api_public_address: {get_attr: [api_lb, floating_address]}
api_private_address: {get_attr: [api_lb, address]}
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: master_image}
master_flavor: {get_param: master_flavor}
external_network: {get_param: external_network}
kube_allow_priv: {get_param: kube_allow_priv}
@ -733,7 +737,7 @@ resources:
- [{ get_param: 'OS::stack_name' }, 'minion', '%index%']
prometheus_monitoring: {get_param: prometheus_monitoring}
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: minion_image}
minion_flavor: {get_param: minion_flavor}
fixed_network: {get_attr: [network, fixed_network]}
fixed_subnet: {get_attr: [network, fixed_subnet]}

View File

@ -66,7 +66,6 @@ class Driver(driver.KubernetesDriver):
# SemanticVersion. For this case, let's just skip it.
LOG.debug("Failed to parse tag/version %s", str(e))
heat_params["server_image"] = cluster_template.image_id
heat_params["master_image"] = cluster_template.image_id
heat_params["minion_image"] = cluster_template.image_id
# NOTE(flwang): Overwrite the kube_tag as well to avoid a server

View File

@ -31,10 +31,6 @@ parameters:
description: uuid/name of an existing subnet to use to provision machines
default: ""
server_image:
type: string
description: glance image used to boot the server
master_image:
type: string
description: glance image used to boot the server

View File

@ -30,7 +30,11 @@ parameters:
description: Sub network from which to allocate fixed addresses.
default: private-subnet
server_image:
master_image:
type: string
description: glance image used to boot the server
minion_image:
type: string
description: glance image used to boot the server
@ -503,7 +507,7 @@ resources:
api_public_address: {get_attr: [api_lb, floating_address]}
api_private_address: {get_attr: [api_lb, address]}
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: master_image}
master_flavor: {get_param: master_flavor}
external_network: {get_param: external_network}
kube_allow_priv: {get_param: kube_allow_priv}
@ -585,7 +589,7 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'minion', '%index%']
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: minion_image}
minion_flavor: {get_param: minion_flavor}
fixed_network: {get_param: fixed_network}
fixed_subnet: {get_param: fixed_subnet}

View File

@ -43,42 +43,44 @@ class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition):
self.add_output('mesos_slaves_private',
cluster_attr=None)
def add_nodegroup_params(self, cluster):
super(UbuntuMesosTemplateDefinition,
self).add_nodegroup_params(cluster)
master_ng = cluster.default_ng_master
worker_ng = cluster.default_ng_worker
self.add_parameter('number_of_slaves',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('slave_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=worker_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
self.add_parameter('master_flavor',
nodegroup_attr='flavor_id',
nodegroup_uuid=master_ng.uuid,
param_class=template_def.NodeGroupParameterMapping)
def get_nodegroup_param_maps(self, master_params=None, worker_params=None):
master_params = master_params or dict()
worker_params = worker_params or dict()
master_params.update({
'master_flavor': 'flavor_id',
'master_image': 'image_id',
})
worker_params.update({
'number_of_slaves': 'node_count',
'slave_flavor': 'flavor_id',
'slave_image': 'image_id',
})
return super(
UbuntuMesosTemplateDefinition, self).get_nodegroup_param_maps(
master_params=master_params, worker_params=worker_params)
def update_outputs(self, stack, cluster_template, cluster):
worker_ng = cluster.default_ng_worker
master_ng = cluster.default_ng_master
self.add_output('mesos_master',
nodegroup_attr='node_addresses',
nodegroup_uuid=master_ng.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
self.add_output('mesos_slaves',
nodegroup_attr='node_addresses',
nodegroup_uuid=worker_ng.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
self.add_output('number_of_slaves',
nodegroup_attr='node_count',
nodegroup_uuid=worker_ng.uuid,
is_stack_param=True,
mapping_type=template_def.NodeGroupOutputMapping)
def update_outputs(self, stack, cluster_template, cluster,
nodegroups=None):
nodegroups = nodegroups or [cluster.default_ng_worker,
cluster.default_ng_master]
for nodegroup in nodegroups:
if nodegroup.role == 'master':
self.add_output(
'mesos_master', nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
else:
self.add_output(
'mesos_slaves', nodegroup_attr='node_addresses',
nodegroup_uuid=nodegroup.uuid,
mapping_type=template_def.NodeGroupOutputMapping)
self.add_output(
'number_of_slaves', nodegroup_attr='node_count',
nodegroup_uuid=nodegroup.uuid, is_stack_param=True,
mapping_type=template_def.NodeGroupOutputMapping)
super(UbuntuMesosTemplateDefinition,
self).update_outputs(stack, cluster_template, cluster)
self).update_outputs(stack, cluster_template, cluster,
nodegroups=nodegroups)
def get_params(self, context, cluster_template, cluster, **kwargs):
extra_params = kwargs.pop('extra_params', {})

View File

@ -28,7 +28,12 @@ parameters:
description: uuid/name of an existing subnet to use to provision machines
default: ""
server_image:
master_image:
type: string
default: ubuntu-mesos
description: glance image used to boot the server
slave_image:
type: string
default: ubuntu-mesos
description: glance image used to boot the server
@ -414,7 +419,7 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'master', '%index%']
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: master_image}
master_flavor: {get_param: master_flavor}
external_network: {get_param: external_network}
fixed_network: {get_attr: [network, fixed_network]}
@ -445,7 +450,7 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'slave', '%index%']
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: slave_image}
slave_flavor: {get_param: slave_flavor}
fixed_network: {get_attr: [network, fixed_network]}
fixed_subnet: {get_attr: [network, fixed_subnet]}

View File

@ -44,10 +44,13 @@ parameters:
type: string
description: endpoint to retrieve TLS certs from
server_image:
master_image:
type: string
description: glance image used to boot the server
node_image:
type: string
description: glance image used to boot the server
#
# OPTIONAL PARAMETERS
#
@ -388,7 +391,7 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'master', '%index%']
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: master_image}
server_flavor: {get_param: master_flavor}
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}
@ -440,7 +443,7 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'node', '%index%']
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: node_image}
server_flavor: {get_param: node_flavor}
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}

View File

@ -40,10 +40,13 @@ parameters:
type: string
description: endpoint to retrieve TLS certs from
server_image:
master_image:
type: string
description: glance image used to boot the server
node_image:
type: string
description: glance image used to boot the server
#
# OPTIONAL PARAMETERS
#
@ -328,7 +331,7 @@ resources:
- [{ get_param: 'OS::stack_name' }, 'primary-master', '%index%']
is_primary_master: True
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: master_image}
server_flavor: {get_param: master_flavor}
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}
@ -373,7 +376,7 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'secondary-master', '%index%']
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: master_image}
server_flavor: {get_param: master_flavor}
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}
@ -418,7 +421,7 @@ resources:
- '-'
- [{ get_param: 'OS::stack_name' }, 'node', '%index%']
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
server_image: {get_param: node_image}
server_flavor: {get_param: node_flavor}
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}

View File

@ -214,10 +214,9 @@ class TestClusterConductorWithK8s(base.TestCase):
self.cluster_template_dict[missing_attr] = None
elif missing_attr in self.cluster_dict:
self.cluster_dict[missing_attr] = None
elif missing_attr == 'node_count':
self.worker_ng_dict['node_count'] = None
elif missing_attr == 'master_count':
self.master_ng_dict['node_count'] = None
if missing_attr == 'image_id':
del self.worker_ng_dict['image_id']
del self.master_ng_dict['image_id']
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
mock_generate_csr_and_key.return_value = {'csr': 'csr',
@ -245,7 +244,6 @@ class TestClusterConductorWithK8s(base.TestCase):
mapping = {
'dns_nameserver': 'dns_nameserver',
'image_id': 'server_image',
'flavor_id': 'minion_flavor',
'docker_volume_size': 'docker_volume_size',
'docker_storage_driver': 'docker_storage_driver',
@ -288,6 +286,7 @@ class TestClusterConductorWithK8s(base.TestCase):
'magnum_url': self.mock_osc.magnum_url.return_value,
'tls_disabled': False,
'insecure_registry': '10.0.0.1:5000',
'image_id': ['master_image', 'minion_image']
}
expected = {
'cloud_provider_enabled': 'false',
@ -299,7 +298,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'network_driver': 'network_driver',
'volume_driver': 'volume_driver',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'minion_image': 'image_id',
'minion_flavor': 'flavor_id',
'master_flavor': 'master_flavor_id',
'number_of_minions': 1,
@ -360,18 +360,20 @@ class TestClusterConductorWithK8s(base.TestCase):
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
'etcd_volume_type': 'lvmdriver-1',
'role': 'master'
}
if missing_attr is not None:
expected.pop(mapping[missing_attr], None)
attrs = mapping[missing_attr]
if not isinstance(attrs, list):
attrs = [attrs]
for attr in attrs:
expected.pop(attr, None)
if missing_attr == 'node_count':
expected['max_node_count'] = None
if missing_attr == 'image_id':
expected['master_image'] = None
expected['minion_image'] = None
self.assertEqual(expected, definition)
self.assertEqual(
['../../common/templates/environments/no_private_network.yaml',
@ -467,7 +469,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'region_name': 'RegionOne',
'registry_container': 'docker_registry',
'registry_enabled': True,
'server_image': 'image_id',
'master_image': 'image_id',
'minion_image': 'image_id',
'ssh_key_name': 'keypair_id',
'swift_region': 'RegionOne',
'tls_disabled': False,
@ -504,7 +507,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
'etcd_volume_type': 'lvmdriver-1',
'role': 'master',
}
self.assertEqual(expected, definition)
@ -558,8 +562,11 @@ class TestClusterConductorWithK8s(base.TestCase):
mock_get.return_value = mock_resp
mock_driver.return_value = k8s_dr.Driver()
cluster = objects.Cluster(self.context, **self.cluster_dict)
del self.worker_ng_dict['image_id']
worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict)
del self.master_ng_dict['image_id']
master_ng = objects.NodeGroup(self.context, **self.master_ng_dict)
master_ng.image_id = None
mock_objects_nodegroup_list.return_value = [master_ng, worker_ng]
(template_path,
@ -625,12 +632,12 @@ class TestClusterConductorWithK8s(base.TestCase):
'portal_network_cidr': '10.254.0.0/16',
'project_id': 'project_id',
'max_node_count': 2,
'master_image': None,
'minion_image': None,
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
'etcd_volume_type': 'lvmdriver-1',
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'role': 'master',
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -687,9 +694,10 @@ class TestClusterConductorWithK8s(base.TestCase):
'docker_storage_driver': 'devicemapper',
'docker_volume_size': 20,
'docker_volume_type': 'lvmdriver-1',
'server_image': 'image_id',
'minion_flavor': 'flavor_id',
'master_flavor': 'master_flavor_id',
'master_image': 'image_id',
'minion_image': 'image_id',
'number_of_minions': 1,
'number_of_masters': 1,
'network_driver': 'network_driver',
@ -737,6 +745,7 @@ class TestClusterConductorWithK8s(base.TestCase):
'kubeproxy_options': '--kubeproxy',
'octavia_enabled': False,
'portal_network_cidr': '10.254.0.0/16',
'role': 'master',
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -790,7 +799,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'docker_storage_driver': u'devicemapper',
'docker_volume_size': 20,
'docker_volume_type': u'lvmdriver-1',
'server_image': 'image_id',
'master_image': 'image_id',
'minion_image': 'image_id',
'minion_flavor': 'flavor_id',
'master_flavor': 'master_flavor_id',
'number_of_minions': 1,
@ -841,6 +851,7 @@ class TestClusterConductorWithK8s(base.TestCase):
'kubeproxy_options': '--kubeproxy',
'octavia_enabled': False,
'portal_network_cidr': '10.254.0.0/16',
'role': 'master',
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -1004,7 +1015,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'fixed_network_name': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'minion_image': 'image_id',
'master_flavor': 'master_flavor_id',
'minion_flavor': 'flavor_id',
'number_of_minions': 1,
@ -1067,7 +1079,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
'etcd_volume_type': 'lvmdriver-1',
'role': 'master'
}
self.assertEqual(expected, definition)
self.assertEqual(

View File

@ -151,7 +151,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'slave_image': 'image_id',
'master_flavor': 'master_flavor_id',
'slave_flavor': 'flavor_id',
'number_of_slaves': 1,
@ -178,7 +179,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'mesos_slave_image_providers': 'docker',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': 'master'
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -206,6 +208,8 @@ class TestClusterConductorWithMesos(base.TestCase):
mock_objects_cluster_template_get_by_uuid.return_value = \
cluster_template
cluster = objects.Cluster(self.context, **self.cluster_dict)
del self.worker_ng_dict['image_id']
del self.master_ng_dict['image_id']
worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict)
master_ng = objects.NodeGroup(self.context, **self.master_ng_dict)
mock_objects_nodegroup_list.return_value = [master_ng, worker_ng]
@ -241,7 +245,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'verify_ca': True,
'slave_flavor': 'flavor_id',
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': 'master'
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -283,7 +288,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'slave_image': 'image_id',
'master_flavor': 'master_flavor_id',
'slave_flavor': 'flavor_id',
'number_of_slaves': 1,
@ -310,7 +316,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'mesos_slave_image_providers': 'docker',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': 'master'
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -356,7 +363,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'slave_image': 'image_id',
'master_flavor': 'master_flavor_id',
'slave_flavor': 'flavor_id',
'number_of_slaves': 1,
@ -383,7 +391,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'mesos_slave_image_providers': 'docker',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': 'master'
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -427,7 +436,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'slave_image': 'image_id',
'master_flavor': 'master_flavor_id',
'slave_flavor': 'flavor_id',
'number_of_slaves': 1,
@ -454,7 +464,8 @@ class TestClusterConductorWithMesos(base.TestCase):
'mesos_slave_image_providers': 'docker',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': 'master'
}
self.assertEqual(expected, definition)
self.assertEqual(

View File

@ -171,7 +171,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'node_image': 'image_id',
'master_flavor': 'master_flavor_id',
'node_flavor': 'flavor_id',
'number_of_masters': 1,
@ -203,7 +204,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'docker_volume_type': 'lvmdriver-1',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': u'master',
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -253,7 +255,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'node_image': 'image_id',
'master_flavor': 'master_flavor_id',
'node_flavor': 'flavor_id',
'number_of_masters': 1,
@ -287,7 +290,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'docker_volume_type': 'lvmdriver-1',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': u'master',
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -328,6 +332,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
mock_get.return_value = mock_resp
mock_driver.return_value = swarm_dr.Driver()
cluster = objects.Cluster(self.context, **self.cluster_dict)
del self.worker_ng_dict['image_id']
del self.master_ng_dict['image_id']
worker_ng = objects.NodeGroup(self.context, **self.worker_ng_dict)
master_ng = objects.NodeGroup(self.context, **self.master_ng_dict)
mock_objects_nodegroup_list.return_value = [master_ng, worker_ng]
@ -365,7 +371,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'verify_ca': True,
'node_flavor': 'flavor_id',
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': 'master'
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -415,7 +422,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'node_image': 'image_id',
'master_flavor': 'master_flavor_id',
'node_flavor': 'flavor_id',
'number_of_masters': 1,
@ -447,7 +455,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'docker_volume_type': 'lvmdriver-1',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': u'master',
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -501,7 +510,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'node_image': 'image_id',
'master_flavor': 'master_flavor_id',
'node_flavor': 'flavor_id',
'number_of_masters': 1,
@ -533,7 +543,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'docker_volume_type': 'lvmdriver-1',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': u'master',
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -585,7 +596,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'fixed_network': 'fixed_network',
'fixed_subnet': 'fixed_subnet',
'dns_nameserver': 'dns_nameserver',
'server_image': 'image_id',
'master_image': 'image_id',
'node_image': 'image_id',
'master_flavor': 'master_flavor_id',
'node_flavor': 'flavor_id',
'number_of_masters': 2,
@ -617,7 +629,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
'docker_volume_type': 'lvmdriver-1',
'verify_ca': True,
'openstack_ca': '',
'nodes_affinity_policy': 'soft-anti-affinity'
'nodes_affinity_policy': 'soft-anti-affinity',
'role': u'master',
}
self.assertEqual(expected, definition)
self.assertEqual(

View File

@ -173,7 +173,7 @@ class TemplateDefinitionTestCase(base.TestCase):
definition = k8sa_dr.Driver().get_template_definition()
mock_args = [1, 3, 4]
mock_kwargs = {'test': 'test'}
mock_kwargs = {'cluster_attr': 'test'}
mock_mapping_type = mock.MagicMock()
mock_mapping_type.return_value = mock.MagicMock()
definition.add_output(mapping_type=mock_mapping_type, *mock_args,
@ -566,8 +566,6 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
min_node_count = mock_cluster.labels.get('min_node_count')
max_node_count = mock_cluster.labels.get('max_node_count')
npd_enabled = mock_cluster.labels.get('npd_enabled')
master_image = mock_cluster_template.image_id
minion_image = mock_cluster_template.image_id
boot_volume_size = mock_cluster.labels.get('boot_volume_size')
boot_volume_type = mock_cluster.labels.get('boot_volume_type')
etcd_volume_type = mock_cluster.labels.get('etcd_volume_type')
@ -647,8 +645,6 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'min_node_count': min_node_count,
'max_node_count': max_node_count,
'traefik_ingress_controller_tag': traefik_ingress_controller_tag,
'master_image': master_image,
'minion_image': minion_image,
'npd_enabled': npd_enabled,
'kube_version': kube_tag,
'master_kube_tag': kube_tag,
@ -1001,8 +997,6 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
min_node_count = mock_cluster.labels.get('min_node_count')
max_node_count = mock_cluster.labels.get('max_node_count')
npd_enabled = mock_cluster.labels.get('npd_enabled')
master_image = mock_cluster_template.image_id
minion_image = mock_cluster_template.image_id
boot_volume_size = mock_cluster.labels.get('boot_volume_size')
boot_volume_type = mock_cluster.labels.get('boot_volume_type')
etcd_volume_type = mock_cluster.labels.get('etcd_volume_type')
@ -1084,8 +1078,6 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'min_node_count': min_node_count,
'max_node_count': max_node_count,
'traefik_ingress_controller_tag': traefik_ingress_controller_tag,
'master_image': master_image,
'minion_image': minion_image,
'npd_enabled': npd_enabled,
'kube_version': kube_tag,
'master_kube_tag': kube_tag,